"""
Color data and pre-defined cmap objects.

This is a helper for cm.py, originally part of that file.
Separating the data (this file) from cm.py makes both easier
to deal with.

Objects visible in cm.py are the individual cmap objects ('autumn',
etc.) and a dictionary, 'datad', including all of these objects.
"""
import matplotlib as mpl
import matplotlib.colors as colors
LUTSIZE = mpl.rcParams['image.lut']
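# Format note (editorial sketch, not part of the original module): each
# *_data dict below is "segmentdata" for colors.LinearSegmentedColormap.
# For every channel ('red', 'green', 'blue') it stores (x, y0, y1)
# triples with x increasing monotonically from 0 to 1; y0 is the channel
# value approached from below x, y1 the value used from x upward, and
# values between breakpoints are linearly interpolated.  For the common
# two-point ramp ((0., a, a), (1., b, b)) the interpolation reduces to:
#
#     def ramp(a, b, x):
#         # linear interpolation on [0, 1]
#         return a + (b - a) * x
#
# So _binary_data maps 0 to white (all channels 1) and 1 to black.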
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
_flag_data = {'red': ((0., 1., 1.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 1.000000, 1.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 1.000000, 1.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 1.000000, 1.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 1.000000, 1.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 1.000000, 1.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 0.000000, 0.000000),(1.0, 0., 0.)),
'green': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 1.000000, 1.000000),(0.095238, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.190476, 0.000000, 0.000000),
(0.206349, 1.000000, 1.000000),(0.222222, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 0.000000, 0.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 1.000000, 1.000000),(0.476190, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.571429, 0.000000, 0.000000),
(0.587302, 1.000000, 1.000000),(0.603175, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 0.000000, 0.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.952381, 0.000000, 0.000000),
(0.968254, 1.000000, 1.000000),(0.984127, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 1.000000, 1.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 0.000000, 0.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 1.000000, 1.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 1.000000, 1.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 1.000000, 1.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 0.000000, 0.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 1.000000, 1.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 0.000000, 0.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 1.000000, 1.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 1.000000, 1.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 1.000000, 1.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 0.000000, 0.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 1.000000, 1.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 0.000000, 0.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 1.000000, 1.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 1.000000, 1.000000),(1.0, 0., 0.))}
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
_prism_data = {'red': ((0., 1., 1.),(0.031746, 1.000000, 1.000000),
(0.047619, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 0.666667, 0.666667),(0.095238, 1.000000, 1.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.666667, 0.666667),
(0.190476, 1.000000, 1.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 0.000000, 0.000000),(0.253968, 0.000000, 0.000000),
(0.269841, 0.666667, 0.666667),(0.285714, 1.000000, 1.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.666667, 0.666667),
(0.380952, 1.000000, 1.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 0.666667, 0.666667),(0.476190, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.666667, 0.666667),
(0.571429, 1.000000, 1.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 0.000000, 0.000000),(0.634921, 0.000000, 0.000000),
(0.650794, 0.666667, 0.666667),(0.666667, 1.000000, 1.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.666667, 0.666667),
(0.761905, 1.000000, 1.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 0.666667, 0.666667),(0.857143, 1.000000, 1.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.666667, 0.666667),
(0.952381, 1.000000, 1.000000),(0.984127, 1.000000, 1.000000),
(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(0.031746, 1.000000, 1.000000),
(0.047619, 1.000000, 1.000000),(0.063492, 0.000000, 0.000000),
(0.095238, 0.000000, 0.000000),(0.126984, 1.000000, 1.000000),
(0.142857, 1.000000, 1.000000),(0.158730, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 1.000000, 1.000000),(0.253968, 0.000000, 0.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 1.000000, 1.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 1.000000, 1.000000),(0.444444, 0.000000, 0.000000),
(0.476190, 0.000000, 0.000000),(0.507937, 1.000000, 1.000000),
(0.523810, 1.000000, 1.000000),(0.539683, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 1.000000, 1.000000),(0.634921, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 1.000000, 1.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 1.000000, 1.000000),(0.825397, 0.000000, 0.000000),
(0.857143, 0.000000, 0.000000),(0.888889, 1.000000, 1.000000),
(0.904762, 1.000000, 1.000000),(0.920635, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.984127, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 1.000000, 1.000000),
(0.190476, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 1.000000, 1.000000),
(0.380952, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 1.000000, 1.000000),
(0.571429, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 1.000000, 1.000000),
(0.761905, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 1.000000, 1.000000),
(0.952381, 0.000000, 0.000000),(1.0, 0.0, 0.0))}
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
autumn = colors.LinearSegmentedColormap('autumn', _autumn_data, LUTSIZE)
bone = colors.LinearSegmentedColormap('bone', _bone_data, LUTSIZE)
binary = colors.LinearSegmentedColormap('binary', _binary_data, LUTSIZE)
cool = colors.LinearSegmentedColormap('cool', _cool_data, LUTSIZE)
copper = colors.LinearSegmentedColormap('copper', _copper_data, LUTSIZE)
flag = colors.LinearSegmentedColormap('flag', _flag_data, LUTSIZE)
gray = colors.LinearSegmentedColormap('gray', _gray_data, LUTSIZE)
hot = colors.LinearSegmentedColormap('hot', _hot_data, LUTSIZE)
hsv = colors.LinearSegmentedColormap('hsv', _hsv_data, LUTSIZE)
jet = colors.LinearSegmentedColormap('jet', _jet_data, LUTSIZE)
pink = colors.LinearSegmentedColormap('pink', _pink_data, LUTSIZE)
prism = colors.LinearSegmentedColormap('prism', _prism_data, LUTSIZE)
spring = colors.LinearSegmentedColormap('spring', _spring_data, LUTSIZE)
summer = colors.LinearSegmentedColormap('summer', _summer_data, LUTSIZE)
winter = colors.LinearSegmentedColormap('winter', _winter_data, LUTSIZE)
spectral = colors.LinearSegmentedColormap('spectral', _spectral_data, LUTSIZE)
datad = {
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'cool': _cool_data,
'copper': _copper_data,
'flag': _flag_data,
'gray': _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet': _jet_data,
'pink': _pink_data,
'prism': _prism_data,
'spring': _spring_data,
'summer': _summer_data,
'winter': _winter_data,
'spectral': _spectral_data
}
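# Usage sketch (editorial, not part of the original module): cm.py
# re-exports the objects above, so typical user code looks like
#
#     import matplotlib.cm as cm
#     cmap = cm.get_cmap('autumn')   # the 'autumn' object defined above
#     r, g, b, a = cmap(0.5)         # sample the colormap at its midpoint
#
# and an equivalent colormap can be rebuilt from the raw data via
#
#     colors.LinearSegmentedColormap('autumn', datad['autumn'], LUTSIZE)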
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
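# A small illustrative helper (an assumption for exposition, not part of
# the original module): sampling one of the segment dictionaries above at
# n evenly spaced points recovers a plain list of RGB triples, e.g. the
# nine ColorBrewer anchors of _YlOrRd_data (up to float rounding):
def _sample_segmentdata(data, n=9):
    # Build a colormap whose n-entry lookup table is evaluated exactly at
    # the n anchor positions, then read it back as RGB triples.
    cmap = colors.LinearSegmentedColormap('_tmp_sample', data, n)
    return [cmap(i / float(n - 1))[:3] for i in range(n)]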
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
_gist_earth_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.18039216101169586, 0.18039216101169586), (0.0084033617749810219,
0.22745098173618317, 0.22745098173618317), (0.012605042196810246,
0.27058824896812439, 0.27058824896812439), (0.016806723549962044,
0.31764706969261169, 0.31764706969261169), (0.021008403971791267,
0.36078432202339172, 0.36078432202339172), (0.025210084393620491,
0.40784314274787903, 0.40784314274787903), (0.029411764815449715,
0.45490196347236633, 0.45490196347236633), (0.033613447099924088,
0.45490196347236633, 0.45490196347236633), (0.037815127521753311,
0.45490196347236633, 0.45490196347236633), (0.042016807943582535,
0.45490196347236633, 0.45490196347236633), (0.046218488365411758,
0.45490196347236633, 0.45490196347236633), (0.050420168787240982,
0.45882353186607361, 0.45882353186607361), (0.054621849209070206,
0.45882353186607361, 0.45882353186607361), (0.058823529630899429,
0.45882353186607361, 0.45882353186607361), (0.063025213778018951,
0.45882353186607361, 0.45882353186607361), (0.067226894199848175,
0.45882353186607361, 0.45882353186607361), (0.071428574621677399,
0.46274510025978088, 0.46274510025978088), (0.075630255043506622,
0.46274510025978088, 0.46274510025978088), (0.079831935465335846,
0.46274510025978088, 0.46274510025978088), (0.08403361588716507,
0.46274510025978088, 0.46274510025978088), (0.088235296308994293,
0.46274510025978088, 0.46274510025978088), (0.092436976730823517,
0.46666666865348816, 0.46666666865348816), (0.09663865715265274,
0.46666666865348816, 0.46666666865348816), (0.10084033757448196,
0.46666666865348816, 0.46666666865348816), (0.10504201799631119,
0.46666666865348816, 0.46666666865348816), (0.10924369841814041,
0.46666666865348816, 0.46666666865348816), (0.11344537883996964,
0.47058823704719543, 0.47058823704719543), (0.11764705926179886,
0.47058823704719543, 0.47058823704719543), (0.12184873968362808,
0.47058823704719543, 0.47058823704719543), (0.1260504275560379,
0.47058823704719543, 0.47058823704719543), (0.13025210797786713,
0.47058823704719543, 0.47058823704719543), (0.13445378839969635,
0.47450980544090271, 0.47450980544090271), (0.13865546882152557,
0.47450980544090271, 0.47450980544090271), (0.1428571492433548,
0.47450980544090271, 0.47450980544090271), (0.14705882966518402,
0.47450980544090271, 0.47450980544090271), (0.15126051008701324,
0.47450980544090271, 0.47450980544090271), (0.15546219050884247,
0.47843137383460999, 0.47843137383460999), (0.15966387093067169,
0.47843137383460999, 0.47843137383460999), (0.16386555135250092,
0.47843137383460999, 0.47843137383460999), (0.16806723177433014,
0.47843137383460999, 0.47843137383460999), (0.17226891219615936,
0.47843137383460999, 0.47843137383460999), (0.17647059261798859,
0.48235294222831726, 0.48235294222831726), (0.18067227303981781,
0.48235294222831726, 0.48235294222831726), (0.18487395346164703,
0.48235294222831726, 0.48235294222831726), (0.18907563388347626,
0.48235294222831726, 0.48235294222831726), (0.19327731430530548,
0.48235294222831726, 0.48235294222831726), (0.1974789947271347,
0.48627451062202454, 0.48627451062202454), (0.20168067514896393,
0.48627451062202454, 0.48627451062202454), (0.20588235557079315,
0.48627451062202454, 0.48627451062202454), (0.21008403599262238,
0.48627451062202454, 0.48627451062202454), (0.2142857164144516,
0.48627451062202454, 0.48627451062202454), (0.21848739683628082,
0.49019607901573181, 0.49019607901573181), (0.22268907725811005,
0.49019607901573181, 0.49019607901573181), (0.22689075767993927,
0.49019607901573181, 0.49019607901573181), (0.23109243810176849,
0.49019607901573181, 0.49019607901573181), (0.23529411852359772,
0.49019607901573181, 0.49019607901573181), (0.23949579894542694,
0.49411764740943909, 0.49411764740943909), (0.24369747936725616,
0.49411764740943909, 0.49411764740943909), (0.24789915978908539,
0.49411764740943909, 0.49411764740943909), (0.25210085511207581,
0.49411764740943909, 0.49411764740943909), (0.25630253553390503,
0.49411764740943909, 0.49411764740943909), (0.26050421595573425,
0.49803921580314636, 0.49803921580314636), (0.26470589637756348,
0.49803921580314636, 0.49803921580314636), (0.2689075767993927,
0.49803921580314636, 0.49803921580314636), (0.27310925722122192,
0.49803921580314636, 0.49803921580314636), (0.27731093764305115,
0.49803921580314636, 0.49803921580314636), (0.28151261806488037,
0.50196081399917603, 0.50196081399917603), (0.28571429848670959,
0.49411764740943909, 0.49411764740943909), (0.28991597890853882,
0.49019607901573181, 0.49019607901573181), (0.29411765933036804,
0.48627451062202454, 0.48627451062202454), (0.29831933975219727,
0.48235294222831726, 0.48235294222831726), (0.30252102017402649,
0.47843137383460999, 0.47843137383460999), (0.30672270059585571,
0.47058823704719543, 0.47058823704719543), (0.31092438101768494,
0.46666666865348816, 0.46666666865348816), (0.31512606143951416,
0.46274510025978088, 0.46274510025978088), (0.31932774186134338,
0.45882353186607361, 0.45882353186607361), (0.32352942228317261,
0.45098039507865906, 0.45098039507865906), (0.32773110270500183,
0.44705882668495178, 0.44705882668495178), (0.33193278312683105,
0.44313725829124451, 0.44313725829124451), (0.33613446354866028,
0.43529412150382996, 0.43529412150382996), (0.3403361439704895,
0.43137255311012268, 0.43137255311012268), (0.34453782439231873,
0.42745098471641541, 0.42745098471641541), (0.34873950481414795,
0.42352941632270813, 0.42352941632270813), (0.35294118523597717,
0.41568627953529358, 0.41568627953529358), (0.3571428656578064,
0.4117647111415863, 0.4117647111415863), (0.36134454607963562,
0.40784314274787903, 0.40784314274787903), (0.36554622650146484,
0.40000000596046448, 0.40000000596046448), (0.36974790692329407,
0.3960784375667572, 0.3960784375667572), (0.37394958734512329,
0.39215686917304993, 0.39215686917304993), (0.37815126776695251,
0.38431373238563538, 0.38431373238563538), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.37647059559822083, 0.37647059559822083), (0.39075630903244019,
0.36862745881080627, 0.36862745881080627), (0.39495798945426941,
0.364705890417099, 0.364705890417099), (0.39915966987609863,
0.36078432202339172, 0.36078432202339172), (0.40336135029792786,
0.35294118523597717, 0.35294118523597717), (0.40756303071975708,
0.3490196168422699, 0.3490196168422699), (0.4117647111415863,
0.34509804844856262, 0.34509804844856262), (0.41596639156341553,
0.33725491166114807, 0.33725491166114807), (0.42016807198524475,
0.3333333432674408, 0.3333333432674408), (0.42436975240707397,
0.32941177487373352, 0.32941177487373352), (0.4285714328289032,
0.32156863808631897, 0.32156863808631897), (0.43277311325073242,
0.31764706969261169, 0.31764706969261169), (0.43697479367256165,
0.31372550129890442, 0.31372550129890442), (0.44117647409439087,
0.30588236451148987, 0.30588236451148987), (0.44537815451622009,
0.30196079611778259, 0.30196079611778259), (0.44957983493804932,
0.29803922772407532, 0.29803922772407532), (0.45378151535987854,
0.29019609093666077, 0.29019609093666077), (0.45798319578170776,
0.28627452254295349, 0.28627452254295349), (0.46218487620353699,
0.27843138575553894, 0.27843138575553894), (0.46638655662536621,
0.27450981736183167, 0.27450981736183167), (0.47058823704719543,
0.27843138575553894, 0.27843138575553894), (0.47478991746902466,
0.28235295414924622, 0.28235295414924622), (0.47899159789085388,
0.28235295414924622, 0.28235295414924622), (0.48319327831268311,
0.28627452254295349, 0.28627452254295349), (0.48739495873451233,
0.28627452254295349, 0.28627452254295349), (0.49159663915634155,
0.29019609093666077, 0.29019609093666077), (0.49579831957817078,
0.29411765933036804, 0.29411765933036804), (0.5, 0.29411765933036804,
0.29411765933036804), (0.50420171022415161, 0.29803922772407532,
0.29803922772407532), (0.50840336084365845, 0.29803922772407532,
0.29803922772407532), (0.51260507106781006, 0.30196079611778259,
0.30196079611778259), (0.51680672168731689, 0.30196079611778259,
0.30196079611778259), (0.52100843191146851, 0.30588236451148987,
0.30588236451148987), (0.52521008253097534, 0.30980393290519714,
0.30980393290519714), (0.52941179275512695, 0.30980393290519714,
0.30980393290519714), (0.53361344337463379, 0.31372550129890442,
0.31372550129890442), (0.5378151535987854, 0.31372550129890442,
0.31372550129890442), (0.54201680421829224, 0.31764706969261169,
0.31764706969261169), (0.54621851444244385, 0.32156863808631897,
0.32156863808631897), (0.55042016506195068, 0.32156863808631897,
0.32156863808631897), (0.55462187528610229, 0.32156863808631897,
0.32156863808631897), (0.55882352590560913, 0.32549020648002625,
0.32549020648002625), (0.56302523612976074, 0.32549020648002625,
0.32549020648002625), (0.56722688674926758, 0.32549020648002625,
0.32549020648002625), (0.57142859697341919, 0.32941177487373352,
0.32941177487373352), (0.57563024759292603, 0.32941177487373352,
0.32941177487373352), (0.57983195781707764, 0.32941177487373352,
0.32941177487373352), (0.58403360843658447, 0.3333333432674408,
0.3333333432674408), (0.58823531866073608, 0.3333333432674408,
0.3333333432674408), (0.59243696928024292, 0.3333333432674408,
0.3333333432674408), (0.59663867950439453, 0.33725491166114807,
0.33725491166114807), (0.60084033012390137, 0.33725491166114807,
0.33725491166114807), (0.60504204034805298, 0.33725491166114807,
0.33725491166114807), (0.60924369096755981, 0.34117648005485535,
0.34117648005485535), (0.61344540119171143, 0.34117648005485535,
0.34117648005485535), (0.61764705181121826, 0.34117648005485535,
0.34117648005485535), (0.62184876203536987, 0.34509804844856262,
0.34509804844856262), (0.62605041265487671, 0.34509804844856262,
0.34509804844856262), (0.63025212287902832, 0.34509804844856262,
0.34509804844856262), (0.63445377349853516, 0.3490196168422699,
0.3490196168422699), (0.63865548372268677, 0.3490196168422699,
0.3490196168422699), (0.6428571343421936, 0.3490196168422699,
0.3490196168422699), (0.64705884456634521, 0.35294118523597717,
0.35294118523597717), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.35294118523597717,
0.35294118523597717), (0.6596638560295105, 0.35686275362968445,
0.35686275362968445), (0.66386556625366211, 0.35686275362968445,
0.35686275362968445), (0.66806721687316895, 0.35686275362968445,
0.35686275362968445), (0.67226892709732056, 0.36078432202339172,
0.36078432202339172), (0.67647057771682739, 0.36078432202339172,
0.36078432202339172), (0.680672287940979, 0.36078432202339172,
0.36078432202339172), (0.68487393856048584, 0.364705890417099,
0.364705890417099), (0.68907564878463745, 0.364705890417099,
0.364705890417099), (0.69327729940414429, 0.364705890417099,
0.364705890417099), (0.6974790096282959, 0.36862745881080627,
0.36862745881080627), (0.70168066024780273, 0.36862745881080627,
0.36862745881080627), (0.70588237047195435, 0.36862745881080627,
0.36862745881080627), (0.71008402109146118, 0.37254902720451355,
0.37254902720451355), (0.71428573131561279, 0.37254902720451355,
0.37254902720451355), (0.71848738193511963, 0.37254902720451355,
0.37254902720451355), (0.72268909215927124, 0.37647059559822083,
0.37647059559822083), (0.72689074277877808, 0.37647059559822083,
0.37647059559822083), (0.73109245300292969, 0.3803921639919281,
0.3803921639919281), (0.73529410362243652, 0.3803921639919281,
0.3803921639919281), (0.73949581384658813, 0.3803921639919281,
0.3803921639919281), (0.74369746446609497, 0.38431373238563538,
0.38431373238563538), (0.74789917469024658, 0.38431373238563538,
0.38431373238563538), (0.75210082530975342, 0.38431373238563538,
0.38431373238563538), (0.75630253553390503, 0.38823530077934265,
0.38823530077934265), (0.76050418615341187, 0.38823530077934265,
0.38823530077934265), (0.76470589637756348, 0.38823530077934265,
0.38823530077934265), (0.76890754699707031, 0.39215686917304993,
0.39215686917304993), (0.77310925722122192, 0.39215686917304993,
0.39215686917304993), (0.77731090784072876, 0.39215686917304993,
0.39215686917304993), (0.78151261806488037, 0.3960784375667572,
0.3960784375667572), (0.78571426868438721, 0.3960784375667572,
0.3960784375667572), (0.78991597890853882, 0.40784314274787903,
0.40784314274787903), (0.79411762952804565, 0.41568627953529358,
0.41568627953529358), (0.79831933975219727, 0.42352941632270813,
0.42352941632270813), (0.8025209903717041, 0.43529412150382996,
0.43529412150382996), (0.80672270059585571, 0.44313725829124451,
0.44313725829124451), (0.81092435121536255, 0.45490196347236633,
0.45490196347236633), (0.81512606143951416, 0.46274510025978088,
0.46274510025978088), (0.819327712059021, 0.47450980544090271,
0.47450980544090271), (0.82352942228317261, 0.48235294222831726,
0.48235294222831726), (0.82773107290267944, 0.49411764740943909,
0.49411764740943909), (0.83193278312683105, 0.5058823823928833,
0.5058823823928833), (0.83613443374633789, 0.51372551918029785,
0.51372551918029785), (0.8403361439704895, 0.52549022436141968,
0.52549022436141968), (0.84453779458999634, 0.5372549295425415,
0.5372549295425415), (0.84873950481414795, 0.54509806632995605,
0.54509806632995605), (0.85294115543365479, 0.55686277151107788,
0.55686277151107788), (0.8571428656578064, 0.56862747669219971,
0.56862747669219971), (0.86134451627731323, 0.58039218187332153,
0.58039218187332153), (0.86554622650146484, 0.58823531866073608,
0.58823531866073608), (0.86974787712097168, 0.60000002384185791,
0.60000002384185791), (0.87394958734512329, 0.61176472902297974,
0.61176472902297974), (0.87815123796463013, 0.62352943420410156,
0.62352943420410156), (0.88235294818878174, 0.63529413938522339,
0.63529413938522339), (0.88655459880828857, 0.64705884456634521,
0.64705884456634521), (0.89075630903244019, 0.65882354974746704,
0.65882354974746704), (0.89495795965194702, 0.66666668653488159,
0.66666668653488159), (0.89915966987609863, 0.67843139171600342,
0.67843139171600342), (0.90336132049560547, 0.69019609689712524,
0.69019609689712524), (0.90756303071975708, 0.70196080207824707,
0.70196080207824707), (0.91176468133926392, 0.7137255072593689,
0.7137255072593689), (0.91596639156341553, 0.72549021244049072,
0.72549021244049072), (0.92016804218292236, 0.74117648601531982,
0.74117648601531982), (0.92436975240707397, 0.75294119119644165,
0.75294119119644165), (0.92857140302658081, 0.76470589637756348,
0.76470589637756348), (0.93277311325073242, 0.7764706015586853,
0.7764706015586853), (0.93697476387023926, 0.78823530673980713,
0.78823530673980713), (0.94117647409439087, 0.80000001192092896,
0.80000001192092896), (0.94537812471389771, 0.81176471710205078,
0.81176471710205078), (0.94957983493804932, 0.82745099067687988,
0.82745099067687988), (0.95378148555755615, 0.83921569585800171,
0.83921569585800171), (0.95798319578170776, 0.85098040103912354,
0.85098040103912354), (0.9621848464012146, 0.86274510622024536,
0.86274510622024536), (0.96638655662536621, 0.87843137979507446,
0.87843137979507446), (0.97058820724487305, 0.89019608497619629,
0.89019608497619629), (0.97478991746902466, 0.90196079015731812,
0.90196079015731812), (0.97899156808853149, 0.91764706373214722,
0.91764706373214722), (0.98319327831268311, 0.92941176891326904,
0.92941176891326904), (0.98739492893218994, 0.94509804248809814,
0.94509804248809814), (0.99159663915634155, 0.95686274766921997,
0.95686274766921997), (0.99579828977584839, 0.97254902124404907,
0.97254902124404907), (1.0, 0.9843137264251709, 0.9843137264251709)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.011764706112444401, 0.011764706112444401),
(0.037815127521753311, 0.023529412224888802, 0.023529412224888802),
(0.042016807943582535, 0.031372550874948502, 0.031372550874948502),
(0.046218488365411758, 0.043137256056070328, 0.043137256056070328),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.062745101749897003, 0.062745101749897003),
(0.058823529630899429, 0.070588238537311554, 0.070588238537311554),
(0.063025213778018951, 0.08235294371843338, 0.08235294371843338),
(0.067226894199848175, 0.090196080505847931, 0.090196080505847931),
(0.071428574621677399, 0.10196078568696976, 0.10196078568696976),
(0.075630255043506622, 0.10980392247438431, 0.10980392247438431),
(0.079831935465335846, 0.12156862765550613, 0.12156862765550613),
(0.08403361588716507, 0.12941177189350128, 0.12941177189350128),
(0.088235296308994293, 0.14117647707462311, 0.14117647707462311),
(0.092436976730823517, 0.14901961386203766, 0.14901961386203766),
(0.09663865715265274, 0.16078431904315948, 0.16078431904315948),
(0.10084033757448196, 0.16862745583057404, 0.16862745583057404),
(0.10504201799631119, 0.17647059261798859, 0.17647059261798859),
(0.10924369841814041, 0.18823529779911041, 0.18823529779911041),
(0.11344537883996964, 0.19607843458652496, 0.19607843458652496),
(0.11764705926179886, 0.20392157137393951, 0.20392157137393951),
(0.12184873968362808, 0.21568627655506134, 0.21568627655506134),
(0.1260504275560379, 0.22352941334247589, 0.22352941334247589),
(0.13025210797786713, 0.23137255012989044, 0.23137255012989044),
(0.13445378839969635, 0.23921568691730499, 0.23921568691730499),
(0.13865546882152557, 0.25098040699958801, 0.25098040699958801),
(0.1428571492433548, 0.25882354378700256, 0.25882354378700256),
(0.14705882966518402, 0.26666668057441711, 0.26666668057441711),
(0.15126051008701324, 0.27450981736183167, 0.27450981736183167),
(0.15546219050884247, 0.28235295414924622, 0.28235295414924622),
(0.15966387093067169, 0.29019609093666077, 0.29019609093666077),
(0.16386555135250092, 0.30196079611778259, 0.30196079611778259),
(0.16806723177433014, 0.30980393290519714, 0.30980393290519714),
(0.17226891219615936, 0.31764706969261169, 0.31764706969261169),
(0.17647059261798859, 0.32549020648002625, 0.32549020648002625),
(0.18067227303981781, 0.3333333432674408, 0.3333333432674408),
(0.18487395346164703, 0.34117648005485535, 0.34117648005485535),
(0.18907563388347626, 0.3490196168422699, 0.3490196168422699),
(0.19327731430530548, 0.35686275362968445, 0.35686275362968445),
(0.1974789947271347, 0.364705890417099, 0.364705890417099),
(0.20168067514896393, 0.37254902720451355, 0.37254902720451355),
(0.20588235557079315, 0.3803921639919281, 0.3803921639919281),
(0.21008403599262238, 0.38823530077934265, 0.38823530077934265),
(0.2142857164144516, 0.39215686917304993, 0.39215686917304993),
(0.21848739683628082, 0.40000000596046448, 0.40000000596046448),
(0.22268907725811005, 0.40784314274787903, 0.40784314274787903),
(0.22689075767993927, 0.41568627953529358, 0.41568627953529358),
(0.23109243810176849, 0.42352941632270813, 0.42352941632270813),
(0.23529411852359772, 0.42745098471641541, 0.42745098471641541),
(0.23949579894542694, 0.43529412150382996, 0.43529412150382996),
(0.24369747936725616, 0.44313725829124451, 0.44313725829124451),
(0.24789915978908539, 0.45098039507865906, 0.45098039507865906),
(0.25210085511207581, 0.45490196347236633, 0.45490196347236633),
(0.25630253553390503, 0.46274510025978088, 0.46274510025978088),
(0.26050421595573425, 0.47058823704719543, 0.47058823704719543),
(0.26470589637756348, 0.47450980544090271, 0.47450980544090271),
(0.2689075767993927, 0.48235294222831726, 0.48235294222831726),
(0.27310925722122192, 0.49019607901573181, 0.49019607901573181),
(0.27731093764305115, 0.49411764740943909, 0.49411764740943909),
(0.28151261806488037, 0.50196081399917603, 0.50196081399917603),
(0.28571429848670959, 0.50196081399917603, 0.50196081399917603),
(0.28991597890853882, 0.5058823823928833, 0.5058823823928833),
(0.29411765933036804, 0.5058823823928833, 0.5058823823928833),
(0.29831933975219727, 0.50980395078659058, 0.50980395078659058),
(0.30252102017402649, 0.51372551918029785, 0.51372551918029785),
(0.30672270059585571, 0.51372551918029785, 0.51372551918029785),
(0.31092438101768494, 0.51764708757400513, 0.51764708757400513),
(0.31512606143951416, 0.5215686559677124, 0.5215686559677124),
(0.31932774186134338, 0.5215686559677124, 0.5215686559677124),
(0.32352942228317261, 0.52549022436141968, 0.52549022436141968),
(0.32773110270500183, 0.52549022436141968, 0.52549022436141968),
(0.33193278312683105, 0.52941179275512695, 0.52941179275512695),
(0.33613446354866028, 0.53333336114883423, 0.53333336114883423),
(0.3403361439704895, 0.53333336114883423, 0.53333336114883423),
(0.34453782439231873, 0.5372549295425415, 0.5372549295425415),
(0.34873950481414795, 0.54117649793624878, 0.54117649793624878),
(0.35294118523597717, 0.54117649793624878, 0.54117649793624878),
(0.3571428656578064, 0.54509806632995605, 0.54509806632995605),
(0.36134454607963562, 0.54901963472366333, 0.54901963472366333),
(0.36554622650146484, 0.54901963472366333, 0.54901963472366333),
(0.36974790692329407, 0.55294120311737061, 0.55294120311737061),
(0.37394958734512329, 0.55294120311737061, 0.55294120311737061),
(0.37815126776695251, 0.55686277151107788, 0.55686277151107788),
(0.38235294818878174, 0.56078433990478516, 0.56078433990478516),
(0.38655462861061096, 0.56078433990478516, 0.56078433990478516),
(0.39075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.39495798945426941, 0.56862747669219971, 0.56862747669219971),
(0.39915966987609863, 0.56862747669219971, 0.56862747669219971),
(0.40336135029792786, 0.57254904508590698, 0.57254904508590698),
(0.40756303071975708, 0.57254904508590698, 0.57254904508590698),
(0.4117647111415863, 0.57647061347961426, 0.57647061347961426),
(0.41596639156341553, 0.58039218187332153, 0.58039218187332153),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.58431375026702881, 0.58431375026702881),
(0.4285714328289032, 0.58823531866073608, 0.58823531866073608),
(0.43277311325073242, 0.58823531866073608, 0.58823531866073608),
(0.43697479367256165, 0.59215688705444336, 0.59215688705444336),
(0.44117647409439087, 0.59215688705444336, 0.59215688705444336),
(0.44537815451622009, 0.59607845544815063, 0.59607845544815063),
(0.44957983493804932, 0.60000002384185791, 0.60000002384185791),
(0.45378151535987854, 0.60000002384185791, 0.60000002384185791),
(0.45798319578170776, 0.60392159223556519, 0.60392159223556519),
(0.46218487620353699, 0.60784316062927246, 0.60784316062927246),
(0.46638655662536621, 0.60784316062927246, 0.60784316062927246),
(0.47058823704719543, 0.61176472902297974, 0.61176472902297974),
(0.47478991746902466, 0.61176472902297974, 0.61176472902297974),
(0.47899159789085388, 0.61568629741668701, 0.61568629741668701),
(0.48319327831268311, 0.61960786581039429, 0.61960786581039429),
(0.48739495873451233, 0.61960786581039429, 0.61960786581039429),
(0.49159663915634155, 0.62352943420410156, 0.62352943420410156),
(0.49579831957817078, 0.62745100259780884, 0.62745100259780884), (0.5,
0.62745100259780884, 0.62745100259780884), (0.50420171022415161,
0.63137257099151611, 0.63137257099151611), (0.50840336084365845,
0.63137257099151611, 0.63137257099151611), (0.51260507106781006,
0.63529413938522339, 0.63529413938522339), (0.51680672168731689,
0.63921570777893066, 0.63921570777893066), (0.52100843191146851,
0.63921570777893066, 0.63921570777893066), (0.52521008253097534,
0.64313727617263794, 0.64313727617263794), (0.52941179275512695,
0.64705884456634521, 0.64705884456634521), (0.53361344337463379,
0.64705884456634521, 0.64705884456634521), (0.5378151535987854,
0.65098041296005249, 0.65098041296005249), (0.54201680421829224,
0.65098041296005249, 0.65098041296005249), (0.54621851444244385,
0.65490198135375977, 0.65490198135375977), (0.55042016506195068,
0.65882354974746704, 0.65882354974746704), (0.55462187528610229,
0.65882354974746704, 0.65882354974746704), (0.55882352590560913,
0.65882354974746704, 0.65882354974746704), (0.56302523612976074,
0.66274511814117432, 0.66274511814117432), (0.56722688674926758,
0.66274511814117432, 0.66274511814117432), (0.57142859697341919,
0.66666668653488159, 0.66666668653488159), (0.57563024759292603,
0.66666668653488159, 0.66666668653488159), (0.57983195781707764,
0.67058825492858887, 0.67058825492858887), (0.58403360843658447,
0.67058825492858887, 0.67058825492858887), (0.58823531866073608,
0.67450982332229614, 0.67450982332229614), (0.59243696928024292,
0.67450982332229614, 0.67450982332229614), (0.59663867950439453,
0.67450982332229614, 0.67450982332229614), (0.60084033012390137,
0.67843139171600342, 0.67843139171600342), (0.60504204034805298,
0.67843139171600342, 0.67843139171600342), (0.60924369096755981,
0.68235296010971069, 0.68235296010971069), (0.61344540119171143,
0.68235296010971069, 0.68235296010971069), (0.61764705181121826,
0.68627452850341797, 0.68627452850341797), (0.62184876203536987,
0.68627452850341797, 0.68627452850341797), (0.62605041265487671,
0.68627452850341797, 0.68627452850341797), (0.63025212287902832,
0.69019609689712524, 0.69019609689712524), (0.63445377349853516,
0.69019609689712524, 0.69019609689712524), (0.63865548372268677,
0.69411766529083252, 0.69411766529083252), (0.6428571343421936,
0.69411766529083252, 0.69411766529083252), (0.64705884456634521,
0.69803923368453979, 0.69803923368453979), (0.65126049518585205,
0.69803923368453979, 0.69803923368453979), (0.65546220541000366,
0.70196080207824707, 0.70196080207824707), (0.6596638560295105,
0.70196080207824707, 0.70196080207824707), (0.66386556625366211,
0.70196080207824707, 0.70196080207824707), (0.66806721687316895,
0.70588237047195435, 0.70588237047195435), (0.67226892709732056,
0.70588237047195435, 0.70588237047195435), (0.67647057771682739,
0.70980393886566162, 0.70980393886566162), (0.680672287940979,
0.70980393886566162, 0.70980393886566162), (0.68487393856048584,
0.7137255072593689, 0.7137255072593689), (0.68907564878463745,
0.7137255072593689, 0.7137255072593689), (0.69327729940414429,
0.71764707565307617, 0.71764707565307617), (0.6974790096282959,
0.71764707565307617, 0.71764707565307617), (0.70168066024780273,
0.7137255072593689, 0.7137255072593689), (0.70588237047195435,
0.70980393886566162, 0.70980393886566162), (0.71008402109146118,
0.70980393886566162, 0.70980393886566162), (0.71428573131561279,
0.70588237047195435, 0.70588237047195435), (0.71848738193511963,
0.70196080207824707, 0.70196080207824707), (0.72268909215927124,
0.69803923368453979, 0.69803923368453979), (0.72689074277877808,
0.69411766529083252, 0.69411766529083252), (0.73109245300292969,
0.69019609689712524, 0.69019609689712524), (0.73529410362243652,
0.68627452850341797, 0.68627452850341797), (0.73949581384658813,
0.68235296010971069, 0.68235296010971069), (0.74369746446609497,
0.67843139171600342, 0.67843139171600342), (0.74789917469024658,
0.67450982332229614, 0.67450982332229614), (0.75210082530975342,
0.67058825492858887, 0.67058825492858887), (0.75630253553390503,
0.66666668653488159, 0.66666668653488159), (0.76050418615341187,
0.66274511814117432, 0.66274511814117432), (0.76470589637756348,
0.65882354974746704, 0.65882354974746704), (0.76890754699707031,
0.65490198135375977, 0.65490198135375977), (0.77310925722122192,
0.65098041296005249, 0.65098041296005249), (0.77731090784072876,
0.64705884456634521, 0.64705884456634521), (0.78151261806488037,
0.64313727617263794, 0.64313727617263794), (0.78571426868438721,
0.63921570777893066, 0.63921570777893066), (0.78991597890853882,
0.63921570777893066, 0.63921570777893066), (0.79411762952804565,
0.64313727617263794, 0.64313727617263794), (0.79831933975219727,
0.64313727617263794, 0.64313727617263794), (0.8025209903717041,
0.64705884456634521, 0.64705884456634521), (0.80672270059585571,
0.64705884456634521, 0.64705884456634521), (0.81092435121536255,
0.65098041296005249, 0.65098041296005249), (0.81512606143951416,
0.65490198135375977, 0.65490198135375977), (0.819327712059021,
0.65490198135375977, 0.65490198135375977), (0.82352942228317261,
0.65882354974746704, 0.65882354974746704), (0.82773107290267944,
0.66274511814117432, 0.66274511814117432), (0.83193278312683105,
0.66666668653488159, 0.66666668653488159), (0.83613443374633789,
0.67058825492858887, 0.67058825492858887), (0.8403361439704895,
0.67450982332229614, 0.67450982332229614), (0.84453779458999634,
0.67843139171600342, 0.67843139171600342), (0.84873950481414795,
0.68235296010971069, 0.68235296010971069), (0.85294115543365479,
0.68627452850341797, 0.68627452850341797), (0.8571428656578064,
0.69019609689712524, 0.69019609689712524), (0.86134451627731323,
0.69411766529083252, 0.69411766529083252), (0.86554622650146484,
0.69803923368453979, 0.69803923368453979), (0.86974787712097168,
0.70196080207824707, 0.70196080207824707), (0.87394958734512329,
0.70980393886566162, 0.70980393886566162), (0.87815123796463013,
0.7137255072593689, 0.7137255072593689), (0.88235294818878174,
0.72156864404678345, 0.72156864404678345), (0.88655459880828857,
0.72549021244049072, 0.72549021244049072), (0.89075630903244019,
0.73333334922790527, 0.73333334922790527), (0.89495795965194702,
0.73725491762161255, 0.73725491762161255), (0.89915966987609863,
0.7450980544090271, 0.7450980544090271), (0.90336132049560547,
0.75294119119644165, 0.75294119119644165), (0.90756303071975708,
0.7607843279838562, 0.7607843279838562), (0.91176468133926392,
0.76862746477127075, 0.76862746477127075), (0.91596639156341553,
0.7764706015586853, 0.7764706015586853), (0.92016804218292236,
0.78431373834609985, 0.78431373834609985), (0.92436975240707397,
0.7921568751335144, 0.7921568751335144), (0.92857140302658081,
0.80000001192092896, 0.80000001192092896), (0.93277311325073242,
0.80784314870834351, 0.80784314870834351), (0.93697476387023926,
0.81568628549575806, 0.81568628549575806), (0.94117647409439087,
0.82745099067687988, 0.82745099067687988), (0.94537812471389771,
0.83529412746429443, 0.83529412746429443), (0.94957983493804932,
0.84313726425170898, 0.84313726425170898), (0.95378148555755615,
0.85490196943283081, 0.85490196943283081), (0.95798319578170776,
0.86666667461395264, 0.86666667461395264), (0.9621848464012146,
0.87450981140136719, 0.87450981140136719), (0.96638655662536621,
0.88627451658248901, 0.88627451658248901), (0.97058820724487305,
0.89803922176361084, 0.89803922176361084), (0.97478991746902466,
0.90980392694473267, 0.90980392694473267), (0.97899156808853149,
0.92156863212585449, 0.92156863212585449), (0.98319327831268311,
0.93333333730697632, 0.93333333730697632), (0.98739492893218994,
0.94509804248809814, 0.94509804248809814), (0.99159663915634155,
0.95686274766921997, 0.95686274766921997), (0.99579828977584839,
0.97254902124404907, 0.97254902124404907), (1.0, 0.9843137264251709,
0.9843137264251709)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0, 0.0), (0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0,
0.0), (0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.0, 0.0), (0.037815127521753311,
0.0039215688593685627, 0.0039215688593685627), (0.042016807943582535,
0.0078431377187371254, 0.0078431377187371254), (0.046218488365411758,
0.0078431377187371254, 0.0078431377187371254), (0.050420168787240982,
0.011764706112444401, 0.011764706112444401), (0.054621849209070206,
0.015686275437474251, 0.015686275437474251), (0.058823529630899429,
0.019607843831181526, 0.019607843831181526), (0.063025213778018951,
0.019607843831181526, 0.019607843831181526), (0.067226894199848175,
0.023529412224888802, 0.023529412224888802), (0.071428574621677399,
0.027450980618596077, 0.027450980618596077), (0.075630255043506622,
0.031372550874948502, 0.031372550874948502), (0.079831935465335846,
0.031372550874948502, 0.031372550874948502), (0.08403361588716507,
0.035294119268655777, 0.035294119268655777), (0.088235296308994293,
0.039215687662363052, 0.039215687662363052), (0.092436976730823517,
0.043137256056070328, 0.043137256056070328), (0.09663865715265274,
0.043137256056070328, 0.043137256056070328), (0.10084033757448196,
0.047058824449777603, 0.047058824449777603), (0.10504201799631119,
0.050980392843484879, 0.050980392843484879), (0.10924369841814041,
0.054901961237192154, 0.054901961237192154), (0.11344537883996964,
0.058823529630899429, 0.058823529630899429), (0.11764705926179886,
0.058823529630899429, 0.058823529630899429), (0.12184873968362808,
0.062745101749897003, 0.062745101749897003), (0.1260504275560379,
0.066666670143604279, 0.066666670143604279), (0.13025210797786713,
0.070588238537311554, 0.070588238537311554), (0.13445378839969635,
0.070588238537311554, 0.070588238537311554), (0.13865546882152557,
0.074509806931018829, 0.074509806931018829), (0.1428571492433548,
0.078431375324726105, 0.078431375324726105), (0.14705882966518402,
0.08235294371843338, 0.08235294371843338), (0.15126051008701324,
0.086274512112140656, 0.086274512112140656), (0.15546219050884247,
0.086274512112140656, 0.086274512112140656), (0.15966387093067169,
0.090196080505847931, 0.090196080505847931), (0.16386555135250092,
0.094117648899555206, 0.094117648899555206), (0.16806723177433014,
0.098039217293262482, 0.098039217293262482), (0.17226891219615936,
0.10196078568696976, 0.10196078568696976), (0.17647059261798859,
0.10196078568696976, 0.10196078568696976), (0.18067227303981781,
0.10588235408067703, 0.10588235408067703), (0.18487395346164703,
0.10980392247438431, 0.10980392247438431), (0.18907563388347626,
0.11372549086809158, 0.11372549086809158), (0.19327731430530548,
0.11764705926179886, 0.11764705926179886), (0.1974789947271347,
0.12156862765550613, 0.12156862765550613), (0.20168067514896393,
0.12156862765550613, 0.12156862765550613), (0.20588235557079315,
0.12549020349979401, 0.12549020349979401), (0.21008403599262238,
0.12941177189350128, 0.12941177189350128), (0.2142857164144516,
0.13333334028720856, 0.13333334028720856), (0.21848739683628082,
0.13725490868091583, 0.13725490868091583), (0.22268907725811005,
0.14117647707462311, 0.14117647707462311), (0.22689075767993927,
0.14117647707462311, 0.14117647707462311), (0.23109243810176849,
0.14509804546833038, 0.14509804546833038), (0.23529411852359772,
0.14901961386203766, 0.14901961386203766), (0.23949579894542694,
0.15294118225574493, 0.15294118225574493), (0.24369747936725616,
0.15686275064945221, 0.15686275064945221), (0.24789915978908539,
0.16078431904315948, 0.16078431904315948), (0.25210085511207581,
0.16078431904315948, 0.16078431904315948), (0.25630253553390503,
0.16470588743686676, 0.16470588743686676), (0.26050421595573425,
0.16862745583057404, 0.16862745583057404), (0.26470589637756348,
0.17254902422428131, 0.17254902422428131), (0.2689075767993927,
0.17647059261798859, 0.17647059261798859), (0.27310925722122192,
0.18039216101169586, 0.18039216101169586), (0.27731093764305115,
0.18431372940540314, 0.18431372940540314), (0.28151261806488037,
0.18823529779911041, 0.18823529779911041), (0.28571429848670959,
0.18823529779911041, 0.18823529779911041), (0.28991597890853882,
0.18823529779911041, 0.18823529779911041), (0.29411765933036804,
0.19215686619281769, 0.19215686619281769), (0.29831933975219727,
0.19215686619281769, 0.19215686619281769), (0.30252102017402649,
0.19607843458652496, 0.19607843458652496), (0.30672270059585571,
0.19607843458652496, 0.19607843458652496), (0.31092438101768494,
0.20000000298023224, 0.20000000298023224), (0.31512606143951416,
0.20000000298023224, 0.20000000298023224), (0.31932774186134338,
0.20392157137393951, 0.20392157137393951), (0.32352942228317261,
0.20392157137393951, 0.20392157137393951), (0.32773110270500183,
0.20784313976764679, 0.20784313976764679), (0.33193278312683105,
0.20784313976764679, 0.20784313976764679), (0.33613446354866028,
0.21176470816135406, 0.21176470816135406), (0.3403361439704895,
0.21176470816135406, 0.21176470816135406), (0.34453782439231873,
0.21568627655506134, 0.21568627655506134), (0.34873950481414795,
0.21568627655506134, 0.21568627655506134), (0.35294118523597717,
0.21960784494876862, 0.21960784494876862), (0.3571428656578064,
0.21960784494876862, 0.21960784494876862), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.22352941334247589, 0.22352941334247589), (0.36974790692329407,
0.22745098173618317, 0.22745098173618317), (0.37394958734512329,
0.22745098173618317, 0.22745098173618317), (0.37815126776695251,
0.23137255012989044, 0.23137255012989044), (0.38235294818878174,
0.23137255012989044, 0.23137255012989044), (0.38655462861061096,
0.23529411852359772, 0.23529411852359772), (0.39075630903244019,
0.23921568691730499, 0.23921568691730499), (0.39495798945426941,
0.23921568691730499, 0.23921568691730499), (0.39915966987609863,
0.24313725531101227, 0.24313725531101227), (0.40336135029792786,
0.24313725531101227, 0.24313725531101227), (0.40756303071975708,
0.24705882370471954, 0.24705882370471954), (0.4117647111415863,
0.24705882370471954, 0.24705882370471954), (0.41596639156341553,
0.25098040699958801, 0.25098040699958801), (0.42016807198524475,
0.25098040699958801, 0.25098040699958801), (0.42436975240707397,
0.25490197539329529, 0.25490197539329529), (0.4285714328289032,
0.25490197539329529, 0.25490197539329529), (0.43277311325073242,
0.25882354378700256, 0.25882354378700256), (0.43697479367256165,
0.26274511218070984, 0.26274511218070984), (0.44117647409439087,
0.26274511218070984, 0.26274511218070984), (0.44537815451622009,
0.26666668057441711, 0.26666668057441711), (0.44957983493804932,
0.26666668057441711, 0.26666668057441711), (0.45378151535987854,
0.27058824896812439, 0.27058824896812439), (0.45798319578170776,
0.27058824896812439, 0.27058824896812439), (0.46218487620353699,
0.27450981736183167, 0.27450981736183167), (0.46638655662536621,
0.27843138575553894, 0.27843138575553894), (0.47058823704719543,
0.28627452254295349, 0.28627452254295349), (0.47478991746902466,
0.29803922772407532, 0.29803922772407532), (0.47899159789085388,
0.30588236451148987, 0.30588236451148987), (0.48319327831268311,
0.31764706969261169, 0.31764706969261169), (0.48739495873451233,
0.32549020648002625, 0.32549020648002625), (0.49159663915634155,
0.33725491166114807, 0.33725491166114807), (0.49579831957817078,
0.34509804844856262, 0.34509804844856262), (0.5, 0.35686275362968445,
0.35686275362968445), (0.50420171022415161, 0.36862745881080627,
0.36862745881080627), (0.50840336084365845, 0.37647059559822083,
0.37647059559822083), (0.51260507106781006, 0.38823530077934265,
0.38823530077934265), (0.51680672168731689, 0.3960784375667572,
0.3960784375667572), (0.52100843191146851, 0.40784314274787903,
0.40784314274787903), (0.52521008253097534, 0.41568627953529358,
0.41568627953529358), (0.52941179275512695, 0.42745098471641541,
0.42745098471641541), (0.53361344337463379, 0.43529412150382996,
0.43529412150382996), (0.5378151535987854, 0.44705882668495178,
0.44705882668495178), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.46666666865348816,
0.46666666865348816), (0.55042016506195068, 0.47450980544090271,
0.47450980544090271), (0.55462187528610229, 0.47843137383460999,
0.47843137383460999), (0.55882352590560913, 0.48627451062202454,
0.48627451062202454), (0.56302523612976074, 0.49411764740943909,
0.49411764740943909), (0.56722688674926758, 0.50196081399917603,
0.50196081399917603), (0.57142859697341919, 0.5058823823928833,
0.5058823823928833), (0.57563024759292603, 0.51372551918029785,
0.51372551918029785), (0.57983195781707764, 0.5215686559677124,
0.5215686559677124), (0.58403360843658447, 0.52941179275512695,
0.52941179275512695), (0.58823531866073608, 0.53333336114883423,
0.53333336114883423), (0.59243696928024292, 0.54117649793624878,
0.54117649793624878), (0.59663867950439453, 0.54901963472366333,
0.54901963472366333), (0.60084033012390137, 0.55294120311737061,
0.55294120311737061), (0.60504204034805298, 0.56078433990478516,
0.56078433990478516), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.57647061347961426,
0.57647061347961426), (0.61764705181121826, 0.58431375026702881,
0.58431375026702881), (0.62184876203536987, 0.58823531866073608,
0.58823531866073608), (0.62605041265487671, 0.59607845544815063,
0.59607845544815063), (0.63025212287902832, 0.60392159223556519,
0.60392159223556519), (0.63445377349853516, 0.61176472902297974,
0.61176472902297974), (0.63865548372268677, 0.61568629741668701,
0.61568629741668701), (0.6428571343421936, 0.62352943420410156,
0.62352943420410156), (0.64705884456634521, 0.63137257099151611,
0.63137257099151611), (0.65126049518585205, 0.63921570777893066,
0.63921570777893066), (0.65546220541000366, 0.64705884456634521,
0.64705884456634521), (0.6596638560295105, 0.65098041296005249,
0.65098041296005249), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67450982332229614,
0.67450982332229614), (0.67647057771682739, 0.68235296010971069,
0.68235296010971069), (0.680672287940979, 0.68627452850341797,
0.68627452850341797), (0.68487393856048584, 0.69411766529083252,
0.69411766529083252), (0.68907564878463745, 0.70196080207824707,
0.70196080207824707), (0.69327729940414429, 0.70980393886566162,
0.70980393886566162), (0.6974790096282959, 0.71764707565307617,
0.71764707565307617), (0.70168066024780273, 0.71764707565307617,
0.71764707565307617), (0.70588237047195435, 0.72156864404678345,
0.72156864404678345), (0.71008402109146118, 0.72156864404678345,
0.72156864404678345), (0.71428573131561279, 0.72549021244049072,
0.72549021244049072), (0.71848738193511963, 0.72549021244049072,
0.72549021244049072), (0.72268909215927124, 0.729411780834198,
0.729411780834198), (0.72689074277877808, 0.729411780834198,
0.729411780834198), (0.73109245300292969, 0.73333334922790527,
0.73333334922790527), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.73725491762161255,
0.73725491762161255), (0.75210082530975342, 0.74117648601531982,
0.74117648601531982), (0.75630253553390503, 0.74117648601531982,
0.74117648601531982), (0.76050418615341187, 0.7450980544090271,
0.7450980544090271), (0.76470589637756348, 0.7450980544090271,
0.7450980544090271), (0.76890754699707031, 0.7450980544090271,
0.7450980544090271), (0.77310925722122192, 0.74901962280273438,
0.74901962280273438), (0.77731090784072876, 0.74901962280273438,
0.74901962280273438), (0.78151261806488037, 0.75294119119644165,
0.75294119119644165), (0.78571426868438721, 0.75294119119644165,
0.75294119119644165), (0.78991597890853882, 0.75686275959014893,
0.75686275959014893), (0.79411762952804565, 0.76470589637756348,
0.76470589637756348), (0.79831933975219727, 0.76862746477127075,
0.76862746477127075), (0.8025209903717041, 0.77254903316497803,
0.77254903316497803), (0.80672270059585571, 0.7764706015586853,
0.7764706015586853), (0.81092435121536255, 0.78039216995239258,
0.78039216995239258), (0.81512606143951416, 0.78823530673980713,
0.78823530673980713), (0.819327712059021, 0.7921568751335144,
0.7921568751335144), (0.82352942228317261, 0.79607844352722168,
0.79607844352722168), (0.82773107290267944, 0.80000001192092896,
0.80000001192092896), (0.83193278312683105, 0.80392158031463623,
0.80392158031463623), (0.83613443374633789, 0.81176471710205078,
0.81176471710205078), (0.8403361439704895, 0.81568628549575806,
0.81568628549575806), (0.84453779458999634, 0.81960785388946533,
0.81960785388946533), (0.84873950481414795, 0.82352942228317261,
0.82352942228317261), (0.85294115543365479, 0.82745099067687988,
0.82745099067687988), (0.8571428656578064, 0.83529412746429443,
0.83529412746429443), (0.86134451627731323, 0.83921569585800171,
0.83921569585800171), (0.86554622650146484, 0.84313726425170898,
0.84313726425170898), (0.86974787712097168, 0.84705883264541626,
0.84705883264541626), (0.87394958734512329, 0.85098040103912354,
0.85098040103912354), (0.87815123796463013, 0.85882353782653809,
0.85882353782653809), (0.88235294818878174, 0.86274510622024536,
0.86274510622024536), (0.88655459880828857, 0.86666667461395264,
0.86666667461395264), (0.89075630903244019, 0.87058824300765991,
0.87058824300765991), (0.89495795965194702, 0.87450981140136719,
0.87450981140136719), (0.89915966987609863, 0.88235294818878174,
0.88235294818878174), (0.90336132049560547, 0.88627451658248901,
0.88627451658248901), (0.90756303071975708, 0.89019608497619629,
0.89019608497619629), (0.91176468133926392, 0.89411765336990356,
0.89411765336990356), (0.91596639156341553, 0.89803922176361084,
0.89803922176361084), (0.92016804218292236, 0.90588235855102539,
0.90588235855102539), (0.92436975240707397, 0.90980392694473267,
0.90980392694473267), (0.92857140302658081, 0.91372549533843994,
0.91372549533843994), (0.93277311325073242, 0.91764706373214722,
0.91764706373214722), (0.93697476387023926, 0.92156863212585449,
0.92156863212585449), (0.94117647409439087, 0.92941176891326904,
0.92941176891326904), (0.94537812471389771, 0.93333333730697632,
0.93333333730697632), (0.94957983493804932, 0.93725490570068359,
0.93725490570068359), (0.95378148555755615, 0.94117647409439087,
0.94117647409439087), (0.95798319578170776, 0.94509804248809814,
0.94509804248809814), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.95686274766921997,
0.95686274766921997), (0.97058820724487305, 0.96078431606292725,
0.96078431606292725), (0.97478991746902466, 0.96470588445663452,
0.96470588445663452), (0.97899156808853149, 0.9686274528503418,
0.9686274528503418), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
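# The colormap dictionaries above and below follow matplotlib's segment-data
# convention: each channel name maps to a sorted list of (x, y0, y1) anchors
# with x running from 0.0 to 1.0.  For an input value v with
# x[i] <= v <= x[i+1], the channel is linearly interpolated between y1[i] and
# y0[i+1]; keeping y0 == y1 at every anchor, as in the data here, yields a
# continuous piecewise-linear ramp, while distinct y0/y1 values would
# introduce a jump.  For example, with anchors (0.5, 0.2, 0.2) and
# (0.75, 0.4, 0.4), an input of 0.625 lies halfway between the two x values
# and so interpolates halfway between 0.2 and 0.4, giving 0.3.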
_gist_gray_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0039215688593685627, 0.0039215688593685627), (0.0084033617749810219,
0.0078431377187371254, 0.0078431377187371254), (0.012605042196810246,
0.011764706112444401, 0.011764706112444401), (0.016806723549962044,
0.015686275437474251, 0.015686275437474251), (0.021008403971791267,
0.019607843831181526, 0.019607843831181526), (0.025210084393620491,
0.023529412224888802, 0.023529412224888802), (0.029411764815449715,
0.027450980618596077, 0.027450980618596077), (0.033613447099924088,
0.035294119268655777, 0.035294119268655777), (0.037815127521753311,
0.039215687662363052, 0.039215687662363052), (0.042016807943582535,
0.043137256056070328, 0.043137256056070328), (0.046218488365411758,
0.047058824449777603, 0.047058824449777603), (0.050420168787240982,
0.050980392843484879, 0.050980392843484879), (0.054621849209070206,
0.054901961237192154, 0.054901961237192154), (0.058823529630899429,
0.058823529630899429, 0.058823529630899429), (0.063025213778018951,
0.062745101749897003, 0.062745101749897003), (0.067226894199848175,
0.066666670143604279, 0.066666670143604279), (0.071428574621677399,
0.070588238537311554, 0.070588238537311554), (0.075630255043506622,
0.074509806931018829, 0.074509806931018829), (0.079831935465335846,
0.078431375324726105, 0.078431375324726105), (0.08403361588716507,
0.08235294371843338, 0.08235294371843338), (0.088235296308994293,
0.086274512112140656, 0.086274512112140656), (0.092436976730823517,
0.090196080505847931, 0.090196080505847931), (0.09663865715265274,
0.098039217293262482, 0.098039217293262482), (0.10084033757448196,
0.10196078568696976, 0.10196078568696976), (0.10504201799631119,
0.10588235408067703, 0.10588235408067703), (0.10924369841814041,
0.10980392247438431, 0.10980392247438431), (0.11344537883996964,
0.11372549086809158, 0.11372549086809158), (0.11764705926179886,
0.11764705926179886, 0.11764705926179886), (0.12184873968362808,
0.12156862765550613, 0.12156862765550613), (0.1260504275560379,
0.12549020349979401, 0.12549020349979401), (0.13025210797786713,
0.12941177189350128, 0.12941177189350128), (0.13445378839969635,
0.13333334028720856, 0.13333334028720856), (0.13865546882152557,
0.13725490868091583, 0.13725490868091583), (0.1428571492433548,
0.14117647707462311, 0.14117647707462311), (0.14705882966518402,
0.14509804546833038, 0.14509804546833038), (0.15126051008701324,
0.14901961386203766, 0.14901961386203766), (0.15546219050884247,
0.15294118225574493, 0.15294118225574493), (0.15966387093067169,
0.16078431904315948, 0.16078431904315948), (0.16386555135250092,
0.16470588743686676, 0.16470588743686676), (0.16806723177433014,
0.16862745583057404, 0.16862745583057404), (0.17226891219615936,
0.17254902422428131, 0.17254902422428131), (0.17647059261798859,
0.17647059261798859, 0.17647059261798859), (0.18067227303981781,
0.18039216101169586, 0.18039216101169586), (0.18487395346164703,
0.18431372940540314, 0.18431372940540314), (0.18907563388347626,
0.18823529779911041, 0.18823529779911041), (0.19327731430530548,
0.19215686619281769, 0.19215686619281769), (0.1974789947271347,
0.19607843458652496, 0.19607843458652496), (0.20168067514896393,
0.20000000298023224, 0.20000000298023224), (0.20588235557079315,
0.20392157137393951, 0.20392157137393951), (0.21008403599262238,
0.20784313976764679, 0.20784313976764679), (0.2142857164144516,
0.21176470816135406, 0.21176470816135406), (0.21848739683628082,
0.21568627655506134, 0.21568627655506134), (0.22268907725811005,
0.22352941334247589, 0.22352941334247589), (0.22689075767993927,
0.22745098173618317, 0.22745098173618317), (0.23109243810176849,
0.23137255012989044, 0.23137255012989044), (0.23529411852359772,
0.23529411852359772, 0.23529411852359772), (0.23949579894542694,
0.23921568691730499, 0.23921568691730499), (0.24369747936725616,
0.24313725531101227, 0.24313725531101227), (0.24789915978908539,
0.24705882370471954, 0.24705882370471954), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28627452254295349, 0.28627452254295349), (0.28991597890853882,
0.29019609093666077, 0.29019609093666077), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.3490196168422699, 0.3490196168422699), (0.35294118523597717,
0.35294118523597717, 0.35294118523597717), (0.3571428656578064,
0.35686275362968445, 0.35686275362968445), (0.36134454607963562,
0.36078432202339172, 0.36078432202339172), (0.36554622650146484,
0.364705890417099, 0.364705890417099), (0.36974790692329407,
0.36862745881080627, 0.36862745881080627), (0.37394958734512329,
0.37254902720451355, 0.37254902720451355), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.4117647111415863, 0.4117647111415863), (0.41596639156341553,
0.41568627953529358, 0.41568627953529358), (0.42016807198524475,
0.41960784792900085, 0.41960784792900085), (0.42436975240707397,
0.42352941632270813, 0.42352941632270813), (0.4285714328289032,
0.42745098471641541, 0.42745098471641541), (0.43277311325073242,
0.43137255311012268, 0.43137255311012268), (0.43697479367256165,
0.43529412150382996, 0.43529412150382996), (0.44117647409439087,
0.43921568989753723, 0.43921568989753723), (0.44537815451622009,
0.44313725829124451, 0.44313725829124451), (0.44957983493804932,
0.44705882668495178, 0.44705882668495178), (0.45378151535987854,
0.45098039507865906, 0.45098039507865906), (0.45798319578170776,
0.45490196347236633, 0.45490196347236633), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47450980544090271, 0.47450980544090271), (0.47899159789085388,
0.47843137383460999, 0.47843137383460999), (0.48319327831268311,
0.48235294222831726, 0.48235294222831726), (0.48739495873451233,
0.48627451062202454, 0.48627451062202454), (0.49159663915634155,
0.49019607901573181, 0.49019607901573181), (0.49579831957817078,
0.49411764740943909, 0.49411764740943909), (0.5, 0.49803921580314636,
0.49803921580314636), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.5372549295425415,
0.5372549295425415), (0.54201680421829224, 0.54117649793624878,
0.54117649793624878), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.60392159223556519,
0.60392159223556519), (0.60924369096755981, 0.60784316062927246,
0.60784316062927246), (0.61344540119171143, 0.61176472902297974,
0.61176472902297974), (0.61764705181121826, 0.61568629741668701,
0.61568629741668701), (0.62184876203536987, 0.61960786581039429,
0.61960786581039429), (0.62605041265487671, 0.62352943420410156,
0.62352943420410156), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.66274511814117432,
0.66274511814117432), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67058825492858887,
0.67058825492858887), (0.67647057771682739, 0.67450982332229614,
0.67450982332229614), (0.680672287940979, 0.67843139171600342,
0.67843139171600342), (0.68487393856048584, 0.68235296010971069,
0.68235296010971069), (0.68907564878463745, 0.68627452850341797,
0.68627452850341797), (0.69327729940414429, 0.69019609689712524,
0.69019609689712524), (0.6974790096282959, 0.69411766529083252,
0.69411766529083252), (0.70168066024780273, 0.69803923368453979,
0.69803923368453979), (0.70588237047195435, 0.70196080207824707,
0.70196080207824707), (0.71008402109146118, 0.70588237047195435,
0.70588237047195435), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72549021244049072,
0.72549021244049072), (0.73109245300292969, 0.729411780834198,
0.729411780834198), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73725491762161255,
0.73725491762161255), (0.74369746446609497, 0.74117648601531982,
0.74117648601531982), (0.74789917469024658, 0.7450980544090271,
0.7450980544090271), (0.75210082530975342, 0.74901962280273438,
0.74901962280273438), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78823530673980713,
0.78823530673980713), (0.79411762952804565, 0.7921568751335144,
0.7921568751335144), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.85098040103912354,
0.85098040103912354), (0.8571428656578064, 0.85490196943283081,
0.85490196943283081), (0.86134451627731323, 0.85882353782653809,
0.85882353782653809), (0.86554622650146484, 0.86274510622024536,
0.86274510622024536), (0.86974787712097168, 0.86666667461395264,
0.86666667461395264), (0.87394958734512329, 0.87058824300765991,
0.87058824300765991), (0.87815123796463013, 0.87450981140136719,
0.87450981140136719), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.91372549533843994,
0.91372549533843994), (0.92016804218292236, 0.91764706373214722,
0.91764706373214722), (0.92436975240707397, 0.92156863212585449,
0.92156863212585449), (0.92857140302658081, 0.92549020051956177,
0.92549020051956177), (0.93277311325073242, 0.92941176891326904,
0.92941176891326904), (0.93697476387023926, 0.93333333730697632,
0.93333333730697632), (0.94117647409439087, 0.93725490570068359,
0.93725490570068359), (0.94537812471389771, 0.94117647409439087,
0.94117647409439087), (0.94957983493804932, 0.94509804248809814,
0.94509804248809814), (0.95378148555755615, 0.94901961088180542,
0.94901961088180542), (0.95798319578170776, 0.9529411792755127,
0.9529411792755127), (0.9621848464012146, 0.95686274766921997,
0.95686274766921997), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97647058963775635,
0.97647058963775635), (0.98319327831268311, 0.98039215803146362,
0.98039215803146362), (0.98739492893218994, 0.9843137264251709,
0.9843137264251709), (0.99159663915634155, 0.98823529481887817,
0.98823529481887817), (0.99579828977584839, 0.99215686321258545,
0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.011764706112444401,
0.011764706112444401), (0.016806723549962044, 0.015686275437474251,
0.015686275437474251), (0.021008403971791267, 0.019607843831181526,
0.019607843831181526), (0.025210084393620491, 0.023529412224888802,
0.023529412224888802), (0.029411764815449715, 0.027450980618596077,
0.027450980618596077), (0.033613447099924088, 0.035294119268655777,
0.035294119268655777), (0.037815127521753311, 0.039215687662363052,
0.039215687662363052), (0.042016807943582535, 0.043137256056070328,
0.043137256056070328), (0.046218488365411758, 0.047058824449777603,
0.047058824449777603), (0.050420168787240982, 0.050980392843484879,
0.050980392843484879), (0.054621849209070206, 0.054901961237192154,
0.054901961237192154), (0.058823529630899429, 0.058823529630899429,
0.058823529630899429), (0.063025213778018951, 0.062745101749897003,
0.062745101749897003), (0.067226894199848175, 0.066666670143604279,
0.066666670143604279), (0.071428574621677399, 0.070588238537311554,
0.070588238537311554), (0.075630255043506622, 0.074509806931018829,
0.074509806931018829), (0.079831935465335846, 0.078431375324726105,
0.078431375324726105), (0.08403361588716507, 0.08235294371843338,
0.08235294371843338), (0.088235296308994293, 0.086274512112140656,
0.086274512112140656), (0.092436976730823517, 0.090196080505847931,
0.090196080505847931), (0.09663865715265274, 0.098039217293262482,
0.098039217293262482), (0.10084033757448196, 0.10196078568696976,
0.10196078568696976), (0.10504201799631119, 0.10588235408067703,
0.10588235408067703), (0.10924369841814041, 0.10980392247438431,
0.10980392247438431), (0.11344537883996964, 0.11372549086809158,
0.11372549086809158), (0.11764705926179886, 0.11764705926179886,
0.11764705926179886), (0.12184873968362808, 0.12156862765550613,
0.12156862765550613), (0.1260504275560379, 0.12549020349979401,
0.12549020349979401), (0.13025210797786713, 0.12941177189350128,
0.12941177189350128), (0.13445378839969635, 0.13333334028720856,
0.13333334028720856), (0.13865546882152557, 0.13725490868091583,
0.13725490868091583), (0.1428571492433548, 0.14117647707462311,
0.14117647707462311), (0.14705882966518402, 0.14509804546833038,
0.14509804546833038), (0.15126051008701324, 0.14901961386203766,
0.14901961386203766), (0.15546219050884247, 0.15294118225574493,
0.15294118225574493), (0.15966387093067169, 0.16078431904315948,
0.16078431904315948), (0.16386555135250092, 0.16470588743686676,
0.16470588743686676), (0.16806723177433014, 0.16862745583057404,
0.16862745583057404), (0.17226891219615936, 0.17254902422428131,
0.17254902422428131), (0.17647059261798859, 0.17647059261798859,
0.17647059261798859), (0.18067227303981781, 0.18039216101169586,
0.18039216101169586), (0.18487395346164703, 0.18431372940540314,
0.18431372940540314), (0.18907563388347626, 0.18823529779911041,
0.18823529779911041), (0.19327731430530548, 0.19215686619281769,
0.19215686619281769), (0.1974789947271347, 0.19607843458652496,
0.19607843458652496), (0.20168067514896393, 0.20000000298023224,
0.20000000298023224), (0.20588235557079315, 0.20392157137393951,
0.20392157137393951), (0.21008403599262238, 0.20784313976764679,
0.20784313976764679), (0.2142857164144516, 0.21176470816135406,
0.21176470816135406), (0.21848739683628082, 0.21568627655506134,
0.21568627655506134), (0.22268907725811005, 0.22352941334247589,
0.22352941334247589), (0.22689075767993927, 0.22745098173618317,
0.22745098173618317), (0.23109243810176849, 0.23137255012989044,
0.23137255012989044), (0.23529411852359772, 0.23529411852359772,
0.23529411852359772), (0.23949579894542694, 0.23921568691730499,
0.23921568691730499), (0.24369747936725616, 0.24313725531101227,
0.24313725531101227), (0.24789915978908539, 0.24705882370471954,
0.24705882370471954), (0.25210085511207581, 0.25098040699958801,
0.25098040699958801), (0.25630253553390503, 0.25490197539329529,
0.25490197539329529), (0.26050421595573425, 0.25882354378700256,
0.25882354378700256), (0.26470589637756348, 0.26274511218070984,
0.26274511218070984), (0.2689075767993927, 0.26666668057441711,
0.26666668057441711), (0.27310925722122192, 0.27058824896812439,
0.27058824896812439), (0.27731093764305115, 0.27450981736183167,
0.27450981736183167), (0.28151261806488037, 0.27843138575553894,
0.27843138575553894), (0.28571429848670959, 0.28627452254295349,
0.28627452254295349), (0.28991597890853882, 0.29019609093666077,
0.29019609093666077), (0.29411765933036804, 0.29411765933036804,
0.29411765933036804), (0.29831933975219727, 0.29803922772407532,
0.29803922772407532), (0.30252102017402649, 0.30196079611778259,
0.30196079611778259), (0.30672270059585571, 0.30588236451148987,
0.30588236451148987), (0.31092438101768494, 0.30980393290519714,
0.30980393290519714), (0.31512606143951416, 0.31372550129890442,
0.31372550129890442), (0.31932774186134338, 0.31764706969261169,
0.31764706969261169), (0.32352942228317261, 0.32156863808631897,
0.32156863808631897), (0.32773110270500183, 0.32549020648002625,
0.32549020648002625), (0.33193278312683105, 0.32941177487373352,
0.32941177487373352), (0.33613446354866028, 0.3333333432674408,
0.3333333432674408), (0.3403361439704895, 0.33725491166114807,
0.33725491166114807), (0.34453782439231873, 0.34117648005485535,
0.34117648005485535), (0.34873950481414795, 0.3490196168422699,
0.3490196168422699), (0.35294118523597717, 0.35294118523597717,
0.35294118523597717), (0.3571428656578064, 0.35686275362968445,
0.35686275362968445), (0.36134454607963562, 0.36078432202339172,
0.36078432202339172), (0.36554622650146484, 0.364705890417099,
0.364705890417099), (0.36974790692329407, 0.36862745881080627,
0.36862745881080627), (0.37394958734512329, 0.37254902720451355,
0.37254902720451355), (0.37815126776695251, 0.37647059559822083,
0.37647059559822083), (0.38235294818878174, 0.3803921639919281,
0.3803921639919281), (0.38655462861061096, 0.38431373238563538,
0.38431373238563538), (0.39075630903244019, 0.38823530077934265,
0.38823530077934265), (0.39495798945426941, 0.39215686917304993,
0.39215686917304993), (0.39915966987609863, 0.3960784375667572,
0.3960784375667572), (0.40336135029792786, 0.40000000596046448,
0.40000000596046448), (0.40756303071975708, 0.40392157435417175,
0.40392157435417175), (0.4117647111415863, 0.4117647111415863,
0.4117647111415863), (0.41596639156341553, 0.41568627953529358,
0.41568627953529358), (0.42016807198524475, 0.41960784792900085,
0.41960784792900085), (0.42436975240707397, 0.42352941632270813,
0.42352941632270813), (0.4285714328289032, 0.42745098471641541,
0.42745098471641541), (0.43277311325073242, 0.43137255311012268,
0.43137255311012268), (0.43697479367256165, 0.43529412150382996,
0.43529412150382996), (0.44117647409439087, 0.43921568989753723,
0.43921568989753723), (0.44537815451622009, 0.44313725829124451,
0.44313725829124451), (0.44957983493804932, 0.44705882668495178,
0.44705882668495178), (0.45378151535987854, 0.45098039507865906,
0.45098039507865906), (0.45798319578170776, 0.45490196347236633,
0.45490196347236633), (0.46218487620353699, 0.45882353186607361,
0.45882353186607361), (0.46638655662536621, 0.46274510025978088,
0.46274510025978088), (0.47058823704719543, 0.46666666865348816,
0.46666666865348816), (0.47478991746902466, 0.47450980544090271,
0.47450980544090271), (0.47899159789085388, 0.47843137383460999,
0.47843137383460999), (0.48319327831268311, 0.48235294222831726,
0.48235294222831726), (0.48739495873451233, 0.48627451062202454,
0.48627451062202454), (0.49159663915634155, 0.49019607901573181,
0.49019607901573181), (0.49579831957817078, 0.49411764740943909,
0.49411764740943909), (0.5, 0.49803921580314636, 0.49803921580314636),
(0.50420171022415161, 0.50196081399917603, 0.50196081399917603),
(0.50840336084365845, 0.5058823823928833, 0.5058823823928833),
(0.51260507106781006, 0.50980395078659058, 0.50980395078659058),
(0.51680672168731689, 0.51372551918029785, 0.51372551918029785),
(0.52100843191146851, 0.51764708757400513, 0.51764708757400513),
(0.52521008253097534, 0.5215686559677124, 0.5215686559677124),
(0.52941179275512695, 0.52549022436141968, 0.52549022436141968),
(0.53361344337463379, 0.52941179275512695, 0.52941179275512695),
(0.5378151535987854, 0.5372549295425415, 0.5372549295425415),
(0.54201680421829224, 0.54117649793624878, 0.54117649793624878),
(0.54621851444244385, 0.54509806632995605, 0.54509806632995605),
(0.55042016506195068, 0.54901963472366333, 0.54901963472366333),
(0.55462187528610229, 0.55294120311737061, 0.55294120311737061),
(0.55882352590560913, 0.55686277151107788, 0.55686277151107788),
(0.56302523612976074, 0.56078433990478516, 0.56078433990478516),
(0.56722688674926758, 0.56470590829849243, 0.56470590829849243),
(0.57142859697341919, 0.56862747669219971, 0.56862747669219971),
(0.57563024759292603, 0.57254904508590698, 0.57254904508590698),
(0.57983195781707764, 0.57647061347961426, 0.57647061347961426),
(0.58403360843658447, 0.58039218187332153, 0.58039218187332153),
(0.58823531866073608, 0.58431375026702881, 0.58431375026702881),
(0.59243696928024292, 0.58823531866073608, 0.58823531866073608),
(0.59663867950439453, 0.59215688705444336, 0.59215688705444336),
(0.60084033012390137, 0.60000002384185791, 0.60000002384185791),
(0.60504204034805298, 0.60392159223556519, 0.60392159223556519),
(0.60924369096755981, 0.60784316062927246, 0.60784316062927246),
(0.61344540119171143, 0.61176472902297974, 0.61176472902297974),
(0.61764705181121826, 0.61568629741668701, 0.61568629741668701),
(0.62184876203536987, 0.61960786581039429, 0.61960786581039429),
(0.62605041265487671, 0.62352943420410156, 0.62352943420410156),
(0.63025212287902832, 0.62745100259780884, 0.62745100259780884),
(0.63445377349853516, 0.63137257099151611, 0.63137257099151611),
(0.63865548372268677, 0.63529413938522339, 0.63529413938522339),
(0.6428571343421936, 0.63921570777893066, 0.63921570777893066),
(0.64705884456634521, 0.64313727617263794, 0.64313727617263794),
(0.65126049518585205, 0.64705884456634521, 0.64705884456634521),
(0.65546220541000366, 0.65098041296005249, 0.65098041296005249),
(0.6596638560295105, 0.65490198135375977, 0.65490198135375977),
(0.66386556625366211, 0.66274511814117432, 0.66274511814117432),
(0.66806721687316895, 0.66666668653488159, 0.66666668653488159),
(0.67226892709732056, 0.67058825492858887, 0.67058825492858887),
(0.67647057771682739, 0.67450982332229614, 0.67450982332229614),
(0.680672287940979, 0.67843139171600342, 0.67843139171600342),
(0.68487393856048584, 0.68235296010971069, 0.68235296010971069),
(0.68907564878463745, 0.68627452850341797, 0.68627452850341797),
(0.69327729940414429, 0.69019609689712524, 0.69019609689712524),
(0.6974790096282959, 0.69411766529083252, 0.69411766529083252),
(0.70168066024780273, 0.69803923368453979, 0.69803923368453979),
(0.70588237047195435, 0.70196080207824707, 0.70196080207824707),
(0.71008402109146118, 0.70588237047195435, 0.70588237047195435),
(0.71428573131561279, 0.70980393886566162, 0.70980393886566162),
(0.71848738193511963, 0.7137255072593689, 0.7137255072593689),
(0.72268909215927124, 0.71764707565307617, 0.71764707565307617),
(0.72689074277877808, 0.72549021244049072, 0.72549021244049072),
(0.73109245300292969, 0.729411780834198, 0.729411780834198),
(0.73529410362243652, 0.73333334922790527, 0.73333334922790527),
(0.73949581384658813, 0.73725491762161255, 0.73725491762161255),
(0.74369746446609497, 0.74117648601531982, 0.74117648601531982),
(0.74789917469024658, 0.7450980544090271, 0.7450980544090271),
(0.75210082530975342, 0.74901962280273438, 0.74901962280273438),
(0.75630253553390503, 0.75294119119644165, 0.75294119119644165),
(0.76050418615341187, 0.75686275959014893, 0.75686275959014893),
(0.76470589637756348, 0.7607843279838562, 0.7607843279838562),
(0.76890754699707031, 0.76470589637756348, 0.76470589637756348),
(0.77310925722122192, 0.76862746477127075, 0.76862746477127075),
(0.77731090784072876, 0.77254903316497803, 0.77254903316497803),
(0.78151261806488037, 0.7764706015586853, 0.7764706015586853),
(0.78571426868438721, 0.78039216995239258, 0.78039216995239258),
(0.78991597890853882, 0.78823530673980713, 0.78823530673980713),
(0.79411762952804565, 0.7921568751335144, 0.7921568751335144),
(0.79831933975219727, 0.79607844352722168, 0.79607844352722168),
(0.8025209903717041, 0.80000001192092896, 0.80000001192092896),
(0.80672270059585571, 0.80392158031463623, 0.80392158031463623),
(0.81092435121536255, 0.80784314870834351, 0.80784314870834351),
(0.81512606143951416, 0.81176471710205078, 0.81176471710205078),
(0.819327712059021, 0.81568628549575806, 0.81568628549575806),
(0.82352942228317261, 0.81960785388946533, 0.81960785388946533),
(0.82773107290267944, 0.82352942228317261, 0.82352942228317261),
(0.83193278312683105, 0.82745099067687988, 0.82745099067687988),
(0.83613443374633789, 0.83137255907058716, 0.83137255907058716),
(0.8403361439704895, 0.83529412746429443, 0.83529412746429443),
(0.84453779458999634, 0.83921569585800171, 0.83921569585800171),
(0.84873950481414795, 0.84313726425170898, 0.84313726425170898),
(0.85294115543365479, 0.85098040103912354, 0.85098040103912354),
(0.8571428656578064, 0.85490196943283081, 0.85490196943283081),
(0.86134451627731323, 0.85882353782653809, 0.85882353782653809),
(0.86554622650146484, 0.86274510622024536, 0.86274510622024536),
(0.86974787712097168, 0.86666667461395264, 0.86666667461395264),
(0.87394958734512329, 0.87058824300765991, 0.87058824300765991),
(0.87815123796463013, 0.87450981140136719, 0.87450981140136719),
(0.88235294818878174, 0.87843137979507446, 0.87843137979507446),
(0.88655459880828857, 0.88235294818878174, 0.88235294818878174),
(0.89075630903244019, 0.88627451658248901, 0.88627451658248901),
(0.89495795965194702, 0.89019608497619629, 0.89019608497619629),
(0.89915966987609863, 0.89411765336990356, 0.89411765336990356),
(0.90336132049560547, 0.89803922176361084, 0.89803922176361084),
(0.90756303071975708, 0.90196079015731812, 0.90196079015731812),
(0.91176468133926392, 0.90588235855102539, 0.90588235855102539),
(0.91596639156341553, 0.91372549533843994, 0.91372549533843994),
(0.92016804218292236, 0.91764706373214722, 0.91764706373214722),
(0.92436975240707397, 0.92156863212585449, 0.92156863212585449),
(0.92857140302658081, 0.92549020051956177, 0.92549020051956177),
(0.93277311325073242, 0.92941176891326904, 0.92941176891326904),
(0.93697476387023926, 0.93333333730697632, 0.93333333730697632),
(0.94117647409439087, 0.93725490570068359, 0.93725490570068359),
(0.94537812471389771, 0.94117647409439087, 0.94117647409439087),
(0.94957983493804932, 0.94509804248809814, 0.94509804248809814),
(0.95378148555755615, 0.94901961088180542, 0.94901961088180542),
(0.95798319578170776, 0.9529411792755127, 0.9529411792755127),
(0.9621848464012146, 0.95686274766921997, 0.95686274766921997),
(0.96638655662536621, 0.96078431606292725, 0.96078431606292725),
(0.97058820724487305, 0.96470588445663452, 0.96470588445663452),
(0.97478991746902466, 0.9686274528503418, 0.9686274528503418),
(0.97899156808853149, 0.97647058963775635, 0.97647058963775635),
(0.98319327831268311, 0.98039215803146362, 0.98039215803146362),
(0.98739492893218994, 0.9843137264251709, 0.9843137264251709),
(0.99159663915634155, 0.98823529481887817, 0.98823529481887817),
(0.99579828977584839, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)], 'red': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.035294119268655777, 0.035294119268655777),
(0.037815127521753311, 0.039215687662363052, 0.039215687662363052),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.098039217293262482, 0.098039217293262482),
(0.10084033757448196, 0.10196078568696976, 0.10196078568696976),
(0.10504201799631119, 0.10588235408067703, 0.10588235408067703),
(0.10924369841814041, 0.10980392247438431, 0.10980392247438431),
(0.11344537883996964, 0.11372549086809158, 0.11372549086809158),
(0.11764705926179886, 0.11764705926179886, 0.11764705926179886),
(0.12184873968362808, 0.12156862765550613, 0.12156862765550613),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.16078431904315948, 0.16078431904315948),
(0.16386555135250092, 0.16470588743686676, 0.16470588743686676),
(0.16806723177433014, 0.16862745583057404, 0.16862745583057404),
(0.17226891219615936, 0.17254902422428131, 0.17254902422428131),
(0.17647059261798859, 0.17647059261798859, 0.17647059261798859),
(0.18067227303981781, 0.18039216101169586, 0.18039216101169586),
(0.18487395346164703, 0.18431372940540314, 0.18431372940540314),
(0.18907563388347626, 0.18823529779911041, 0.18823529779911041),
(0.19327731430530548, 0.19215686619281769, 0.19215686619281769),
(0.1974789947271347, 0.19607843458652496, 0.19607843458652496),
(0.20168067514896393, 0.20000000298023224, 0.20000000298023224),
(0.20588235557079315, 0.20392157137393951, 0.20392157137393951),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.22352941334247589, 0.22352941334247589),
(0.22689075767993927, 0.22745098173618317, 0.22745098173618317),
(0.23109243810176849, 0.23137255012989044, 0.23137255012989044),
(0.23529411852359772, 0.23529411852359772, 0.23529411852359772),
(0.23949579894542694, 0.23921568691730499, 0.23921568691730499),
(0.24369747936725616, 0.24313725531101227, 0.24313725531101227),
(0.24789915978908539, 0.24705882370471954, 0.24705882370471954),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28627452254295349, 0.28627452254295349),
(0.28991597890853882, 0.29019609093666077, 0.29019609093666077),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.3490196168422699, 0.3490196168422699),
(0.35294118523597717, 0.35294118523597717, 0.35294118523597717),
(0.3571428656578064, 0.35686275362968445, 0.35686275362968445),
(0.36134454607963562, 0.36078432202339172, 0.36078432202339172),
(0.36554622650146484, 0.364705890417099, 0.364705890417099),
(0.36974790692329407, 0.36862745881080627, 0.36862745881080627),
(0.37394958734512329, 0.37254902720451355, 0.37254902720451355),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.4117647111415863, 0.4117647111415863),
(0.41596639156341553, 0.41568627953529358, 0.41568627953529358),
(0.42016807198524475, 0.41960784792900085, 0.41960784792900085),
(0.42436975240707397, 0.42352941632270813, 0.42352941632270813),
(0.4285714328289032, 0.42745098471641541, 0.42745098471641541),
(0.43277311325073242, 0.43137255311012268, 0.43137255311012268),
(0.43697479367256165, 0.43529412150382996, 0.43529412150382996),
(0.44117647409439087, 0.43921568989753723, 0.43921568989753723),
(0.44537815451622009, 0.44313725829124451, 0.44313725829124451),
(0.44957983493804932, 0.44705882668495178, 0.44705882668495178),
(0.45378151535987854, 0.45098039507865906, 0.45098039507865906),
(0.45798319578170776, 0.45490196347236633, 0.45490196347236633),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47450980544090271, 0.47450980544090271),
(0.47899159789085388, 0.47843137383460999, 0.47843137383460999),
(0.48319327831268311, 0.48235294222831726, 0.48235294222831726),
(0.48739495873451233, 0.48627451062202454, 0.48627451062202454),
(0.49159663915634155, 0.49019607901573181, 0.49019607901573181),
(0.49579831957817078, 0.49411764740943909, 0.49411764740943909), (0.5,
0.49803921580314636, 0.49803921580314636), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.5372549295425415, 0.5372549295425415), (0.54201680421829224,
0.54117649793624878, 0.54117649793624878), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.60000002384185791, 0.60000002384185791), (0.60504204034805298,
0.60392159223556519, 0.60392159223556519), (0.60924369096755981,
0.60784316062927246, 0.60784316062927246), (0.61344540119171143,
0.61176472902297974, 0.61176472902297974), (0.61764705181121826,
0.61568629741668701, 0.61568629741668701), (0.62184876203536987,
0.61960786581039429, 0.61960786581039429), (0.62605041265487671,
0.62352943420410156, 0.62352943420410156), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.66274511814117432, 0.66274511814117432), (0.66806721687316895,
0.66666668653488159, 0.66666668653488159), (0.67226892709732056,
0.67058825492858887, 0.67058825492858887), (0.67647057771682739,
0.67450982332229614, 0.67450982332229614), (0.680672287940979,
0.67843139171600342, 0.67843139171600342), (0.68487393856048584,
0.68235296010971069, 0.68235296010971069), (0.68907564878463745,
0.68627452850341797, 0.68627452850341797), (0.69327729940414429,
0.69019609689712524, 0.69019609689712524), (0.6974790096282959,
0.69411766529083252, 0.69411766529083252), (0.70168066024780273,
0.69803923368453979, 0.69803923368453979), (0.70588237047195435,
0.70196080207824707, 0.70196080207824707), (0.71008402109146118,
0.70588237047195435, 0.70588237047195435), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72549021244049072, 0.72549021244049072), (0.73109245300292969,
0.729411780834198, 0.729411780834198), (0.73529410362243652,
0.73333334922790527, 0.73333334922790527), (0.73949581384658813,
0.73725491762161255, 0.73725491762161255), (0.74369746446609497,
0.74117648601531982, 0.74117648601531982), (0.74789917469024658,
0.7450980544090271, 0.7450980544090271), (0.75210082530975342,
0.74901962280273438, 0.74901962280273438), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78823530673980713, 0.78823530673980713), (0.79411762952804565,
0.7921568751335144, 0.7921568751335144), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.85098040103912354, 0.85098040103912354), (0.8571428656578064,
0.85490196943283081, 0.85490196943283081), (0.86134451627731323,
0.85882353782653809, 0.85882353782653809), (0.86554622650146484,
0.86274510622024536, 0.86274510622024536), (0.86974787712097168,
0.86666667461395264, 0.86666667461395264), (0.87394958734512329,
0.87058824300765991, 0.87058824300765991), (0.87815123796463013,
0.87450981140136719, 0.87450981140136719), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.91372549533843994, 0.91372549533843994), (0.92016804218292236,
0.91764706373214722, 0.91764706373214722), (0.92436975240707397,
0.92156863212585449, 0.92156863212585449), (0.92857140302658081,
0.92549020051956177, 0.92549020051956177), (0.93277311325073242,
0.92941176891326904, 0.92941176891326904), (0.93697476387023926,
0.93333333730697632, 0.93333333730697632), (0.94117647409439087,
0.93725490570068359, 0.93725490570068359), (0.94537812471389771,
0.94117647409439087, 0.94117647409439087), (0.94957983493804932,
0.94509804248809814, 0.94509804248809814), (0.95378148555755615,
0.94901961088180542, 0.94901961088180542), (0.95798319578170776,
0.9529411792755127, 0.9529411792755127), (0.9621848464012146,
0.95686274766921997, 0.95686274766921997), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97647058963775635, 0.97647058963775635), (0.98319327831268311,
0.98039215803146362, 0.98039215803146362), (0.98739492893218994,
0.9843137264251709, 0.9843137264251709), (0.99159663915634155,
0.98823529481887817, 0.98823529481887817), (0.99579828977584839,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)]}
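# A minimal sketch of how a segment-data dict such as _gist_gray_data is
# turned into a usable colormap object -- the module docstring notes that the
# cmap objects visible in cm.py are built from these dictionaries.  The
# __main__ guard and the name 'demo_gray' are illustrative only, not part of
# this module's data:
if __name__ == '__main__':
    # LUTSIZE and colors are defined at the top of this module.
    demo_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data,
                                               LUTSIZE)
    # Calling the colormap with a scalar in [0, 1] returns an RGBA tuple;
    # 0.5 should land near mid-gray (~0.498, 0.498, 0.498, 1.0).
    print demo_gray(0.5)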
_gist_heat_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388, 0.0, 0.0),
(0.48319327831268311, 0.0, 0.0), (0.48739495873451233, 0.0, 0.0),
(0.49159663915634155, 0.0, 0.0), (0.49579831957817078, 0.0, 0.0), (0.5,
0.0, 0.0), (0.50420171022415161, 0.0, 0.0), (0.50840336084365845, 0.0,
0.0), (0.51260507106781006, 0.0, 0.0), (0.51680672168731689, 0.0, 0.0),
(0.52100843191146851, 0.0, 0.0), (0.52521008253097534, 0.0, 0.0),
(0.52941179275512695, 0.0, 0.0), (0.53361344337463379, 0.0, 0.0),
(0.5378151535987854, 0.0, 0.0), (0.54201680421829224, 0.0, 0.0),
(0.54621851444244385, 0.0, 0.0), (0.55042016506195068, 0.0, 0.0),
(0.55462187528610229, 0.0, 0.0), (0.55882352590560913, 0.0, 0.0),
(0.56302523612976074, 0.0, 0.0), (0.56722688674926758, 0.0, 0.0),
(0.57142859697341919, 0.0, 0.0), (0.57563024759292603, 0.0, 0.0),
(0.57983195781707764, 0.0, 0.0), (0.58403360843658447, 0.0, 0.0),
(0.58823531866073608, 0.0, 0.0), (0.59243696928024292, 0.0, 0.0),
(0.59663867950439453, 0.0, 0.0), (0.60084033012390137, 0.0, 0.0),
(0.60504204034805298, 0.0, 0.0), (0.60924369096755981, 0.0, 0.0),
(0.61344540119171143, 0.0, 0.0), (0.61764705181121826, 0.0, 0.0),
(0.62184876203536987, 0.0, 0.0), (0.62605041265487671, 0.0, 0.0),
(0.63025212287902832, 0.0, 0.0), (0.63445377349853516, 0.0, 0.0),
(0.63865548372268677, 0.0, 0.0), (0.6428571343421936, 0.0, 0.0),
(0.64705884456634521, 0.0, 0.0), (0.65126049518585205, 0.0, 0.0),
(0.65546220541000366, 0.0, 0.0), (0.6596638560295105, 0.0, 0.0),
(0.66386556625366211, 0.0, 0.0), (0.66806721687316895, 0.0, 0.0),
(0.67226892709732056, 0.0, 0.0), (0.67647057771682739, 0.0, 0.0),
(0.680672287940979, 0.0, 0.0), (0.68487393856048584, 0.0, 0.0),
(0.68907564878463745, 0.0, 0.0), (0.69327729940414429, 0.0, 0.0),
(0.6974790096282959, 0.0, 0.0), (0.70168066024780273, 0.0, 0.0),
(0.70588237047195435, 0.0, 0.0), (0.71008402109146118, 0.0, 0.0),
(0.71428573131561279, 0.0, 0.0), (0.71848738193511963, 0.0, 0.0),
(0.72268909215927124, 0.0, 0.0), (0.72689074277877808, 0.0, 0.0),
(0.73109245300292969, 0.0, 0.0), (0.73529410362243652, 0.0, 0.0),
(0.73949581384658813, 0.0, 0.0), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.0, 0.0), (0.75210082530975342, 0.0, 0.0),
(0.75630253553390503, 0.027450980618596077, 0.027450980618596077),
(0.76050418615341187, 0.043137256056070328, 0.043137256056070328),
(0.76470589637756348, 0.058823529630899429, 0.058823529630899429),
(0.76890754699707031, 0.074509806931018829, 0.074509806931018829),
(0.77310925722122192, 0.090196080505847931, 0.090196080505847931),
(0.77731090784072876, 0.10588235408067703, 0.10588235408067703),
(0.78151261806488037, 0.12156862765550613, 0.12156862765550613),
(0.78571426868438721, 0.13725490868091583, 0.13725490868091583),
(0.78991597890853882, 0.15294118225574493, 0.15294118225574493),
(0.79411762952804565, 0.16862745583057404, 0.16862745583057404),
(0.79831933975219727, 0.20000000298023224, 0.20000000298023224),
(0.8025209903717041, 0.21176470816135406, 0.21176470816135406),
(0.80672270059585571, 0.22745098173618317, 0.22745098173618317),
(0.81092435121536255, 0.24313725531101227, 0.24313725531101227),
(0.81512606143951416, 0.25882354378700256, 0.25882354378700256),
(0.819327712059021, 0.27450981736183167, 0.27450981736183167),
(0.82352942228317261, 0.29019609093666077, 0.29019609093666077),
(0.82773107290267944, 0.30588236451148987, 0.30588236451148987),
(0.83193278312683105, 0.32156863808631897, 0.32156863808631897),
(0.83613443374633789, 0.33725491166114807, 0.33725491166114807),
(0.8403361439704895, 0.35294118523597717, 0.35294118523597717),
(0.84453779458999634, 0.36862745881080627, 0.36862745881080627),
(0.84873950481414795, 0.38431373238563538, 0.38431373238563538),
(0.85294115543365479, 0.40000000596046448, 0.40000000596046448),
(0.8571428656578064, 0.4117647111415863, 0.4117647111415863),
(0.86134451627731323, 0.42745098471641541, 0.42745098471641541),
(0.86554622650146484, 0.44313725829124451, 0.44313725829124451),
(0.86974787712097168, 0.45882353186607361, 0.45882353186607361),
(0.87394958734512329, 0.47450980544090271, 0.47450980544090271),
(0.87815123796463013, 0.49019607901573181, 0.49019607901573181),
(0.88235294818878174, 0.5215686559677124, 0.5215686559677124),
(0.88655459880828857, 0.5372549295425415, 0.5372549295425415),
(0.89075630903244019, 0.55294120311737061, 0.55294120311737061),
(0.89495795965194702, 0.56862747669219971, 0.56862747669219971),
(0.89915966987609863, 0.58431375026702881, 0.58431375026702881),
(0.90336132049560547, 0.60000002384185791, 0.60000002384185791),
(0.90756303071975708, 0.61176472902297974, 0.61176472902297974),
(0.91176468133926392, 0.62745100259780884, 0.62745100259780884),
(0.91596639156341553, 0.64313727617263794, 0.64313727617263794),
(0.92016804218292236, 0.65882354974746704, 0.65882354974746704),
(0.92436975240707397, 0.67450982332229614, 0.67450982332229614),
(0.92857140302658081, 0.69019609689712524, 0.69019609689712524),
(0.93277311325073242, 0.70588237047195435, 0.70588237047195435),
(0.93697476387023926, 0.72156864404678345, 0.72156864404678345),
(0.94117647409439087, 0.73725491762161255, 0.73725491762161255),
(0.94537812471389771, 0.75294119119644165, 0.75294119119644165),
(0.94957983493804932, 0.76862746477127075, 0.76862746477127075),
(0.95378148555755615, 0.78431373834609985, 0.78431373834609985),
(0.95798319578170776, 0.80000001192092896, 0.80000001192092896),
(0.9621848464012146, 0.81176471710205078, 0.81176471710205078),
(0.96638655662536621, 0.84313726425170898, 0.84313726425170898),
(0.97058820724487305, 0.85882353782653809, 0.85882353782653809),
(0.97478991746902466, 0.87450981140136719, 0.87450981140136719),
(0.97899156808853149, 0.89019608497619629, 0.89019608497619629),
(0.98319327831268311, 0.90588235855102539, 0.90588235855102539),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388,
0.0039215688593685627, 0.0039215688593685627), (0.48319327831268311,
0.011764706112444401, 0.011764706112444401), (0.48739495873451233,
0.019607843831181526, 0.019607843831181526), (0.49159663915634155,
0.027450980618596077, 0.027450980618596077), (0.49579831957817078,
0.035294119268655777, 0.035294119268655777), (0.5, 0.043137256056070328,
0.043137256056070328), (0.50420171022415161, 0.058823529630899429,
0.058823529630899429), (0.50840336084365845, 0.066666670143604279,
0.066666670143604279), (0.51260507106781006, 0.070588238537311554,
0.070588238537311554), (0.51680672168731689, 0.078431375324726105,
0.078431375324726105), (0.52100843191146851, 0.086274512112140656,
0.086274512112140656), (0.52521008253097534, 0.094117648899555206,
0.094117648899555206), (0.52941179275512695, 0.10196078568696976,
0.10196078568696976), (0.53361344337463379, 0.10980392247438431,
0.10980392247438431), (0.5378151535987854, 0.11764705926179886,
0.11764705926179886), (0.54201680421829224, 0.12549020349979401,
0.12549020349979401), (0.54621851444244385, 0.13725490868091583,
0.13725490868091583), (0.55042016506195068, 0.14509804546833038,
0.14509804546833038), (0.55462187528610229, 0.15294118225574493,
0.15294118225574493), (0.55882352590560913, 0.16078431904315948,
0.16078431904315948), (0.56302523612976074, 0.16862745583057404,
0.16862745583057404), (0.56722688674926758, 0.17647059261798859,
0.17647059261798859), (0.57142859697341919, 0.18431372940540314,
0.18431372940540314), (0.57563024759292603, 0.19215686619281769,
0.19215686619281769), (0.57983195781707764, 0.20000000298023224,
0.20000000298023224), (0.58403360843658447, 0.20392157137393951,
0.20392157137393951), (0.58823531866073608, 0.21176470816135406,
0.21176470816135406), (0.59243696928024292, 0.21960784494876862,
0.21960784494876862), (0.59663867950439453, 0.22745098173618317,
0.22745098173618317), (0.60084033012390137, 0.23529411852359772,
0.23529411852359772), (0.60504204034805298, 0.24313725531101227,
0.24313725531101227), (0.60924369096755981, 0.25098040699958801,
0.25098040699958801), (0.61344540119171143, 0.25882354378700256,
0.25882354378700256), (0.61764705181121826, 0.26666668057441711,
0.26666668057441711), (0.62184876203536987, 0.27058824896812439,
0.27058824896812439), (0.62605041265487671, 0.27843138575553894,
0.27843138575553894), (0.63025212287902832, 0.29411765933036804,
0.29411765933036804), (0.63445377349853516, 0.30196079611778259,
0.30196079611778259), (0.63865548372268677, 0.30980393290519714,
0.30980393290519714), (0.6428571343421936, 0.31764706969261169,
0.31764706969261169), (0.64705884456634521, 0.32549020648002625,
0.32549020648002625), (0.65126049518585205, 0.3333333432674408,
0.3333333432674408), (0.65546220541000366, 0.33725491166114807,
0.33725491166114807), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.35294118523597717,
0.35294118523597717), (0.66806721687316895, 0.36078432202339172,
0.36078432202339172), (0.67226892709732056, 0.36862745881080627,
0.36862745881080627), (0.67647057771682739, 0.37647059559822083,
0.37647059559822083), (0.680672287940979, 0.38431373238563538,
0.38431373238563538), (0.68487393856048584, 0.39215686917304993,
0.39215686917304993), (0.68907564878463745, 0.40000000596046448,
0.40000000596046448), (0.69327729940414429, 0.40392157435417175,
0.40392157435417175), (0.6974790096282959, 0.4117647111415863,
0.4117647111415863), (0.70168066024780273, 0.41960784792900085,
0.41960784792900085), (0.70588237047195435, 0.42745098471641541,
0.42745098471641541), (0.71008402109146118, 0.43529412150382996,
0.43529412150382996), (0.71428573131561279, 0.45098039507865906,
0.45098039507865906), (0.71848738193511963, 0.45882353186607361,
0.45882353186607361), (0.72268909215927124, 0.46666666865348816,
0.46666666865348816), (0.72689074277877808, 0.47058823704719543,
0.47058823704719543), (0.73109245300292969, 0.47843137383460999,
0.47843137383460999), (0.73529410362243652, 0.48627451062202454,
0.48627451062202454), (0.73949581384658813, 0.49411764740943909,
0.49411764740943909), (0.74369746446609497, 0.50196081399917603,
0.50196081399917603), (0.74789917469024658, 0.50980395078659058,
0.50980395078659058), (0.75210082530975342, 0.51764708757400513,
0.51764708757400513), (0.75630253553390503, 0.53333336114883423,
0.53333336114883423), (0.76050418615341187, 0.5372549295425415,
0.5372549295425415), (0.76470589637756348, 0.54509806632995605,
0.54509806632995605), (0.76890754699707031, 0.55294120311737061,
0.55294120311737061), (0.77310925722122192, 0.56078433990478516,
0.56078433990478516), (0.77731090784072876, 0.56862747669219971,
0.56862747669219971), (0.78151261806488037, 0.57647061347961426,
0.57647061347961426), (0.78571426868438721, 0.58431375026702881,
0.58431375026702881), (0.78991597890853882, 0.59215688705444336,
0.59215688705444336), (0.79411762952804565, 0.60000002384185791,
0.60000002384185791), (0.79831933975219727, 0.61176472902297974,
0.61176472902297974), (0.8025209903717041, 0.61960786581039429,
0.61960786581039429), (0.80672270059585571, 0.62745100259780884,
0.62745100259780884), (0.81092435121536255, 0.63529413938522339,
0.63529413938522339), (0.81512606143951416, 0.64313727617263794,
0.64313727617263794), (0.819327712059021, 0.65098041296005249,
0.65098041296005249), (0.82352942228317261, 0.65882354974746704,
0.65882354974746704), (0.82773107290267944, 0.66666668653488159,
0.66666668653488159), (0.83193278312683105, 0.67058825492858887,
0.67058825492858887), (0.83613443374633789, 0.67843139171600342,
0.67843139171600342), (0.8403361439704895, 0.68627452850341797,
0.68627452850341797), (0.84453779458999634, 0.69411766529083252,
0.69411766529083252), (0.84873950481414795, 0.70196080207824707,
0.70196080207824707), (0.85294115543365479, 0.70980393886566162,
0.70980393886566162), (0.8571428656578064, 0.71764707565307617,
0.71764707565307617), (0.86134451627731323, 0.72549021244049072,
0.72549021244049072), (0.86554622650146484, 0.73333334922790527,
0.73333334922790527), (0.86974787712097168, 0.73725491762161255,
0.73725491762161255), (0.87394958734512329, 0.7450980544090271,
0.7450980544090271), (0.87815123796463013, 0.75294119119644165,
0.75294119119644165), (0.88235294818878174, 0.76862746477127075,
0.76862746477127075), (0.88655459880828857, 0.7764706015586853,
0.7764706015586853), (0.89075630903244019, 0.78431373834609985,
0.78431373834609985), (0.89495795965194702, 0.7921568751335144,
0.7921568751335144), (0.89915966987609863, 0.80000001192092896,
0.80000001192092896), (0.90336132049560547, 0.80392158031463623,
0.80392158031463623), (0.90756303071975708, 0.81176471710205078,
0.81176471710205078), (0.91176468133926392, 0.81960785388946533,
0.81960785388946533), (0.91596639156341553, 0.82745099067687988,
0.82745099067687988), (0.92016804218292236, 0.83529412746429443,
0.83529412746429443), (0.92436975240707397, 0.84313726425170898,
0.84313726425170898), (0.92857140302658081, 0.85098040103912354,
0.85098040103912354), (0.93277311325073242, 0.85882353782653809,
0.85882353782653809), (0.93697476387023926, 0.86666667461395264,
0.86666667461395264), (0.94117647409439087, 0.87058824300765991,
0.87058824300765991), (0.94537812471389771, 0.87843137979507446,
0.87843137979507446), (0.94957983493804932, 0.88627451658248901,
0.88627451658248901), (0.95378148555755615, 0.89411765336990356,
0.89411765336990356), (0.95798319578170776, 0.90196079015731812,
0.90196079015731812), (0.9621848464012146, 0.90980392694473267,
0.90980392694473267), (0.96638655662536621, 0.92549020051956177,
0.92549020051956177), (0.97058820724487305, 0.93333333730697632,
0.93333333730697632), (0.97478991746902466, 0.93725490570068359,
0.93725490570068359), (0.97899156808853149, 0.94509804248809814,
0.94509804248809814), (0.98319327831268311, 0.9529411792755127,
0.9529411792755127), (0.98739492893218994, 0.96078431606292725,
0.96078431606292725), (0.99159663915634155, 0.9686274528503418,
0.9686274528503418), (0.99579828977584839, 0.97647058963775635,
0.97647058963775635), (1.0, 0.9843137264251709, 0.9843137264251709)],
'red': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.015686275437474251,
0.015686275437474251), (0.016806723549962044, 0.019607843831181526,
0.019607843831181526), (0.021008403971791267, 0.027450980618596077,
0.027450980618596077), (0.025210084393620491, 0.031372550874948502,
0.031372550874948502), (0.029411764815449715, 0.039215687662363052,
0.039215687662363052), (0.033613447099924088, 0.043137256056070328,
0.043137256056070328), (0.037815127521753311, 0.050980392843484879,
0.050980392843484879), (0.042016807943582535, 0.058823529630899429,
0.058823529630899429), (0.046218488365411758, 0.066666670143604279,
0.066666670143604279), (0.050420168787240982, 0.070588238537311554,
0.070588238537311554), (0.054621849209070206, 0.078431375324726105,
0.078431375324726105), (0.058823529630899429, 0.08235294371843338,
0.08235294371843338), (0.063025213778018951, 0.090196080505847931,
0.090196080505847931), (0.067226894199848175, 0.094117648899555206,
0.094117648899555206), (0.071428574621677399, 0.10196078568696976,
0.10196078568696976), (0.075630255043506622, 0.10588235408067703,
0.10588235408067703), (0.079831935465335846, 0.10980392247438431,
0.10980392247438431), (0.08403361588716507, 0.11764705926179886,
0.11764705926179886), (0.088235296308994293, 0.12156862765550613,
0.12156862765550613), (0.092436976730823517, 0.12941177189350128,
0.12941177189350128), (0.09663865715265274, 0.13333334028720856,
0.13333334028720856), (0.10084033757448196, 0.14117647707462311,
0.14117647707462311), (0.10504201799631119, 0.14509804546833038,
0.14509804546833038), (0.10924369841814041, 0.15294118225574493,
0.15294118225574493), (0.11344537883996964, 0.15686275064945221,
0.15686275064945221), (0.11764705926179886, 0.16470588743686676,
0.16470588743686676), (0.12184873968362808, 0.16862745583057404,
0.16862745583057404), (0.1260504275560379, 0.18039216101169586,
0.18039216101169586), (0.13025210797786713, 0.18431372940540314,
0.18431372940540314), (0.13445378839969635, 0.19215686619281769,
0.19215686619281769), (0.13865546882152557, 0.19607843458652496,
0.19607843458652496), (0.1428571492433548, 0.20392157137393951,
0.20392157137393951), (0.14705882966518402, 0.20784313976764679,
0.20784313976764679), (0.15126051008701324, 0.21568627655506134,
0.21568627655506134), (0.15546219050884247, 0.21960784494876862,
0.21960784494876862), (0.15966387093067169, 0.22352941334247589,
0.22352941334247589), (0.16386555135250092, 0.23137255012989044,
0.23137255012989044), (0.16806723177433014, 0.23529411852359772,
0.23529411852359772), (0.17226891219615936, 0.24313725531101227,
0.24313725531101227), (0.17647059261798859, 0.24705882370471954,
0.24705882370471954), (0.18067227303981781, 0.25490197539329529,
0.25490197539329529), (0.18487395346164703, 0.25882354378700256,
0.25882354378700256), (0.18907563388347626, 0.26666668057441711,
0.26666668057441711), (0.19327731430530548, 0.27058824896812439,
0.27058824896812439), (0.1974789947271347, 0.27450981736183167,
0.27450981736183167), (0.20168067514896393, 0.28235295414924622,
0.28235295414924622), (0.20588235557079315, 0.28627452254295349,
0.28627452254295349), (0.21008403599262238, 0.29803922772407532,
0.29803922772407532), (0.2142857164144516, 0.30588236451148987,
0.30588236451148987), (0.21848739683628082, 0.30980393290519714,
0.30980393290519714), (0.22268907725811005, 0.31764706969261169,
0.31764706969261169), (0.22689075767993927, 0.32156863808631897,
0.32156863808631897), (0.23109243810176849, 0.32941177487373352,
0.32941177487373352), (0.23529411852359772, 0.3333333432674408,
0.3333333432674408), (0.23949579894542694, 0.33725491166114807,
0.33725491166114807), (0.24369747936725616, 0.34509804844856262,
0.34509804844856262), (0.24789915978908539, 0.3490196168422699,
0.3490196168422699), (0.25210085511207581, 0.36078432202339172,
0.36078432202339172), (0.25630253553390503, 0.36862745881080627,
0.36862745881080627), (0.26050421595573425, 0.37254902720451355,
0.37254902720451355), (0.26470589637756348, 0.3803921639919281,
0.3803921639919281), (0.2689075767993927, 0.38431373238563538,
0.38431373238563538), (0.27310925722122192, 0.38823530077934265,
0.38823530077934265), (0.27731093764305115, 0.3960784375667572,
0.3960784375667572), (0.28151261806488037, 0.40000000596046448,
0.40000000596046448), (0.28571429848670959, 0.40784314274787903,
0.40784314274787903), (0.28991597890853882, 0.4117647111415863,
0.4117647111415863), (0.29411765933036804, 0.42352941632270813,
0.42352941632270813), (0.29831933975219727, 0.43137255311012268,
0.43137255311012268), (0.30252102017402649, 0.43529412150382996,
0.43529412150382996), (0.30672270059585571, 0.44313725829124451,
0.44313725829124451), (0.31092438101768494, 0.44705882668495178,
0.44705882668495178), (0.31512606143951416, 0.45098039507865906,
0.45098039507865906), (0.31932774186134338, 0.45882353186607361,
0.45882353186607361), (0.32352942228317261, 0.46274510025978088,
0.46274510025978088), (0.32773110270500183, 0.47058823704719543,
0.47058823704719543), (0.33193278312683105, 0.47450980544090271,
0.47450980544090271), (0.33613446354866028, 0.48235294222831726,
0.48235294222831726), (0.3403361439704895, 0.48627451062202454,
0.48627451062202454), (0.34453782439231873, 0.49411764740943909,
0.49411764740943909), (0.34873950481414795, 0.49803921580314636,
0.49803921580314636), (0.35294118523597717, 0.50196081399917603,
0.50196081399917603), (0.3571428656578064, 0.50980395078659058,
0.50980395078659058), (0.36134454607963562, 0.51372551918029785,
0.51372551918029785), (0.36554622650146484, 0.5215686559677124,
0.5215686559677124), (0.36974790692329407, 0.52549022436141968,
0.52549022436141968), (0.37394958734512329, 0.53333336114883423,
0.53333336114883423), (0.37815126776695251, 0.54509806632995605,
0.54509806632995605), (0.38235294818878174, 0.54901963472366333,
0.54901963472366333), (0.38655462861061096, 0.55294120311737061,
0.55294120311737061), (0.39075630903244019, 0.56078433990478516,
0.56078433990478516), (0.39495798945426941, 0.56470590829849243,
0.56470590829849243), (0.39915966987609863, 0.57254904508590698,
0.57254904508590698), (0.40336135029792786, 0.57647061347961426,
0.57647061347961426), (0.40756303071975708, 0.58431375026702881,
0.58431375026702881), (0.4117647111415863, 0.58823531866073608,
0.58823531866073608), (0.41596639156341553, 0.59607845544815063,
0.59607845544815063), (0.42016807198524475, 0.60000002384185791,
0.60000002384185791), (0.42436975240707397, 0.60784316062927246,
0.60784316062927246), (0.4285714328289032, 0.61176472902297974,
0.61176472902297974), (0.43277311325073242, 0.61568629741668701,
0.61568629741668701), (0.43697479367256165, 0.62352943420410156,
0.62352943420410156), (0.44117647409439087, 0.62745100259780884,
0.62745100259780884), (0.44537815451622009, 0.63529413938522339,
0.63529413938522339), (0.44957983493804932, 0.63921570777893066,
0.63921570777893066), (0.45378151535987854, 0.64705884456634521,
0.64705884456634521), (0.45798319578170776, 0.65098041296005249,
0.65098041296005249), (0.46218487620353699, 0.66274511814117432,
0.66274511814117432), (0.46638655662536621, 0.66666668653488159,
0.66666668653488159), (0.47058823704719543, 0.67450982332229614,
0.67450982332229614), (0.47478991746902466, 0.67843139171600342,
0.67843139171600342), (0.47899159789085388, 0.68627452850341797,
0.68627452850341797), (0.48319327831268311, 0.69019609689712524,
0.69019609689712524), (0.48739495873451233, 0.69803923368453979,
0.69803923368453979), (0.49159663915634155, 0.70196080207824707,
0.70196080207824707), (0.49579831957817078, 0.70980393886566162,
0.70980393886566162), (0.5, 0.7137255072593689, 0.7137255072593689),
(0.50420171022415161, 0.72549021244049072, 0.72549021244049072),
(0.50840336084365845, 0.729411780834198, 0.729411780834198),
(0.51260507106781006, 0.73725491762161255, 0.73725491762161255),
(0.51680672168731689, 0.74117648601531982, 0.74117648601531982),
(0.52100843191146851, 0.74901962280273438, 0.74901962280273438),
(0.52521008253097534, 0.75294119119644165, 0.75294119119644165),
(0.52941179275512695, 0.7607843279838562, 0.7607843279838562),
(0.53361344337463379, 0.76470589637756348, 0.76470589637756348),
(0.5378151535987854, 0.77254903316497803, 0.77254903316497803),
(0.54201680421829224, 0.7764706015586853, 0.7764706015586853),
(0.54621851444244385, 0.78823530673980713, 0.78823530673980713),
(0.55042016506195068, 0.7921568751335144, 0.7921568751335144),
(0.55462187528610229, 0.80000001192092896, 0.80000001192092896),
(0.55882352590560913, 0.80392158031463623, 0.80392158031463623),
(0.56302523612976074, 0.81176471710205078, 0.81176471710205078),
(0.56722688674926758, 0.81568628549575806, 0.81568628549575806),
(0.57142859697341919, 0.82352942228317261, 0.82352942228317261),
(0.57563024759292603, 0.82745099067687988, 0.82745099067687988),
(0.57983195781707764, 0.83137255907058716, 0.83137255907058716),
(0.58403360843658447, 0.83921569585800171, 0.83921569585800171),
(0.58823531866073608, 0.84313726425170898, 0.84313726425170898),
(0.59243696928024292, 0.85098040103912354, 0.85098040103912354),
(0.59663867950439453, 0.85490196943283081, 0.85490196943283081),
(0.60084033012390137, 0.86274510622024536, 0.86274510622024536),
(0.60504204034805298, 0.86666667461395264, 0.86666667461395264),
(0.60924369096755981, 0.87450981140136719, 0.87450981140136719),
(0.61344540119171143, 0.87843137979507446, 0.87843137979507446),
(0.61764705181121826, 0.88627451658248901, 0.88627451658248901),
(0.62184876203536987, 0.89019608497619629, 0.89019608497619629),
(0.62605041265487671, 0.89411765336990356, 0.89411765336990356),
(0.63025212287902832, 0.90588235855102539, 0.90588235855102539),
(0.63445377349853516, 0.91372549533843994, 0.91372549533843994),
(0.63865548372268677, 0.91764706373214722, 0.91764706373214722),
(0.6428571343421936, 0.92549020051956177, 0.92549020051956177),
(0.64705884456634521, 0.92941176891326904, 0.92941176891326904),
(0.65126049518585205, 0.93725490570068359, 0.93725490570068359),
(0.65546220541000366, 0.94117647409439087, 0.94117647409439087),
(0.6596638560295105, 0.94509804248809814, 0.94509804248809814),
(0.66386556625366211, 0.9529411792755127, 0.9529411792755127),
(0.66806721687316895, 0.95686274766921997, 0.95686274766921997),
(0.67226892709732056, 0.96470588445663452, 0.96470588445663452),
(0.67647057771682739, 0.9686274528503418, 0.9686274528503418),
(0.680672287940979, 0.97647058963775635, 0.97647058963775635),
(0.68487393856048584, 0.98039215803146362, 0.98039215803146362),
(0.68907564878463745, 0.98823529481887817, 0.98823529481887817),
(0.69327729940414429, 0.99215686321258545, 0.99215686321258545),
(0.6974790096282959, 1.0, 1.0), (0.70168066024780273, 1.0, 1.0),
(0.70588237047195435, 1.0, 1.0), (0.71008402109146118, 1.0, 1.0),
(0.71428573131561279, 1.0, 1.0), (0.71848738193511963, 1.0, 1.0),
(0.72268909215927124, 1.0, 1.0), (0.72689074277877808, 1.0, 1.0),
(0.73109245300292969, 1.0, 1.0), (0.73529410362243652, 1.0, 1.0),
(0.73949581384658813, 1.0, 1.0), (0.74369746446609497, 1.0, 1.0),
(0.74789917469024658, 1.0, 1.0), (0.75210082530975342, 1.0, 1.0),
(0.75630253553390503, 1.0, 1.0), (0.76050418615341187, 1.0, 1.0),
(0.76470589637756348, 1.0, 1.0), (0.76890754699707031, 1.0, 1.0),
(0.77310925722122192, 1.0, 1.0), (0.77731090784072876, 1.0, 1.0),
(0.78151261806488037, 1.0, 1.0), (0.78571426868438721, 1.0, 1.0),
(0.78991597890853882, 1.0, 1.0), (0.79411762952804565, 1.0, 1.0),
(0.79831933975219727, 1.0, 1.0), (0.8025209903717041, 1.0, 1.0),
(0.80672270059585571, 1.0, 1.0), (0.81092435121536255, 1.0, 1.0),
(0.81512606143951416, 1.0, 1.0), (0.819327712059021, 1.0, 1.0),
(0.82352942228317261, 1.0, 1.0), (0.82773107290267944, 1.0, 1.0),
(0.83193278312683105, 1.0, 1.0), (0.83613443374633789, 1.0, 1.0),
(0.8403361439704895, 1.0, 1.0), (0.84453779458999634, 1.0, 1.0),
(0.84873950481414795, 1.0, 1.0), (0.85294115543365479, 1.0, 1.0),
(0.8571428656578064, 1.0, 1.0), (0.86134451627731323, 1.0, 1.0),
(0.86554622650146484, 1.0, 1.0), (0.86974787712097168, 1.0, 1.0),
(0.87394958734512329, 1.0, 1.0), (0.87815123796463013, 1.0, 1.0),
(0.88235294818878174, 1.0, 1.0), (0.88655459880828857, 1.0, 1.0),
(0.89075630903244019, 1.0, 1.0), (0.89495795965194702, 1.0, 1.0),
(0.89915966987609863, 1.0, 1.0), (0.90336132049560547, 1.0, 1.0),
(0.90756303071975708, 1.0, 1.0), (0.91176468133926392, 1.0, 1.0),
(0.91596639156341553, 1.0, 1.0), (0.92016804218292236, 1.0, 1.0),
(0.92436975240707397, 1.0, 1.0), (0.92857140302658081, 1.0, 1.0),
(0.93277311325073242, 1.0, 1.0), (0.93697476387023926, 1.0, 1.0),
(0.94117647409439087, 1.0, 1.0), (0.94537812471389771, 1.0, 1.0),
(0.94957983493804932, 1.0, 1.0), (0.95378148555755615, 1.0, 1.0),
(0.95798319578170776, 1.0, 1.0), (0.9621848464012146, 1.0, 1.0),
(0.96638655662536621, 1.0, 1.0), (0.97058820724487305, 1.0, 1.0),
(0.97478991746902466, 1.0, 1.0), (0.97899156808853149, 1.0, 1.0),
(0.98319327831268311, 1.0, 1.0), (0.98739492893218994, 1.0, 1.0),
(0.99159663915634155, 1.0, 1.0), (0.99579828977584839, 1.0, 1.0), (1.0,
1.0, 1.0)]}
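# A minimal usage sketch for a dict like the one above (cm.py performs the
# actual colormap construction; the names 'gist_heat' and 'rgba' here are
# illustrative only):
#
#   gist_heat = colors.LinearSegmentedColormap('gist_heat',
#                                              _gist_heat_data, LUTSIZE)
#   rgba = gist_heat(0.75)  # map a normalized scalar to an (r, g, b, a) tuple
#
# _gist_ncar_data below appears to mirror the NCAR Graphics rainbow-style
# palette; its rapidly alternating segments produce many distinct hue bands.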
_gist_ncar_data = {'blue': [(0.0, 0.50196081399917603,
0.50196081399917603), (0.0050505050458014011, 0.45098039507865906,
0.45098039507865906), (0.010101010091602802, 0.40392157435417175,
0.40392157435417175), (0.015151515603065491, 0.35686275362968445,
0.35686275362968445), (0.020202020183205605, 0.30980393290519714,
0.30980393290519714), (0.025252524763345718, 0.25882354378700256,
0.25882354378700256), (0.030303031206130981, 0.21176470816135406,
0.21176470816135406), (0.035353533923625946, 0.16470588743686676,
0.16470588743686676), (0.040404040366411209, 0.11764705926179886,
0.11764705926179886), (0.045454546809196472, 0.070588238537311554,
0.070588238537311554), (0.050505049526691437, 0.019607843831181526,
0.019607843831181526), (0.0555555559694767, 0.047058824449777603,
0.047058824449777603), (0.060606062412261963, 0.14509804546833038,
0.14509804546833038), (0.065656565129756927, 0.23921568691730499,
0.23921568691730499), (0.070707067847251892, 0.3333333432674408,
0.3333333432674408), (0.075757578015327454, 0.43137255311012268,
0.43137255311012268), (0.080808080732822418, 0.52549022436141968,
0.52549022436141968), (0.085858583450317383, 0.61960786581039429,
0.61960786581039429), (0.090909093618392944, 0.71764707565307617,
0.71764707565307617), (0.095959596335887909, 0.81176471710205078,
0.81176471710205078), (0.10101009905338287, 0.90588235855102539,
0.90588235855102539), (0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0), (0.42424243688583374,
0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691,
0.027450980618596077, 0.027450980618596077), (0.43434342741966248,
0.050980392843484879, 0.050980392843484879), (0.43939393758773804,
0.074509806931018829, 0.074509806931018829), (0.4444444477558136,
0.094117648899555206, 0.094117648899555206), (0.44949495792388916,
0.11764705926179886, 0.11764705926179886), (0.45454546809196472,
0.14117647707462311, 0.14117647707462311), (0.4595959484577179,
0.16470588743686676, 0.16470588743686676), (0.46464645862579346,
0.18823529779911041, 0.18823529779911041), (0.46969696879386902,
0.21176470816135406, 0.21176470816135406), (0.47474747896194458,
0.23529411852359772, 0.23529411852359772), (0.47979798913002014,
0.22352941334247589, 0.22352941334247589), (0.4848484992980957,
0.20000000298023224, 0.20000000298023224), (0.48989897966384888,
0.17647059261798859, 0.17647059261798859), (0.49494948983192444,
0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128,
0.12941177189350128), (0.50505048036575317, 0.10980392247438431,
0.10980392247438431), (0.51010102033615112, 0.086274512112140656,
0.086274512112140656), (0.5151515007019043, 0.062745101749897003,
0.062745101749897003), (0.52020204067230225, 0.039215687662363052,
0.039215687662363052), (0.52525252103805542, 0.015686275437474251,
0.015686275437474251), (0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0), (0.74242424964904785,
0.031372550874948502, 0.031372550874948502), (0.74747473001480103,
0.12941177189350128, 0.12941177189350128), (0.75252526998519897,
0.22352941334247589, 0.22352941334247589), (0.75757575035095215,
0.32156863808631897, 0.32156863808631897), (0.7626262903213501,
0.41568627953529358, 0.41568627953529358), (0.76767677068710327,
0.50980395078659058, 0.50980395078659058), (0.77272725105285645,
0.60784316062927246, 0.60784316062927246), (0.77777779102325439,
0.70196080207824707, 0.70196080207824707), (0.78282827138900757,
0.79607844352722168, 0.79607844352722168), (0.78787881135940552,
0.89411765336990356, 0.89411765336990356), (0.79292929172515869,
0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0,
1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0), (0.84848487377166748,
0.99607843160629272, 0.99607843160629272), (0.85353535413742065,
0.98823529481887817, 0.98823529481887817), (0.85858583450317383,
0.9843137264251709, 0.9843137264251709), (0.86363637447357178,
0.97647058963775635, 0.97647058963775635), (0.86868685483932495,
0.9686274528503418, 0.9686274528503418), (0.8737373948097229,
0.96470588445663452, 0.96470588445663452), (0.87878787517547607,
0.95686274766921997, 0.95686274766921997), (0.88383835554122925,
0.94901961088180542, 0.94901961088180542), (0.8888888955116272,
0.94509804248809814, 0.94509804248809814), (0.89393937587738037,
0.93725490570068359, 0.93725490570068359), (0.89898991584777832,
0.93333333730697632, 0.93333333730697632), (0.90404039621353149,
0.93333333730697632, 0.93333333730697632), (0.90909093618392944,
0.93725490570068359, 0.93725490570068359), (0.91414141654968262,
0.93725490570068359, 0.93725490570068359), (0.91919189691543579,
0.94117647409439087, 0.94117647409439087), (0.92424243688583374,
0.94509804248809814, 0.94509804248809814), (0.92929291725158691,
0.94509804248809814, 0.94509804248809814), (0.93434345722198486,
0.94901961088180542, 0.94901961088180542), (0.93939393758773804,
0.9529411792755127, 0.9529411792755127), (0.94444441795349121,
0.9529411792755127, 0.9529411792755127), (0.94949495792388916,
0.95686274766921997, 0.95686274766921997), (0.95454543828964233,
0.96078431606292725, 0.96078431606292725), (0.95959597826004028,
0.96470588445663452, 0.96470588445663452), (0.96464645862579346,
0.9686274528503418, 0.9686274528503418), (0.96969699859619141,
0.97254902124404907, 0.97254902124404907), (0.97474747896194458,
0.97647058963775635, 0.97647058963775635), (0.97979795932769775,
0.98039215803146362, 0.98039215803146362), (0.9848484992980957,
0.9843137264251709, 0.9843137264251709), (0.98989897966384888,
0.98823529481887817, 0.98823529481887817), (0.99494951963424683,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)], 'green': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.035294119268655777, 0.035294119268655777), (0.010101010091602802,
0.074509806931018829, 0.074509806931018829), (0.015151515603065491,
0.10980392247438431, 0.10980392247438431), (0.020202020183205605,
0.14901961386203766, 0.14901961386203766), (0.025252524763345718,
0.18431372940540314, 0.18431372940540314), (0.030303031206130981,
0.22352941334247589, 0.22352941334247589), (0.035353533923625946,
0.25882354378700256, 0.25882354378700256), (0.040404040366411209,
0.29803922772407532, 0.29803922772407532), (0.045454546809196472,
0.3333333432674408, 0.3333333432674408), (0.050505049526691437,
0.37254902720451355, 0.37254902720451355), (0.0555555559694767,
0.36862745881080627, 0.36862745881080627), (0.060606062412261963,
0.3333333432674408, 0.3333333432674408), (0.065656565129756927,
0.29411765933036804, 0.29411765933036804), (0.070707067847251892,
0.25882354378700256, 0.25882354378700256), (0.075757578015327454,
0.21960784494876862, 0.21960784494876862), (0.080808080732822418,
0.18431372940540314, 0.18431372940540314), (0.085858583450317383,
0.14509804546833038, 0.14509804546833038), (0.090909093618392944,
0.10980392247438431, 0.10980392247438431), (0.095959596335887909,
0.070588238537311554, 0.070588238537311554), (0.10101009905338287,
0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0,
0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317,
1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0,
1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0), (0.79797977209091187,
0.015686275437474251, 0.015686275437474251), (0.80303031206130981,
0.031372550874948502, 0.031372550874948502), (0.80808079242706299,
0.050980392843484879, 0.050980392843484879), (0.81313133239746094,
0.066666670143604279, 0.066666670143604279), (0.81818181276321411,
0.086274512112140656, 0.086274512112140656), (0.82323235273361206,
0.10588235408067703, 0.10588235408067703), (0.82828283309936523,
0.12156862765550613, 0.12156862765550613), (0.83333331346511841,
0.14117647707462311, 0.14117647707462311), (0.83838385343551636,
0.15686275064945221, 0.15686275064945221), (0.84343433380126953,
0.17647059261798859, 0.17647059261798859), (0.84848487377166748,
0.20000000298023224, 0.20000000298023224), (0.85353535413742065,
0.23137255012989044, 0.23137255012989044), (0.85858583450317383,
0.25882354378700256, 0.25882354378700256), (0.86363637447357178,
0.29019609093666077, 0.29019609093666077), (0.86868685483932495,
0.32156863808631897, 0.32156863808631897), (0.8737373948097229,
0.35294118523597717, 0.35294118523597717), (0.87878787517547607,
0.38431373238563538, 0.38431373238563538), (0.88383835554122925,
0.41568627953529358, 0.41568627953529358), (0.8888888955116272,
0.44313725829124451, 0.44313725829124451), (0.89393937587738037,
0.47450980544090271, 0.47450980544090271), (0.89898991584777832,
0.5058823823928833, 0.5058823823928833), (0.90404039621353149,
0.52941179275512695, 0.52941179275512695), (0.90909093618392944,
0.55294120311737061, 0.55294120311737061), (0.91414141654968262,
0.57254904508590698, 0.57254904508590698), (0.91919189691543579,
0.59607845544815063, 0.59607845544815063), (0.92424243688583374,
0.61960786581039429, 0.61960786581039429), (0.92929291725158691,
0.64313727617263794, 0.64313727617263794), (0.93434345722198486,
0.66274511814117432, 0.66274511814117432), (0.93939393758773804,
0.68627452850341797, 0.68627452850341797), (0.94444441795349121,
0.70980393886566162, 0.70980393886566162), (0.94949495792388916,
0.729411780834198, 0.729411780834198), (0.95454543828964233,
0.75294119119644165, 0.75294119119644165), (0.95959597826004028,
0.78039216995239258, 0.78039216995239258), (0.96464645862579346,
0.80392158031463623, 0.80392158031463623), (0.96969699859619141,
0.82745099067687988, 0.82745099067687988), (0.97474747896194458,
0.85098040103912354, 0.85098040103912354), (0.97979795932769775,
0.87450981140136719, 0.87450981140136719), (0.9848484992980957,
0.90196079015731812, 0.90196079015731812), (0.98989897966384888,
0.92549020051956177, 0.92549020051956177), (0.99494951963424683,
0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907,
0.97254902124404907)], 'red': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0,
0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0), (0.31818181276321411,
0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967,
0.043137256056070328, 0.043137256056070328), (0.32828283309936523,
0.08235294371843338, 0.08235294371843338), (0.3333333432674408,
0.11764705926179886, 0.11764705926179886), (0.33838382363319397,
0.15686275064945221, 0.15686275064945221), (0.34343433380126953,
0.19607843458652496, 0.19607843458652496), (0.34848484396934509,
0.23137255012989044, 0.23137255012989044), (0.35353535413742065,
0.27058824896812439, 0.27058824896812439), (0.35858586430549622,
0.30980393290519714, 0.30980393290519714), (0.36363637447357178,
0.3490196168422699, 0.3490196168422699), (0.36868685483932495,
0.38431373238563538, 0.38431373238563538), (0.37373736500740051,
0.40392157435417175, 0.40392157435417175), (0.37878787517547607,
0.41568627953529358, 0.41568627953529358), (0.38383838534355164,
0.42352941632270813, 0.42352941632270813), (0.3888888955116272,
0.43137255311012268, 0.43137255311012268), (0.39393940567970276,
0.44313725829124451, 0.44313725829124451), (0.39898988604545593,
0.45098039507865906, 0.45098039507865906), (0.40404039621353149,
0.45882353186607361, 0.45882353186607361), (0.40909090638160706,
0.47058823704719543, 0.47058823704719543), (0.41414141654968262,
0.47843137383460999, 0.47843137383460999), (0.41919192671775818,
0.49019607901573181, 0.49019607901573181), (0.42424243688583374,
0.50196081399917603, 0.50196081399917603), (0.42929291725158691,
0.52549022436141968, 0.52549022436141968), (0.43434342741966248,
0.54901963472366333, 0.54901963472366333), (0.43939393758773804,
0.57254904508590698, 0.57254904508590698), (0.4444444477558136,
0.60000002384185791, 0.60000002384185791), (0.44949495792388916,
0.62352943420410156, 0.62352943420410156), (0.45454546809196472,
0.64705884456634521, 0.64705884456634521), (0.4595959484577179,
0.67058825492858887, 0.67058825492858887), (0.46464645862579346,
0.69411766529083252, 0.69411766529083252), (0.46969696879386902,
0.72156864404678345, 0.72156864404678345), (0.47474747896194458,
0.7450980544090271, 0.7450980544090271), (0.47979798913002014,
0.76862746477127075, 0.76862746477127075), (0.4848484992980957,
0.7921568751335144, 0.7921568751335144), (0.48989897966384888,
0.81568628549575806, 0.81568628549575806), (0.49494948983192444,
0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536,
0.86274510622024536), (0.50505048036575317, 0.88627451658248901,
0.88627451658248901), (0.51010102033615112, 0.90980392694473267,
0.90980392694473267), (0.5151515007019043, 0.93333333730697632,
0.93333333730697632), (0.52020204067230225, 0.95686274766921997,
0.95686274766921997), (0.52525252103805542, 0.98039215803146362,
0.98039215803146362), (0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)]}
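# A note on the layout above and below: each colormap in this file is a
# "segmentdata" dict mapping 'red', 'green' and 'blue' to a sorted list of
# (x, y0, y1) rows, with x running from 0.0 to 1.0.  For inputs just below
# x the channel interpolates toward y0; for inputs just above x it restarts
# from y1, so unequal y0/y1 give a hard step while equal values (as in the
# generated tables here) give a continuous piecewise-linear ramp.  A minimal
# illustrative row (not taken from any table in this module):
#
#     row = (0.5, 0.2, 0.8)   # at x=0.5 the channel would jump 0.2 -> 0.8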
_gist_rainbow_data = {'blue':
[(0.0, 0.16470588743686676, 0.16470588743686676), (0.0042016808874905109,
0.14117647707462311, 0.14117647707462311), (0.0084033617749810219,
0.12156862765550613, 0.12156862765550613), (0.012605042196810246,
0.10196078568696976, 0.10196078568696976), (0.016806723549962044,
0.078431375324726105, 0.078431375324726105), (0.021008403971791267,
0.058823529630899429, 0.058823529630899429), (0.025210084393620491,
0.039215687662363052, 0.039215687662363052), (0.029411764815449715,
0.015686275437474251, 0.015686275437474251), (0.033613447099924088, 0.0,
0.0), (0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0039215688593685627, 0.0039215688593685627),
(0.4117647111415863, 0.047058824449777603, 0.047058824449777603),
(0.41596639156341553, 0.066666670143604279, 0.066666670143604279),
(0.42016807198524475, 0.090196080505847931, 0.090196080505847931),
(0.42436975240707397, 0.10980392247438431, 0.10980392247438431),
(0.4285714328289032, 0.12941177189350128, 0.12941177189350128),
(0.43277311325073242, 0.15294118225574493, 0.15294118225574493),
(0.43697479367256165, 0.17254902422428131, 0.17254902422428131),
(0.44117647409439087, 0.19215686619281769, 0.19215686619281769),
(0.44537815451622009, 0.21568627655506134, 0.21568627655506134),
(0.44957983493804932, 0.23529411852359772, 0.23529411852359772),
(0.45378151535987854, 0.25882354378700256, 0.25882354378700256),
(0.45798319578170776, 0.27843138575553894, 0.27843138575553894),
(0.46218487620353699, 0.29803922772407532, 0.29803922772407532),
(0.46638655662536621, 0.32156863808631897, 0.32156863808631897),
(0.47058823704719543, 0.34117648005485535, 0.34117648005485535),
(0.47478991746902466, 0.38431373238563538, 0.38431373238563538),
(0.47899159789085388, 0.40392157435417175, 0.40392157435417175),
(0.48319327831268311, 0.42745098471641541, 0.42745098471641541),
(0.48739495873451233, 0.44705882668495178, 0.44705882668495178),
(0.49159663915634155, 0.46666666865348816, 0.46666666865348816),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.50980395078659058, 0.50980395078659058), (0.50420171022415161,
0.52941179275512695, 0.52941179275512695), (0.50840336084365845,
0.55294120311737061, 0.55294120311737061), (0.51260507106781006,
0.57254904508590698, 0.57254904508590698), (0.51680672168731689,
0.59607845544815063, 0.59607845544815063), (0.52100843191146851,
0.61568629741668701, 0.61568629741668701), (0.52521008253097534,
0.63529413938522339, 0.63529413938522339), (0.52941179275512695,
0.65882354974746704, 0.65882354974746704), (0.53361344337463379,
0.67843139171600342, 0.67843139171600342), (0.5378151535987854,
0.72156864404678345, 0.72156864404678345), (0.54201680421829224,
0.74117648601531982, 0.74117648601531982), (0.54621851444244385,
0.76470589637756348, 0.76470589637756348), (0.55042016506195068,
0.78431373834609985, 0.78431373834609985), (0.55462187528610229,
0.80392158031463623, 0.80392158031463623), (0.55882352590560913,
0.82745099067687988, 0.82745099067687988), (0.56302523612976074,
0.84705883264541626, 0.84705883264541626), (0.56722688674926758,
0.87058824300765991, 0.87058824300765991), (0.57142859697341919,
0.89019608497619629, 0.89019608497619629), (0.57563024759292603,
0.90980392694473267, 0.90980392694473267), (0.57983195781707764,
0.93333333730697632, 0.93333333730697632), (0.58403360843658447,
0.9529411792755127, 0.9529411792755127), (0.58823531866073608,
0.97254902124404907, 0.97254902124404907), (0.59243696928024292,
0.99607843160629272, 0.99607843160629272), (0.59663867950439453, 1.0,
1.0), (0.60084033012390137, 1.0, 1.0), (0.60504204034805298, 1.0, 1.0),
(0.60924369096755981, 1.0, 1.0), (0.61344540119171143, 1.0, 1.0),
(0.61764705181121826, 1.0, 1.0), (0.62184876203536987, 1.0, 1.0),
(0.62605041265487671, 1.0, 1.0), (0.63025212287902832, 1.0, 1.0),
(0.63445377349853516, 1.0, 1.0), (0.63865548372268677, 1.0, 1.0),
(0.6428571343421936, 1.0, 1.0), (0.64705884456634521, 1.0, 1.0),
(0.65126049518585205, 1.0, 1.0), (0.65546220541000366, 1.0, 1.0),
(0.6596638560295105, 1.0, 1.0), (0.66386556625366211, 1.0, 1.0),
(0.66806721687316895, 1.0, 1.0), (0.67226892709732056, 1.0, 1.0),
(0.67647057771682739, 1.0, 1.0), (0.680672287940979, 1.0, 1.0),
(0.68487393856048584, 1.0, 1.0), (0.68907564878463745, 1.0, 1.0),
(0.69327729940414429, 1.0, 1.0), (0.6974790096282959, 1.0, 1.0),
(0.70168066024780273, 1.0, 1.0), (0.70588237047195435, 1.0, 1.0),
(0.71008402109146118, 1.0, 1.0), (0.71428573131561279, 1.0, 1.0),
(0.71848738193511963, 1.0, 1.0), (0.72268909215927124, 1.0, 1.0),
(0.72689074277877808, 1.0, 1.0), (0.73109245300292969, 1.0, 1.0),
(0.73529410362243652, 1.0, 1.0), (0.73949581384658813, 1.0, 1.0),
(0.74369746446609497, 1.0, 1.0), (0.74789917469024658, 1.0, 1.0),
(0.75210082530975342, 1.0, 1.0), (0.75630253553390503, 1.0, 1.0),
(0.76050418615341187, 1.0, 1.0), (0.76470589637756348, 1.0, 1.0),
(0.76890754699707031, 1.0, 1.0), (0.77310925722122192, 1.0, 1.0),
(0.77731090784072876, 1.0, 1.0), (0.78151261806488037, 1.0, 1.0),
(0.78571426868438721, 1.0, 1.0), (0.78991597890853882, 1.0, 1.0),
(0.79411762952804565, 1.0, 1.0), (0.79831933975219727, 1.0, 1.0),
(0.8025209903717041, 1.0, 1.0), (0.80672270059585571, 1.0, 1.0),
(0.81092435121536255, 1.0, 1.0), (0.81512606143951416, 1.0, 1.0),
(0.819327712059021, 1.0, 1.0), (0.82352942228317261, 1.0, 1.0),
(0.82773107290267944, 1.0, 1.0), (0.83193278312683105, 1.0, 1.0),
(0.83613443374633789, 1.0, 1.0), (0.8403361439704895, 1.0, 1.0),
(0.84453779458999634, 1.0, 1.0), (0.84873950481414795, 1.0, 1.0),
(0.85294115543365479, 1.0, 1.0), (0.8571428656578064, 1.0, 1.0),
(0.86134451627731323, 1.0, 1.0), (0.86554622650146484, 1.0, 1.0),
(0.86974787712097168, 1.0, 1.0), (0.87394958734512329, 1.0, 1.0),
(0.87815123796463013, 1.0, 1.0), (0.88235294818878174, 1.0, 1.0),
(0.88655459880828857, 1.0, 1.0), (0.89075630903244019, 1.0, 1.0),
(0.89495795965194702, 1.0, 1.0), (0.89915966987609863, 1.0, 1.0),
(0.90336132049560547, 1.0, 1.0), (0.90756303071975708, 1.0, 1.0),
(0.91176468133926392, 1.0, 1.0), (0.91596639156341553, 1.0, 1.0),
(0.92016804218292236, 1.0, 1.0), (0.92436975240707397, 1.0, 1.0),
(0.92857140302658081, 1.0, 1.0), (0.93277311325073242, 1.0, 1.0),
(0.93697476387023926, 1.0, 1.0), (0.94117647409439087, 1.0, 1.0),
(0.94537812471389771, 1.0, 1.0), (0.94957983493804932, 1.0, 1.0),
(0.95378148555755615, 1.0, 1.0), (0.95798319578170776, 1.0, 1.0),
(0.9621848464012146, 1.0, 1.0), (0.96638655662536621, 0.99607843160629272,
0.99607843160629272), (0.97058820724487305, 0.97647058963775635,
0.97647058963775635), (0.97478991746902466, 0.9529411792755127,
0.9529411792755127), (0.97899156808853149, 0.91372549533843994,
0.91372549533843994), (0.98319327831268311, 0.89019608497619629,
0.89019608497619629), (0.98739492893218994, 0.87058824300765991,
0.87058824300765991), (0.99159663915634155, 0.85098040103912354,
0.85098040103912354), (0.99579828977584839, 0.82745099067687988,
0.82745099067687988), (1.0, 0.80784314870834351, 0.80784314870834351)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.019607843831181526, 0.019607843831181526),
(0.037815127521753311, 0.043137256056070328, 0.043137256056070328),
(0.042016807943582535, 0.062745101749897003, 0.062745101749897003),
(0.046218488365411758, 0.086274512112140656, 0.086274512112140656),
(0.050420168787240982, 0.10588235408067703, 0.10588235408067703),
(0.054621849209070206, 0.12549020349979401, 0.12549020349979401),
(0.058823529630899429, 0.14901961386203766, 0.14901961386203766),
(0.063025213778018951, 0.16862745583057404, 0.16862745583057404),
(0.067226894199848175, 0.18823529779911041, 0.18823529779911041),
(0.071428574621677399, 0.21176470816135406, 0.21176470816135406),
(0.075630255043506622, 0.23137255012989044, 0.23137255012989044),
(0.079831935465335846, 0.25490197539329529, 0.25490197539329529),
(0.08403361588716507, 0.27450981736183167, 0.27450981736183167),
(0.088235296308994293, 0.29411765933036804, 0.29411765933036804),
(0.092436976730823517, 0.31764706969261169, 0.31764706969261169),
(0.09663865715265274, 0.35686275362968445, 0.35686275362968445),
(0.10084033757448196, 0.3803921639919281, 0.3803921639919281),
(0.10504201799631119, 0.40000000596046448, 0.40000000596046448),
(0.10924369841814041, 0.42352941632270813, 0.42352941632270813),
(0.11344537883996964, 0.44313725829124451, 0.44313725829124451),
(0.11764705926179886, 0.46274510025978088, 0.46274510025978088),
(0.12184873968362808, 0.48627451062202454, 0.48627451062202454),
(0.1260504275560379, 0.5058823823928833, 0.5058823823928833),
(0.13025210797786713, 0.52941179275512695, 0.52941179275512695),
(0.13445378839969635, 0.54901963472366333, 0.54901963472366333),
(0.13865546882152557, 0.56862747669219971, 0.56862747669219971),
(0.1428571492433548, 0.59215688705444336, 0.59215688705444336),
(0.14705882966518402, 0.61176472902297974, 0.61176472902297974),
(0.15126051008701324, 0.63137257099151611, 0.63137257099151611),
(0.15546219050884247, 0.65490198135375977, 0.65490198135375977),
(0.15966387093067169, 0.69803923368453979, 0.69803923368453979),
(0.16386555135250092, 0.71764707565307617, 0.71764707565307617),
(0.16806723177433014, 0.73725491762161255, 0.73725491762161255),
(0.17226891219615936, 0.7607843279838562, 0.7607843279838562),
(0.17647059261798859, 0.78039216995239258, 0.78039216995239258),
(0.18067227303981781, 0.80000001192092896, 0.80000001192092896),
(0.18487395346164703, 0.82352942228317261, 0.82352942228317261),
(0.18907563388347626, 0.84313726425170898, 0.84313726425170898),
(0.19327731430530548, 0.86666667461395264, 0.86666667461395264),
(0.1974789947271347, 0.88627451658248901, 0.88627451658248901),
(0.20168067514896393, 0.90588235855102539, 0.90588235855102539),
(0.20588235557079315, 0.92941176891326904, 0.92941176891326904),
(0.21008403599262238, 0.94901961088180542, 0.94901961088180542),
(0.2142857164144516, 0.9686274528503418, 0.9686274528503418),
(0.21848739683628082, 0.99215686321258545, 0.99215686321258545),
(0.22268907725811005, 1.0, 1.0), (0.22689075767993927, 1.0, 1.0),
(0.23109243810176849, 1.0, 1.0), (0.23529411852359772, 1.0, 1.0),
(0.23949579894542694, 1.0, 1.0), (0.24369747936725616, 1.0, 1.0),
(0.24789915978908539, 1.0, 1.0), (0.25210085511207581, 1.0, 1.0),
(0.25630253553390503, 1.0, 1.0), (0.26050421595573425, 1.0, 1.0),
(0.26470589637756348, 1.0, 1.0), (0.2689075767993927, 1.0, 1.0),
(0.27310925722122192, 1.0, 1.0), (0.27731093764305115, 1.0, 1.0),
(0.28151261806488037, 1.0, 1.0), (0.28571429848670959, 1.0, 1.0),
(0.28991597890853882, 1.0, 1.0), (0.29411765933036804, 1.0, 1.0),
(0.29831933975219727, 1.0, 1.0), (0.30252102017402649, 1.0, 1.0),
(0.30672270059585571, 1.0, 1.0), (0.31092438101768494, 1.0, 1.0),
(0.31512606143951416, 1.0, 1.0), (0.31932774186134338, 1.0, 1.0),
(0.32352942228317261, 1.0, 1.0), (0.32773110270500183, 1.0, 1.0),
(0.33193278312683105, 1.0, 1.0), (0.33613446354866028, 1.0, 1.0),
(0.3403361439704895, 1.0, 1.0), (0.34453782439231873, 1.0, 1.0),
(0.34873950481414795, 1.0, 1.0), (0.35294118523597717, 1.0, 1.0),
(0.3571428656578064, 1.0, 1.0), (0.36134454607963562, 1.0, 1.0),
(0.36554622650146484, 1.0, 1.0), (0.36974790692329407, 1.0, 1.0),
(0.37394958734512329, 1.0, 1.0), (0.37815126776695251, 1.0, 1.0),
(0.38235294818878174, 1.0, 1.0), (0.38655462861061096, 1.0, 1.0),
(0.39075630903244019, 1.0, 1.0), (0.39495798945426941, 1.0, 1.0),
(0.39915966987609863, 1.0, 1.0), (0.40336135029792786, 1.0, 1.0),
(0.40756303071975708, 1.0, 1.0), (0.4117647111415863, 1.0, 1.0),
(0.41596639156341553, 1.0, 1.0), (0.42016807198524475, 1.0, 1.0),
(0.42436975240707397, 1.0, 1.0), (0.4285714328289032, 1.0, 1.0),
(0.43277311325073242, 1.0, 1.0), (0.43697479367256165, 1.0, 1.0),
(0.44117647409439087, 1.0, 1.0), (0.44537815451622009, 1.0, 1.0),
(0.44957983493804932, 1.0, 1.0), (0.45378151535987854, 1.0, 1.0),
(0.45798319578170776, 1.0, 1.0), (0.46218487620353699, 1.0, 1.0),
(0.46638655662536621, 1.0, 1.0), (0.47058823704719543, 1.0, 1.0),
(0.47478991746902466, 1.0, 1.0), (0.47899159789085388, 1.0, 1.0),
(0.48319327831268311, 1.0, 1.0), (0.48739495873451233, 1.0, 1.0),
(0.49159663915634155, 1.0, 1.0), (0.49579831957817078, 1.0, 1.0), (0.5,
1.0, 1.0), (0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 1.0,
1.0), (0.51260507106781006, 1.0, 1.0), (0.51680672168731689, 1.0, 1.0),
(0.52100843191146851, 1.0, 1.0), (0.52521008253097534, 1.0, 1.0),
(0.52941179275512695, 1.0, 1.0), (0.53361344337463379, 1.0, 1.0),
(0.5378151535987854, 1.0, 1.0), (0.54201680421829224, 1.0, 1.0),
(0.54621851444244385, 1.0, 1.0), (0.55042016506195068, 1.0, 1.0),
(0.55462187528610229, 1.0, 1.0), (0.55882352590560913, 1.0, 1.0),
(0.56302523612976074, 1.0, 1.0), (0.56722688674926758, 1.0, 1.0),
(0.57142859697341919, 1.0, 1.0), (0.57563024759292603, 1.0, 1.0),
(0.57983195781707764, 1.0, 1.0), (0.58403360843658447, 1.0, 1.0),
(0.58823531866073608, 1.0, 1.0), (0.59243696928024292, 1.0, 1.0),
(0.59663867950439453, 0.98039215803146362, 0.98039215803146362),
(0.60084033012390137, 0.93725490570068359, 0.93725490570068359),
(0.60504204034805298, 0.91764706373214722, 0.91764706373214722),
(0.60924369096755981, 0.89411765336990356, 0.89411765336990356),
(0.61344540119171143, 0.87450981140136719, 0.87450981140136719),
(0.61764705181121826, 0.85490196943283081, 0.85490196943283081),
(0.62184876203536987, 0.83137255907058716, 0.83137255907058716),
(0.62605041265487671, 0.81176471710205078, 0.81176471710205078),
(0.63025212287902832, 0.78823530673980713, 0.78823530673980713),
(0.63445377349853516, 0.76862746477127075, 0.76862746477127075),
(0.63865548372268677, 0.74901962280273438, 0.74901962280273438),
(0.6428571343421936, 0.72549021244049072, 0.72549021244049072),
(0.64705884456634521, 0.70588237047195435, 0.70588237047195435),
(0.65126049518585205, 0.68235296010971069, 0.68235296010971069),
(0.65546220541000366, 0.66274511814117432, 0.66274511814117432),
(0.6596638560295105, 0.64313727617263794, 0.64313727617263794),
(0.66386556625366211, 0.60000002384185791, 0.60000002384185791),
(0.66806721687316895, 0.58039218187332153, 0.58039218187332153),
(0.67226892709732056, 0.55686277151107788, 0.55686277151107788),
(0.67647057771682739, 0.5372549295425415, 0.5372549295425415),
(0.680672287940979, 0.51372551918029785, 0.51372551918029785),
(0.68487393856048584, 0.49411764740943909, 0.49411764740943909),
(0.68907564878463745, 0.47450980544090271, 0.47450980544090271),
(0.69327729940414429, 0.45098039507865906, 0.45098039507865906),
(0.6974790096282959, 0.43137255311012268, 0.43137255311012268),
(0.70168066024780273, 0.4117647111415863, 0.4117647111415863),
(0.70588237047195435, 0.38823530077934265, 0.38823530077934265),
(0.71008402109146118, 0.36862745881080627, 0.36862745881080627),
(0.71428573131561279, 0.34509804844856262, 0.34509804844856262),
(0.71848738193511963, 0.32549020648002625, 0.32549020648002625),
(0.72268909215927124, 0.30588236451148987, 0.30588236451148987),
(0.72689074277877808, 0.26274511218070984, 0.26274511218070984),
(0.73109245300292969, 0.24313725531101227, 0.24313725531101227),
(0.73529410362243652, 0.21960784494876862, 0.21960784494876862),
(0.73949581384658813, 0.20000000298023224, 0.20000000298023224),
(0.74369746446609497, 0.17647059261798859, 0.17647059261798859),
(0.74789917469024658, 0.15686275064945221, 0.15686275064945221),
(0.75210082530975342, 0.13725490868091583, 0.13725490868091583),
(0.75630253553390503, 0.11372549086809158, 0.11372549086809158),
(0.76050418615341187, 0.094117648899555206, 0.094117648899555206),
(0.76470589637756348, 0.070588238537311554, 0.070588238537311554),
(0.76890754699707031, 0.050980392843484879, 0.050980392843484879),
(0.77310925722122192, 0.031372550874948502, 0.031372550874948502),
(0.77731090784072876, 0.0078431377187371254, 0.0078431377187371254),
(0.78151261806488037, 0.0, 0.0), (0.78571426868438721, 0.0, 0.0),
(0.78991597890853882, 0.0, 0.0), (0.79411762952804565, 0.0, 0.0),
(0.79831933975219727, 0.0, 0.0), (0.8025209903717041, 0.0, 0.0),
(0.80672270059585571, 0.0, 0.0), (0.81092435121536255, 0.0, 0.0),
(0.81512606143951416, 0.0, 0.0), (0.819327712059021, 0.0, 0.0),
(0.82352942228317261, 0.0, 0.0), (0.82773107290267944, 0.0, 0.0),
(0.83193278312683105, 0.0, 0.0), (0.83613443374633789, 0.0, 0.0),
(0.8403361439704895, 0.0, 0.0), (0.84453779458999634, 0.0, 0.0),
(0.84873950481414795, 0.0, 0.0), (0.85294115543365479, 0.0, 0.0),
(0.8571428656578064, 0.0, 0.0), (0.86134451627731323, 0.0, 0.0),
(0.86554622650146484, 0.0, 0.0), (0.86974787712097168, 0.0, 0.0),
(0.87394958734512329, 0.0, 0.0), (0.87815123796463013, 0.0, 0.0),
(0.88235294818878174, 0.0, 0.0), (0.88655459880828857, 0.0, 0.0),
(0.89075630903244019, 0.0, 0.0), (0.89495795965194702, 0.0, 0.0),
(0.89915966987609863, 0.0, 0.0), (0.90336132049560547, 0.0, 0.0),
(0.90756303071975708, 0.0, 0.0), (0.91176468133926392, 0.0, 0.0),
(0.91596639156341553, 0.0, 0.0), (0.92016804218292236, 0.0, 0.0),
(0.92436975240707397, 0.0, 0.0), (0.92857140302658081, 0.0, 0.0),
(0.93277311325073242, 0.0, 0.0), (0.93697476387023926, 0.0, 0.0),
(0.94117647409439087, 0.0, 0.0), (0.94537812471389771, 0.0, 0.0),
(0.94957983493804932, 0.0, 0.0), (0.95378148555755615, 0.0, 0.0),
(0.95798319578170776, 0.0, 0.0), (0.9621848464012146, 0.0, 0.0),
(0.96638655662536621, 0.0, 0.0), (0.97058820724487305, 0.0, 0.0),
(0.97478991746902466, 0.0, 0.0), (0.97899156808853149, 0.0, 0.0),
(0.98319327831268311, 0.0, 0.0), (0.98739492893218994, 0.0, 0.0),
(0.99159663915634155, 0.0, 0.0), (0.99579828977584839, 0.0, 0.0), (1.0,
0.0, 0.0)], 'red': [(0.0, 1.0, 1.0), (0.0042016808874905109, 1.0, 1.0),
(0.0084033617749810219, 1.0, 1.0), (0.012605042196810246, 1.0, 1.0),
(0.016806723549962044, 1.0, 1.0), (0.021008403971791267, 1.0, 1.0),
(0.025210084393620491, 1.0, 1.0), (0.029411764815449715, 1.0, 1.0),
(0.033613447099924088, 1.0, 1.0), (0.037815127521753311, 1.0, 1.0),
(0.042016807943582535, 1.0, 1.0), (0.046218488365411758, 1.0, 1.0),
(0.050420168787240982, 1.0, 1.0), (0.054621849209070206, 1.0, 1.0),
(0.058823529630899429, 1.0, 1.0), (0.063025213778018951, 1.0, 1.0),
(0.067226894199848175, 1.0, 1.0), (0.071428574621677399, 1.0, 1.0),
(0.075630255043506622, 1.0, 1.0), (0.079831935465335846, 1.0, 1.0),
(0.08403361588716507, 1.0, 1.0), (0.088235296308994293, 1.0, 1.0),
(0.092436976730823517, 1.0, 1.0), (0.09663865715265274, 1.0, 1.0),
(0.10084033757448196, 1.0, 1.0), (0.10504201799631119, 1.0, 1.0),
(0.10924369841814041, 1.0, 1.0), (0.11344537883996964, 1.0, 1.0),
(0.11764705926179886, 1.0, 1.0), (0.12184873968362808, 1.0, 1.0),
(0.1260504275560379, 1.0, 1.0), (0.13025210797786713, 1.0, 1.0),
(0.13445378839969635, 1.0, 1.0), (0.13865546882152557, 1.0, 1.0),
(0.1428571492433548, 1.0, 1.0), (0.14705882966518402, 1.0, 1.0),
(0.15126051008701324, 1.0, 1.0), (0.15546219050884247, 1.0, 1.0),
(0.15966387093067169, 1.0, 1.0), (0.16386555135250092, 1.0, 1.0),
(0.16806723177433014, 1.0, 1.0), (0.17226891219615936, 1.0, 1.0),
(0.17647059261798859, 1.0, 1.0), (0.18067227303981781, 1.0, 1.0),
(0.18487395346164703, 1.0, 1.0), (0.18907563388347626, 1.0, 1.0),
(0.19327731430530548, 1.0, 1.0), (0.1974789947271347, 1.0, 1.0),
(0.20168067514896393, 1.0, 1.0), (0.20588235557079315, 1.0, 1.0),
(0.21008403599262238, 1.0, 1.0), (0.2142857164144516, 1.0, 1.0),
(0.21848739683628082, 1.0, 1.0), (0.22268907725811005,
0.96078431606292725, 0.96078431606292725), (0.22689075767993927,
0.94117647409439087, 0.94117647409439087), (0.23109243810176849,
0.92156863212585449, 0.92156863212585449), (0.23529411852359772,
0.89803922176361084, 0.89803922176361084), (0.23949579894542694,
0.87843137979507446, 0.87843137979507446), (0.24369747936725616,
0.85882353782653809, 0.85882353782653809), (0.24789915978908539,
0.83529412746429443, 0.83529412746429443), (0.25210085511207581,
0.81568628549575806, 0.81568628549575806), (0.25630253553390503,
0.7921568751335144, 0.7921568751335144), (0.26050421595573425,
0.77254903316497803, 0.77254903316497803), (0.26470589637756348,
0.75294119119644165, 0.75294119119644165), (0.2689075767993927,
0.729411780834198, 0.729411780834198), (0.27310925722122192,
0.70980393886566162, 0.70980393886566162), (0.27731093764305115,
0.68627452850341797, 0.68627452850341797), (0.28151261806488037,
0.66666668653488159, 0.66666668653488159), (0.28571429848670959,
0.62352943420410156, 0.62352943420410156), (0.28991597890853882,
0.60392159223556519, 0.60392159223556519), (0.29411765933036804,
0.58431375026702881, 0.58431375026702881), (0.29831933975219727,
0.56078433990478516, 0.56078433990478516), (0.30252102017402649,
0.54117649793624878, 0.54117649793624878), (0.30672270059585571,
0.51764708757400513, 0.51764708757400513), (0.31092438101768494,
0.49803921580314636, 0.49803921580314636), (0.31512606143951416,
0.47843137383460999, 0.47843137383460999), (0.31932774186134338,
0.45490196347236633, 0.45490196347236633), (0.32352942228317261,
0.43529412150382996, 0.43529412150382996), (0.32773110270500183,
0.41568627953529358, 0.41568627953529358), (0.33193278312683105,
0.39215686917304993, 0.39215686917304993), (0.33613446354866028,
0.37254902720451355, 0.37254902720451355), (0.3403361439704895,
0.3490196168422699, 0.3490196168422699), (0.34453782439231873,
0.32941177487373352, 0.32941177487373352), (0.34873950481414795,
0.28627452254295349, 0.28627452254295349), (0.35294118523597717,
0.26666668057441711, 0.26666668057441711), (0.3571428656578064,
0.24705882370471954, 0.24705882370471954), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.20392157137393951, 0.20392157137393951), (0.36974790692329407,
0.18039216101169586, 0.18039216101169586), (0.37394958734512329,
0.16078431904315948, 0.16078431904315948), (0.37815126776695251,
0.14117647707462311, 0.14117647707462311), (0.38235294818878174,
0.11764705926179886, 0.11764705926179886), (0.38655462861061096,
0.098039217293262482, 0.098039217293262482), (0.39075630903244019,
0.074509806931018829, 0.074509806931018829), (0.39495798945426941,
0.054901961237192154, 0.054901961237192154), (0.39915966987609863,
0.035294119268655777, 0.035294119268655777), (0.40336135029792786,
0.011764706112444401, 0.011764706112444401), (0.40756303071975708, 0.0,
0.0), (0.4117647111415863, 0.0, 0.0), (0.41596639156341553, 0.0, 0.0),
(0.42016807198524475, 0.0, 0.0), (0.42436975240707397, 0.0, 0.0),
(0.4285714328289032, 0.0, 0.0), (0.43277311325073242, 0.0, 0.0),
(0.43697479367256165, 0.0, 0.0), (0.44117647409439087, 0.0, 0.0),
(0.44537815451622009, 0.0, 0.0), (0.44957983493804932, 0.0, 0.0),
(0.45378151535987854, 0.0, 0.0), (0.45798319578170776, 0.0, 0.0),
(0.46218487620353699, 0.0, 0.0), (0.46638655662536621, 0.0, 0.0),
(0.47058823704719543, 0.0, 0.0), (0.47478991746902466, 0.0, 0.0),
(0.47899159789085388, 0.0, 0.0), (0.48319327831268311, 0.0, 0.0),
(0.48739495873451233, 0.0, 0.0), (0.49159663915634155, 0.0, 0.0),
(0.49579831957817078, 0.0, 0.0), (0.5, 0.0, 0.0), (0.50420171022415161,
0.0, 0.0), (0.50840336084365845, 0.0, 0.0), (0.51260507106781006, 0.0,
0.0), (0.51680672168731689, 0.0, 0.0), (0.52100843191146851, 0.0, 0.0),
(0.52521008253097534, 0.0, 0.0), (0.52941179275512695, 0.0, 0.0),
(0.53361344337463379, 0.0, 0.0), (0.5378151535987854, 0.0, 0.0),
(0.54201680421829224, 0.0, 0.0), (0.54621851444244385, 0.0, 0.0),
(0.55042016506195068, 0.0, 0.0), (0.55462187528610229, 0.0, 0.0),
(0.55882352590560913, 0.0, 0.0), (0.56302523612976074, 0.0, 0.0),
(0.56722688674926758, 0.0, 0.0), (0.57142859697341919, 0.0, 0.0),
(0.57563024759292603, 0.0, 0.0), (0.57983195781707764, 0.0, 0.0),
(0.58403360843658447, 0.0, 0.0), (0.58823531866073608, 0.0, 0.0),
(0.59243696928024292, 0.0, 0.0), (0.59663867950439453, 0.0, 0.0),
(0.60084033012390137, 0.0, 0.0), (0.60504204034805298, 0.0, 0.0),
(0.60924369096755981, 0.0, 0.0), (0.61344540119171143, 0.0, 0.0),
(0.61764705181121826, 0.0, 0.0), (0.62184876203536987, 0.0, 0.0),
(0.62605041265487671, 0.0, 0.0), (0.63025212287902832, 0.0, 0.0),
(0.63445377349853516, 0.0, 0.0), (0.63865548372268677, 0.0, 0.0),
(0.6428571343421936, 0.0, 0.0), (0.64705884456634521, 0.0, 0.0),
(0.65126049518585205, 0.0, 0.0), (0.65546220541000366, 0.0, 0.0),
(0.6596638560295105, 0.0, 0.0), (0.66386556625366211, 0.0, 0.0),
(0.66806721687316895, 0.0, 0.0), (0.67226892709732056, 0.0, 0.0),
(0.67647057771682739, 0.0, 0.0), (0.680672287940979, 0.0, 0.0),
(0.68487393856048584, 0.0, 0.0), (0.68907564878463745, 0.0, 0.0),
(0.69327729940414429, 0.0, 0.0), (0.6974790096282959, 0.0, 0.0),
(0.70168066024780273, 0.0, 0.0), (0.70588237047195435, 0.0, 0.0),
(0.71008402109146118, 0.0, 0.0), (0.71428573131561279, 0.0, 0.0),
(0.71848738193511963, 0.0, 0.0), (0.72268909215927124, 0.0, 0.0),
(0.72689074277877808, 0.0, 0.0), (0.73109245300292969, 0.0, 0.0),
(0.73529410362243652, 0.0, 0.0), (0.73949581384658813, 0.0, 0.0),
(0.74369746446609497, 0.0, 0.0), (0.74789917469024658, 0.0, 0.0),
(0.75210082530975342, 0.0, 0.0), (0.75630253553390503, 0.0, 0.0),
(0.76050418615341187, 0.0, 0.0), (0.76470589637756348, 0.0, 0.0),
(0.76890754699707031, 0.0, 0.0), (0.77310925722122192, 0.0, 0.0),
(0.77731090784072876, 0.0, 0.0), (0.78151261806488037,
0.0078431377187371254, 0.0078431377187371254), (0.78571426868438721,
0.027450980618596077, 0.027450980618596077), (0.78991597890853882,
0.070588238537311554, 0.070588238537311554), (0.79411762952804565,
0.094117648899555206, 0.094117648899555206), (0.79831933975219727,
0.11372549086809158, 0.11372549086809158), (0.8025209903717041,
0.13333334028720856, 0.13333334028720856), (0.80672270059585571,
0.15686275064945221, 0.15686275064945221), (0.81092435121536255,
0.17647059261798859, 0.17647059261798859), (0.81512606143951416,
0.19607843458652496, 0.19607843458652496), (0.819327712059021,
0.21960784494876862, 0.21960784494876862), (0.82352942228317261,
0.23921568691730499, 0.23921568691730499), (0.82773107290267944,
0.26274511218070984, 0.26274511218070984), (0.83193278312683105,
0.28235295414924622, 0.28235295414924622), (0.83613443374633789,
0.30196079611778259, 0.30196079611778259), (0.8403361439704895,
0.32549020648002625, 0.32549020648002625), (0.84453779458999634,
0.34509804844856262, 0.34509804844856262), (0.84873950481414795,
0.364705890417099, 0.364705890417099), (0.85294115543365479,
0.40784314274787903, 0.40784314274787903), (0.8571428656578064,
0.43137255311012268, 0.43137255311012268), (0.86134451627731323,
0.45098039507865906, 0.45098039507865906), (0.86554622650146484,
0.47058823704719543, 0.47058823704719543), (0.86974787712097168,
0.49411764740943909, 0.49411764740943909), (0.87394958734512329,
0.51372551918029785, 0.51372551918029785), (0.87815123796463013,
0.53333336114883423, 0.53333336114883423), (0.88235294818878174,
0.55686277151107788, 0.55686277151107788), (0.88655459880828857,
0.57647061347961426, 0.57647061347961426), (0.89075630903244019,
0.60000002384185791, 0.60000002384185791), (0.89495795965194702,
0.61960786581039429, 0.61960786581039429), (0.89915966987609863,
0.63921570777893066, 0.63921570777893066), (0.90336132049560547,
0.66274511814117432, 0.66274511814117432), (0.90756303071975708,
0.68235296010971069, 0.68235296010971069), (0.91176468133926392,
0.70588237047195435, 0.70588237047195435), (0.91596639156341553,
0.7450980544090271, 0.7450980544090271), (0.92016804218292236,
0.76862746477127075, 0.76862746477127075), (0.92436975240707397,
0.78823530673980713, 0.78823530673980713), (0.92857140302658081,
0.80784314870834351, 0.80784314870834351), (0.93277311325073242,
0.83137255907058716, 0.83137255907058716), (0.93697476387023926,
0.85098040103912354, 0.85098040103912354), (0.94117647409439087,
0.87450981140136719, 0.87450981140136719), (0.94537812471389771,
0.89411765336990356, 0.89411765336990356), (0.94957983493804932,
0.91372549533843994, 0.91372549533843994), (0.95378148555755615,
0.93725490570068359, 0.93725490570068359), (0.95798319578170776,
0.95686274766921997, 0.95686274766921997), (0.9621848464012146,
0.97647058963775635, 0.97647058963775635), (0.96638655662536621, 1.0,
1.0), (0.97058820724487305, 1.0, 1.0), (0.97478991746902466, 1.0, 1.0),
(0.97899156808853149, 1.0, 1.0), (0.98319327831268311, 1.0, 1.0),
(0.98739492893218994, 1.0, 1.0), (0.99159663915634155, 1.0, 1.0),
(0.99579828977584839, 1.0, 1.0), (1.0, 1.0, 1.0)]}
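# Tables such as _gist_rainbow_data become Colormap objects via
# matplotlib.colors.LinearSegmentedColormap, which samples the segmentdata
# into a lookup table of LUTSIZE entries.  A minimal sketch using only names
# defined in this module ('demo' is an illustrative identifier, not part of
# the module's API):
#
#     demo = colors.LinearSegmentedColormap('gist_rainbow',
#                                           _gist_rainbow_data, LUTSIZE)
#     demo(0.0)   # RGBA near (1.0, 0.0, 0.165, 1.0): the rainbow starts red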
_gist_stern_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.011764706112444401,
0.011764706112444401), (0.012605042196810246, 0.019607843831181526,
0.019607843831181526), (0.016806723549962044, 0.027450980618596077,
0.027450980618596077), (0.021008403971791267, 0.035294119268655777,
0.035294119268655777), (0.025210084393620491, 0.043137256056070328,
0.043137256056070328), (0.029411764815449715, 0.050980392843484879,
0.050980392843484879), (0.033613447099924088, 0.058823529630899429,
0.058823529630899429), (0.037815127521753311, 0.066666670143604279,
0.066666670143604279), (0.042016807943582535, 0.08235294371843338,
0.08235294371843338), (0.046218488365411758, 0.090196080505847931,
0.090196080505847931), (0.050420168787240982, 0.098039217293262482,
0.098039217293262482), (0.054621849209070206, 0.10588235408067703,
0.10588235408067703), (0.058823529630899429, 0.11372549086809158,
0.11372549086809158), (0.063025213778018951, 0.12156862765550613,
0.12156862765550613), (0.067226894199848175, 0.12941177189350128,
0.12941177189350128), (0.071428574621677399, 0.13725490868091583,
0.13725490868091583), (0.075630255043506622, 0.14509804546833038,
0.14509804546833038), (0.079831935465335846, 0.15294118225574493,
0.15294118225574493), (0.08403361588716507, 0.16078431904315948,
0.16078431904315948), (0.088235296308994293, 0.16862745583057404,
0.16862745583057404), (0.092436976730823517, 0.17647059261798859,
0.17647059261798859), (0.09663865715265274, 0.18431372940540314,
0.18431372940540314), (0.10084033757448196, 0.19215686619281769,
0.19215686619281769), (0.10504201799631119, 0.20000000298023224,
0.20000000298023224), (0.10924369841814041, 0.20784313976764679,
0.20784313976764679), (0.11344537883996964, 0.21568627655506134,
0.21568627655506134), (0.11764705926179886, 0.22352941334247589,
0.22352941334247589), (0.12184873968362808, 0.23137255012989044,
0.23137255012989044), (0.1260504275560379, 0.24705882370471954,
0.24705882370471954), (0.13025210797786713, 0.25490197539329529,
0.25490197539329529), (0.13445378839969635, 0.26274511218070984,
0.26274511218070984), (0.13865546882152557, 0.27058824896812439,
0.27058824896812439), (0.1428571492433548, 0.27843138575553894,
0.27843138575553894), (0.14705882966518402, 0.28627452254295349,
0.28627452254295349), (0.15126051008701324, 0.29411765933036804,
0.29411765933036804), (0.15546219050884247, 0.30196079611778259,
0.30196079611778259), (0.15966387093067169, 0.30980393290519714,
0.30980393290519714), (0.16386555135250092, 0.31764706969261169,
0.31764706969261169), (0.16806723177433014, 0.32549020648002625,
0.32549020648002625), (0.17226891219615936, 0.3333333432674408,
0.3333333432674408), (0.17647059261798859, 0.34117648005485535,
0.34117648005485535), (0.18067227303981781, 0.3490196168422699,
0.3490196168422699), (0.18487395346164703, 0.35686275362968445,
0.35686275362968445), (0.18907563388347626, 0.364705890417099,
0.364705890417099), (0.19327731430530548, 0.37254902720451355,
0.37254902720451355), (0.1974789947271347, 0.3803921639919281,
0.3803921639919281), (0.20168067514896393, 0.38823530077934265,
0.38823530077934265), (0.20588235557079315, 0.3960784375667572,
0.3960784375667572), (0.21008403599262238, 0.4117647111415863,
0.4117647111415863), (0.2142857164144516, 0.41960784792900085,
0.41960784792900085), (0.21848739683628082, 0.42745098471641541,
0.42745098471641541), (0.22268907725811005, 0.43529412150382996,
0.43529412150382996), (0.22689075767993927, 0.44313725829124451,
0.44313725829124451), (0.23109243810176849, 0.45098039507865906,
0.45098039507865906), (0.23529411852359772, 0.45882353186607361,
0.45882353186607361), (0.23949579894542694, 0.46666666865348816,
0.46666666865348816), (0.24369747936725616, 0.47450980544090271,
0.47450980544090271), (0.24789915978908539, 0.48235294222831726,
0.48235294222831726), (0.25210085511207581, 0.49803921580314636,
0.49803921580314636), (0.25630253553390503, 0.5058823823928833,
0.5058823823928833), (0.26050421595573425, 0.51372551918029785,
0.51372551918029785), (0.26470589637756348, 0.5215686559677124,
0.5215686559677124), (0.2689075767993927, 0.52941179275512695,
0.52941179275512695), (0.27310925722122192, 0.5372549295425415,
0.5372549295425415), (0.27731093764305115, 0.54509806632995605,
0.54509806632995605), (0.28151261806488037, 0.55294120311737061,
0.55294120311737061), (0.28571429848670959, 0.56078433990478516,
0.56078433990478516), (0.28991597890853882, 0.56862747669219971,
0.56862747669219971), (0.29411765933036804, 0.58431375026702881,
0.58431375026702881), (0.29831933975219727, 0.59215688705444336,
0.59215688705444336), (0.30252102017402649, 0.60000002384185791,
0.60000002384185791), (0.30672270059585571, 0.60784316062927246,
0.60784316062927246), (0.31092438101768494, 0.61568629741668701,
0.61568629741668701), (0.31512606143951416, 0.62352943420410156,
0.62352943420410156), (0.31932774186134338, 0.63137257099151611,
0.63137257099151611), (0.32352942228317261, 0.63921570777893066,
0.63921570777893066), (0.32773110270500183, 0.64705884456634521,
0.64705884456634521), (0.33193278312683105, 0.65490198135375977,
0.65490198135375977), (0.33613446354866028, 0.66274511814117432,
0.66274511814117432), (0.3403361439704895, 0.67058825492858887,
0.67058825492858887), (0.34453782439231873, 0.67843139171600342,
0.67843139171600342), (0.34873950481414795, 0.68627452850341797,
0.68627452850341797), (0.35294118523597717, 0.69411766529083252,
0.69411766529083252), (0.3571428656578064, 0.70196080207824707,
0.70196080207824707), (0.36134454607963562, 0.70980393886566162,
0.70980393886566162), (0.36554622650146484, 0.71764707565307617,
0.71764707565307617), (0.36974790692329407, 0.72549021244049072,
0.72549021244049072), (0.37394958734512329, 0.73333334922790527,
0.73333334922790527), (0.37815126776695251, 0.74901962280273438,
0.74901962280273438), (0.38235294818878174, 0.75686275959014893,
0.75686275959014893), (0.38655462861061096, 0.76470589637756348,
0.76470589637756348), (0.39075630903244019, 0.77254903316497803,
0.77254903316497803), (0.39495798945426941, 0.78039216995239258,
0.78039216995239258), (0.39915966987609863, 0.78823530673980713,
0.78823530673980713), (0.40336135029792786, 0.79607844352722168,
0.79607844352722168), (0.40756303071975708, 0.80392158031463623,
0.80392158031463623), (0.4117647111415863, 0.81176471710205078,
0.81176471710205078), (0.41596639156341553, 0.81960785388946533,
0.81960785388946533), (0.42016807198524475, 0.82745099067687988,
0.82745099067687988), (0.42436975240707397, 0.83529412746429443,
0.83529412746429443), (0.4285714328289032, 0.84313726425170898,
0.84313726425170898), (0.43277311325073242, 0.85098040103912354,
0.85098040103912354), (0.43697479367256165, 0.85882353782653809,
0.85882353782653809), (0.44117647409439087, 0.86666667461395264,
0.86666667461395264), (0.44537815451622009, 0.87450981140136719,
0.87450981140136719), (0.44957983493804932, 0.88235294818878174,
0.88235294818878174), (0.45378151535987854, 0.89019608497619629,
0.89019608497619629), (0.45798319578170776, 0.89803922176361084,
0.89803922176361084), (0.46218487620353699, 0.91372549533843994,
0.91372549533843994), (0.46638655662536621, 0.92156863212585449,
0.92156863212585449), (0.47058823704719543, 0.92941176891326904,
0.92941176891326904), (0.47478991746902466, 0.93725490570068359,
0.93725490570068359), (0.47899159789085388, 0.94509804248809814,
0.94509804248809814), (0.48319327831268311, 0.9529411792755127,
0.9529411792755127), (0.48739495873451233, 0.96078431606292725,
0.96078431606292725), (0.49159663915634155, 0.9686274528503418,
0.9686274528503418), (0.49579831957817078, 0.97647058963775635,
0.97647058963775635), (0.5, 0.9843137264251709, 0.9843137264251709),
(0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 0.9843137264251709,
0.9843137264251709), (0.51260507106781006, 0.9686274528503418,
0.9686274528503418), (0.51680672168731689, 0.9529411792755127,
0.9529411792755127), (0.52100843191146851, 0.93333333730697632,
0.93333333730697632), (0.52521008253097534, 0.91764706373214722,
0.91764706373214722), (0.52941179275512695, 0.90196079015731812,
0.90196079015731812), (0.53361344337463379, 0.88627451658248901,
0.88627451658248901), (0.5378151535987854, 0.86666667461395264,
0.86666667461395264), (0.54201680421829224, 0.85098040103912354,
0.85098040103912354), (0.54621851444244385, 0.81960785388946533,
0.81960785388946533), (0.55042016506195068, 0.80000001192092896,
0.80000001192092896), (0.55462187528610229, 0.78431373834609985,
0.78431373834609985), (0.55882352590560913, 0.76862746477127075,
0.76862746477127075), (0.56302523612976074, 0.75294119119644165,
0.75294119119644165), (0.56722688674926758, 0.73333334922790527,
0.73333334922790527), (0.57142859697341919, 0.71764707565307617,
0.71764707565307617), (0.57563024759292603, 0.70196080207824707,
0.70196080207824707), (0.57983195781707764, 0.68627452850341797,
0.68627452850341797), (0.58403360843658447, 0.66666668653488159,
0.66666668653488159), (0.58823531866073608, 0.65098041296005249,
0.65098041296005249), (0.59243696928024292, 0.63529413938522339,
0.63529413938522339), (0.59663867950439453, 0.61960786581039429,
0.61960786581039429), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.58431375026702881,
0.58431375026702881), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.55294120311737061,
0.55294120311737061), (0.61764705181121826, 0.53333336114883423,
0.53333336114883423), (0.62184876203536987, 0.51764708757400513,
0.51764708757400513), (0.62605041265487671, 0.50196081399917603,
0.50196081399917603), (0.63025212287902832, 0.46666666865348816,
0.46666666865348816), (0.63445377349853516, 0.45098039507865906,
0.45098039507865906), (0.63865548372268677, 0.43529412150382996,
0.43529412150382996), (0.6428571343421936, 0.41960784792900085,
0.41960784792900085), (0.64705884456634521, 0.40000000596046448,
0.40000000596046448), (0.65126049518585205, 0.38431373238563538,
0.38431373238563538), (0.65546220541000366, 0.36862745881080627,
0.36862745881080627), (0.6596638560295105, 0.35294118523597717,
0.35294118523597717), (0.66386556625366211, 0.3333333432674408,
0.3333333432674408), (0.66806721687316895, 0.31764706969261169,
0.31764706969261169), (0.67226892709732056, 0.30196079611778259,
0.30196079611778259), (0.67647057771682739, 0.28627452254295349,
0.28627452254295349), (0.680672287940979, 0.26666668057441711,
0.26666668057441711), (0.68487393856048584, 0.25098040699958801,
0.25098040699958801), (0.68907564878463745, 0.23529411852359772,
0.23529411852359772), (0.69327729940414429, 0.21960784494876862,
0.21960784494876862), (0.6974790096282959, 0.20000000298023224,
0.20000000298023224), (0.70168066024780273, 0.18431372940540314,
0.18431372940540314), (0.70588237047195435, 0.16862745583057404,
0.16862745583057404), (0.71008402109146118, 0.15294118225574493,
0.15294118225574493), (0.71428573131561279, 0.11764705926179886,
0.11764705926179886), (0.71848738193511963, 0.10196078568696976,
0.10196078568696976), (0.72268909215927124, 0.086274512112140656,
0.086274512112140656), (0.72689074277877808, 0.066666670143604279,
0.066666670143604279), (0.73109245300292969, 0.050980392843484879,
0.050980392843484879), (0.73529410362243652, 0.035294119268655777,
0.035294119268655777), (0.73949581384658813, 0.019607843831181526,
0.019607843831181526), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.011764706112444401, 0.011764706112444401),
(0.75210082530975342, 0.027450980618596077, 0.027450980618596077),
(0.75630253553390503, 0.058823529630899429, 0.058823529630899429),
(0.76050418615341187, 0.074509806931018829, 0.074509806931018829),
(0.76470589637756348, 0.086274512112140656, 0.086274512112140656),
(0.76890754699707031, 0.10196078568696976, 0.10196078568696976),
(0.77310925722122192, 0.11764705926179886, 0.11764705926179886),
(0.77731090784072876, 0.13333334028720856, 0.13333334028720856),
(0.78151261806488037, 0.14901961386203766, 0.14901961386203766),
(0.78571426868438721, 0.16078431904315948, 0.16078431904315948),
(0.78991597890853882, 0.17647059261798859, 0.17647059261798859),
(0.79411762952804565, 0.19215686619281769, 0.19215686619281769),
(0.79831933975219727, 0.22352941334247589, 0.22352941334247589),
(0.8025209903717041, 0.23529411852359772, 0.23529411852359772),
(0.80672270059585571, 0.25098040699958801, 0.25098040699958801),
(0.81092435121536255, 0.26666668057441711, 0.26666668057441711),
(0.81512606143951416, 0.28235295414924622, 0.28235295414924622),
(0.819327712059021, 0.29803922772407532, 0.29803922772407532),
(0.82352942228317261, 0.30980393290519714, 0.30980393290519714),
(0.82773107290267944, 0.32549020648002625, 0.32549020648002625),
(0.83193278312683105, 0.34117648005485535, 0.34117648005485535),
(0.83613443374633789, 0.35686275362968445, 0.35686275362968445),
(0.8403361439704895, 0.37254902720451355, 0.37254902720451355),
(0.84453779458999634, 0.38431373238563538, 0.38431373238563538),
(0.84873950481414795, 0.40000000596046448, 0.40000000596046448),
(0.85294115543365479, 0.41568627953529358, 0.41568627953529358),
(0.8571428656578064, 0.43137255311012268, 0.43137255311012268),
(0.86134451627731323, 0.44705882668495178, 0.44705882668495178),
(0.86554622650146484, 0.45882353186607361, 0.45882353186607361),
(0.86974787712097168, 0.47450980544090271, 0.47450980544090271),
(0.87394958734512329, 0.49019607901573181, 0.49019607901573181),
(0.87815123796463013, 0.5058823823928833, 0.5058823823928833),
(0.88235294818878174, 0.5372549295425415, 0.5372549295425415),
(0.88655459880828857, 0.54901963472366333, 0.54901963472366333),
(0.89075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.89495795965194702, 0.58039218187332153, 0.58039218187332153),
(0.89915966987609863, 0.59607845544815063, 0.59607845544815063),
(0.90336132049560547, 0.61176472902297974, 0.61176472902297974),
(0.90756303071975708, 0.62352943420410156, 0.62352943420410156),
(0.91176468133926392, 0.63921570777893066, 0.63921570777893066),
(0.91596639156341553, 0.65490198135375977, 0.65490198135375977),
(0.92016804218292236, 0.67058825492858887, 0.67058825492858887),
(0.92436975240707397, 0.68627452850341797, 0.68627452850341797),
(0.92857140302658081, 0.69803923368453979, 0.69803923368453979),
(0.93277311325073242, 0.7137255072593689, 0.7137255072593689),
(0.93697476387023926, 0.729411780834198, 0.729411780834198),
(0.94117647409439087, 0.7450980544090271, 0.7450980544090271),
(0.94537812471389771, 0.7607843279838562, 0.7607843279838562),
(0.94957983493804932, 0.77254903316497803, 0.77254903316497803),
(0.95378148555755615, 0.78823530673980713, 0.78823530673980713),
(0.95798319578170776, 0.80392158031463623, 0.80392158031463623),
(0.9621848464012146, 0.81960785388946533, 0.81960785388946533),
(0.96638655662536621, 0.84705883264541626, 0.84705883264541626),
(0.97058820724487305, 0.86274510622024536, 0.86274510622024536),
(0.97478991746902466, 0.87843137979507446, 0.87843137979507446),
(0.97899156808853149, 0.89411765336990356, 0.89411765336990356),
(0.98319327831268311, 0.90980392694473267, 0.90980392694473267),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.031372550874948502, 0.031372550874948502),
(0.037815127521753311, 0.035294119268655777, 0.035294119268655777),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.094117648899555206, 0.094117648899555206),
(0.10084033757448196, 0.098039217293262482, 0.098039217293262482),
(0.10504201799631119, 0.10196078568696976, 0.10196078568696976),
(0.10924369841814041, 0.10588235408067703, 0.10588235408067703),
(0.11344537883996964, 0.10980392247438431, 0.10980392247438431),
(0.11764705926179886, 0.11372549086809158, 0.11372549086809158),
(0.12184873968362808, 0.11764705926179886, 0.11764705926179886),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.15686275064945221, 0.15686275064945221),
(0.16386555135250092, 0.16078431904315948, 0.16078431904315948),
(0.16806723177433014, 0.16470588743686676, 0.16470588743686676),
(0.17226891219615936, 0.16862745583057404, 0.16862745583057404),
(0.17647059261798859, 0.17254902422428131, 0.17254902422428131),
(0.18067227303981781, 0.17647059261798859, 0.17647059261798859),
(0.18487395346164703, 0.18039216101169586, 0.18039216101169586),
(0.18907563388347626, 0.18431372940540314, 0.18431372940540314),
(0.19327731430530548, 0.18823529779911041, 0.18823529779911041),
(0.1974789947271347, 0.19215686619281769, 0.19215686619281769),
(0.20168067514896393, 0.19607843458652496, 0.19607843458652496),
(0.20588235557079315, 0.20000000298023224, 0.20000000298023224),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.21960784494876862, 0.21960784494876862),
(0.22689075767993927, 0.22352941334247589, 0.22352941334247589),
(0.23109243810176849, 0.22745098173618317, 0.22745098173618317),
(0.23529411852359772, 0.23137255012989044, 0.23137255012989044),
(0.23949579894542694, 0.23529411852359772, 0.23529411852359772),
(0.24369747936725616, 0.23921568691730499, 0.23921568691730499),
(0.24789915978908539, 0.24313725531101227, 0.24313725531101227),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28235295414924622, 0.28235295414924622),
(0.28991597890853882, 0.28627452254295349, 0.28627452254295349),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.34509804844856262, 0.34509804844856262),
(0.35294118523597717, 0.3490196168422699, 0.3490196168422699),
(0.3571428656578064, 0.35294118523597717, 0.35294118523597717),
(0.36134454607963562, 0.35686275362968445, 0.35686275362968445),
(0.36554622650146484, 0.36078432202339172, 0.36078432202339172),
(0.36974790692329407, 0.364705890417099, 0.364705890417099),
(0.37394958734512329, 0.36862745881080627, 0.36862745881080627),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.40784314274787903, 0.40784314274787903),
(0.41596639156341553, 0.4117647111415863, 0.4117647111415863),
(0.42016807198524475, 0.41568627953529358, 0.41568627953529358),
(0.42436975240707397, 0.41960784792900085, 0.41960784792900085),
(0.4285714328289032, 0.42352941632270813, 0.42352941632270813),
(0.43277311325073242, 0.42745098471641541, 0.42745098471641541),
(0.43697479367256165, 0.43137255311012268, 0.43137255311012268),
(0.44117647409439087, 0.43529412150382996, 0.43529412150382996),
(0.44537815451622009, 0.43921568989753723, 0.43921568989753723),
(0.44957983493804932, 0.44313725829124451, 0.44313725829124451),
(0.45378151535987854, 0.44705882668495178, 0.44705882668495178),
(0.45798319578170776, 0.45098039507865906, 0.45098039507865906),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47058823704719543, 0.47058823704719543),
(0.47899159789085388, 0.47450980544090271, 0.47450980544090271),
(0.48319327831268311, 0.47843137383460999, 0.47843137383460999),
(0.48739495873451233, 0.48235294222831726, 0.48235294222831726),
(0.49159663915634155, 0.48627451062202454, 0.48627451062202454),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.49411764740943909, 0.49411764740943909), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.53333336114883423, 0.53333336114883423), (0.54201680421829224,
0.5372549295425415, 0.5372549295425415), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.59607845544815063, 0.59607845544815063), (0.60504204034805298,
0.60000002384185791, 0.60000002384185791), (0.60924369096755981,
0.60392159223556519, 0.60392159223556519), (0.61344540119171143,
0.60784316062927246, 0.60784316062927246), (0.61764705181121826,
0.61176472902297974, 0.61176472902297974), (0.62184876203536987,
0.61568629741668701, 0.61568629741668701), (0.62605041265487671,
0.61960786581039429, 0.61960786581039429), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.65882354974746704, 0.65882354974746704), (0.66806721687316895,
0.66274511814117432, 0.66274511814117432), (0.67226892709732056,
0.66666668653488159, 0.66666668653488159), (0.67647057771682739,
0.67058825492858887, 0.67058825492858887), (0.680672287940979,
0.67450982332229614, 0.67450982332229614), (0.68487393856048584,
0.67843139171600342, 0.67843139171600342), (0.68907564878463745,
0.68235296010971069, 0.68235296010971069), (0.69327729940414429,
0.68627452850341797, 0.68627452850341797), (0.6974790096282959,
0.69019609689712524, 0.69019609689712524), (0.70168066024780273,
0.69411766529083252, 0.69411766529083252), (0.70588237047195435,
0.69803923368453979, 0.69803923368453979), (0.71008402109146118,
0.70196080207824707, 0.70196080207824707), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72156864404678345, 0.72156864404678345), (0.73109245300292969,
0.72549021244049072, 0.72549021244049072), (0.73529410362243652,
0.729411780834198, 0.729411780834198), (0.73949581384658813,
0.73333334922790527, 0.73333334922790527), (0.74369746446609497,
0.73725491762161255, 0.73725491762161255), (0.74789917469024658,
0.74117648601531982, 0.74117648601531982), (0.75210082530975342,
0.7450980544090271, 0.7450980544090271), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78431373834609985, 0.78431373834609985), (0.79411762952804565,
0.78823530673980713, 0.78823530673980713), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.84705883264541626, 0.84705883264541626), (0.8571428656578064,
0.85098040103912354, 0.85098040103912354), (0.86134451627731323,
0.85490196943283081, 0.85490196943283081), (0.86554622650146484,
0.85882353782653809, 0.85882353782653809), (0.86974787712097168,
0.86274510622024536, 0.86274510622024536), (0.87394958734512329,
0.86666667461395264, 0.86666667461395264), (0.87815123796463013,
0.87058824300765991, 0.87058824300765991), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.90980392694473267, 0.90980392694473267), (0.92016804218292236,
0.91372549533843994, 0.91372549533843994), (0.92436975240707397,
0.91764706373214722, 0.91764706373214722), (0.92857140302658081,
0.92156863212585449, 0.92156863212585449), (0.93277311325073242,
0.92549020051956177, 0.92549020051956177), (0.93697476387023926,
0.92941176891326904, 0.92941176891326904), (0.94117647409439087,
0.93333333730697632, 0.93333333730697632), (0.94537812471389771,
0.93725490570068359, 0.93725490570068359), (0.94957983493804932,
0.94117647409439087, 0.94117647409439087), (0.95378148555755615,
0.94509804248809814, 0.94509804248809814), (0.95798319578170776,
0.94901961088180542, 0.94901961088180542), (0.9621848464012146,
0.9529411792755127, 0.9529411792755127), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97254902124404907, 0.97254902124404907), (0.98319327831268311,
0.97647058963775635, 0.97647058963775635), (0.98739492893218994,
0.98039215803146362, 0.98039215803146362), (0.99159663915634155,
0.9843137264251709, 0.9843137264251709), (0.99579828977584839,
0.98823529481887817, 0.98823529481887817), (1.0, 0.99215686321258545,
0.99215686321258545)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.070588238537311554, 0.070588238537311554), (0.0084033617749810219,
0.14117647707462311, 0.14117647707462311), (0.012605042196810246,
0.21176470816135406, 0.21176470816135406), (0.016806723549962044,
0.28235295414924622, 0.28235295414924622), (0.021008403971791267,
0.35294118523597717, 0.35294118523597717), (0.025210084393620491,
0.42352941632270813, 0.42352941632270813), (0.029411764815449715,
0.49803921580314636, 0.49803921580314636), (0.033613447099924088,
0.56862747669219971, 0.56862747669219971), (0.037815127521753311,
0.63921570777893066, 0.63921570777893066), (0.042016807943582535,
0.78039216995239258, 0.78039216995239258), (0.046218488365411758,
0.85098040103912354, 0.85098040103912354), (0.050420168787240982,
0.92156863212585449, 0.92156863212585449), (0.054621849209070206,
0.99607843160629272, 0.99607843160629272), (0.058823529630899429,
0.97647058963775635, 0.97647058963775635), (0.063025213778018951,
0.95686274766921997, 0.95686274766921997), (0.067226894199848175,
0.93725490570068359, 0.93725490570068359), (0.071428574621677399,
0.91764706373214722, 0.91764706373214722), (0.075630255043506622,
0.89803922176361084, 0.89803922176361084), (0.079831935465335846,
0.87450981140136719, 0.87450981140136719), (0.08403361588716507,
0.85490196943283081, 0.85490196943283081), (0.088235296308994293,
0.83529412746429443, 0.83529412746429443), (0.092436976730823517,
0.81568628549575806, 0.81568628549575806), (0.09663865715265274,
0.79607844352722168, 0.79607844352722168), (0.10084033757448196,
0.77254903316497803, 0.77254903316497803), (0.10504201799631119,
0.75294119119644165, 0.75294119119644165), (0.10924369841814041,
0.73333334922790527, 0.73333334922790527), (0.11344537883996964,
0.7137255072593689, 0.7137255072593689), (0.11764705926179886,
0.69411766529083252, 0.69411766529083252), (0.12184873968362808,
0.67450982332229614, 0.67450982332229614), (0.1260504275560379,
0.63137257099151611, 0.63137257099151611), (0.13025210797786713,
0.61176472902297974, 0.61176472902297974), (0.13445378839969635,
0.59215688705444336, 0.59215688705444336), (0.13865546882152557,
0.57254904508590698, 0.57254904508590698), (0.1428571492433548,
0.54901963472366333, 0.54901963472366333), (0.14705882966518402,
0.52941179275512695, 0.52941179275512695), (0.15126051008701324,
0.50980395078659058, 0.50980395078659058), (0.15546219050884247,
0.49019607901573181, 0.49019607901573181), (0.15966387093067169,
0.47058823704719543, 0.47058823704719543), (0.16386555135250092,
0.45098039507865906, 0.45098039507865906), (0.16806723177433014,
0.42745098471641541, 0.42745098471641541), (0.17226891219615936,
0.40784314274787903, 0.40784314274787903), (0.17647059261798859,
0.38823530077934265, 0.38823530077934265), (0.18067227303981781,
0.36862745881080627, 0.36862745881080627), (0.18487395346164703,
0.3490196168422699, 0.3490196168422699), (0.18907563388347626,
0.32549020648002625, 0.32549020648002625), (0.19327731430530548,
0.30588236451148987, 0.30588236451148987), (0.1974789947271347,
0.28627452254295349, 0.28627452254295349), (0.20168067514896393,
0.26666668057441711, 0.26666668057441711), (0.20588235557079315,
0.24705882370471954, 0.24705882370471954), (0.21008403599262238,
0.20392157137393951, 0.20392157137393951), (0.2142857164144516,
0.18431372940540314, 0.18431372940540314), (0.21848739683628082,
0.16470588743686676, 0.16470588743686676), (0.22268907725811005,
0.14509804546833038, 0.14509804546833038), (0.22689075767993927,
0.12549020349979401, 0.12549020349979401), (0.23109243810176849,
0.10196078568696976, 0.10196078568696976), (0.23529411852359772,
0.08235294371843338, 0.08235294371843338), (0.23949579894542694,
0.062745101749897003, 0.062745101749897003), (0.24369747936725616,
0.043137256056070328, 0.043137256056070328), (0.24789915978908539,
0.023529412224888802, 0.023529412224888802), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28235295414924622, 0.28235295414924622), (0.28991597890853882,
0.28627452254295349, 0.28627452254295349), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.34509804844856262, 0.34509804844856262), (0.35294118523597717,
0.3490196168422699, 0.3490196168422699), (0.3571428656578064,
0.35294118523597717, 0.35294118523597717), (0.36134454607963562,
0.35686275362968445, 0.35686275362968445), (0.36554622650146484,
0.36078432202339172, 0.36078432202339172), (0.36974790692329407,
0.364705890417099, 0.364705890417099), (0.37394958734512329,
0.36862745881080627, 0.36862745881080627), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.40784314274787903, 0.40784314274787903), (0.41596639156341553,
0.4117647111415863, 0.4117647111415863), (0.42016807198524475,
0.41568627953529358, 0.41568627953529358), (0.42436975240707397,
0.41960784792900085, 0.41960784792900085), (0.4285714328289032,
0.42352941632270813, 0.42352941632270813), (0.43277311325073242,
0.42745098471641541, 0.42745098471641541), (0.43697479367256165,
0.43137255311012268, 0.43137255311012268), (0.44117647409439087,
0.43529412150382996, 0.43529412150382996), (0.44537815451622009,
0.43921568989753723, 0.43921568989753723), (0.44957983493804932,
0.44313725829124451, 0.44313725829124451), (0.45378151535987854,
0.44705882668495178, 0.44705882668495178), (0.45798319578170776,
0.45098039507865906, 0.45098039507865906), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47058823704719543, 0.47058823704719543), (0.47899159789085388,
0.47450980544090271, 0.47450980544090271), (0.48319327831268311,
0.47843137383460999, 0.47843137383460999), (0.48739495873451233,
0.48235294222831726, 0.48235294222831726), (0.49159663915634155,
0.48627451062202454, 0.48627451062202454), (0.49579831957817078,
0.49019607901573181, 0.49019607901573181), (0.5, 0.49411764740943909,
0.49411764740943909), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.53333336114883423,
0.53333336114883423), (0.54201680421829224, 0.5372549295425415,
0.5372549295425415), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.59607845544815063,
0.59607845544815063), (0.60504204034805298, 0.60000002384185791,
0.60000002384185791), (0.60924369096755981, 0.60392159223556519,
0.60392159223556519), (0.61344540119171143, 0.60784316062927246,
0.60784316062927246), (0.61764705181121826, 0.61176472902297974,
0.61176472902297974), (0.62184876203536987, 0.61568629741668701,
0.61568629741668701), (0.62605041265487671, 0.61960786581039429,
0.61960786581039429), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66274511814117432,
0.66274511814117432), (0.67226892709732056, 0.66666668653488159,
0.66666668653488159), (0.67647057771682739, 0.67058825492858887,
0.67058825492858887), (0.680672287940979, 0.67450982332229614,
0.67450982332229614), (0.68487393856048584, 0.67843139171600342,
0.67843139171600342), (0.68907564878463745, 0.68235296010971069,
0.68235296010971069), (0.69327729940414429, 0.68627452850341797,
0.68627452850341797), (0.6974790096282959, 0.69019609689712524,
0.69019609689712524), (0.70168066024780273, 0.69411766529083252,
0.69411766529083252), (0.70588237047195435, 0.69803923368453979,
0.69803923368453979), (0.71008402109146118, 0.70196080207824707,
0.70196080207824707), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72156864404678345,
0.72156864404678345), (0.73109245300292969, 0.72549021244049072,
0.72549021244049072), (0.73529410362243652, 0.729411780834198,
0.729411780834198), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.74117648601531982,
0.74117648601531982), (0.75210082530975342, 0.7450980544090271,
0.7450980544090271), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78431373834609985,
0.78431373834609985), (0.79411762952804565, 0.78823530673980713,
0.78823530673980713), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.84705883264541626,
0.84705883264541626), (0.8571428656578064, 0.85098040103912354,
0.85098040103912354), (0.86134451627731323, 0.85490196943283081,
0.85490196943283081), (0.86554622650146484, 0.85882353782653809,
0.85882353782653809), (0.86974787712097168, 0.86274510622024536,
0.86274510622024536), (0.87394958734512329, 0.86666667461395264,
0.86666667461395264), (0.87815123796463013, 0.87058824300765991,
0.87058824300765991), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.90980392694473267,
0.90980392694473267), (0.92016804218292236, 0.91372549533843994,
0.91372549533843994), (0.92436975240707397, 0.91764706373214722,
0.91764706373214722), (0.92857140302658081, 0.92156863212585449,
0.92156863212585449), (0.93277311325073242, 0.92549020051956177,
0.92549020051956177), (0.93697476387023926, 0.92941176891326904,
0.92941176891326904), (0.94117647409439087, 0.93333333730697632,
0.93333333730697632), (0.94537812471389771, 0.93725490570068359,
0.93725490570068359), (0.94957983493804932, 0.94117647409439087,
0.94117647409439087), (0.95378148555755615, 0.94509804248809814,
0.94509804248809814), (0.95798319578170776, 0.94901961088180542,
0.94901961088180542), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97254902124404907,
0.97254902124404907), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
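
# A minimal sketch (illustrative only; ``_demo_data`` and ``_demo_cmap`` are
# not part of this module) of how a segment-data dict like the one above is
# consumed.  Each channel maps to a sorted list of (x, y0, y1) rows;
# LinearSegmentedColormap interpolates from one row's y1 to the next row's
# y0, so rows with y0 == y1 give a continuous ramp, while y0 != y1 encodes a
# jump in that channel at x.
#
#     _demo_data = {
#         'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 0.25), (1.0, 1.0, 1.0)],
#         'green': [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
#         'blue':  [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]}
#     _demo_cmap = colors.LinearSegmentedColormap('demo', _demo_data, LUTSIZE)
#     _demo_cmap(0.75)  # RGBA 4-tuple; red has jumped to 0.25 at x=0.5 and
#                       # is ramping back toward 1.0 here
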
_gist_yarg_data = {'blue': [(0.0, 1.0, 1.0), (0.0042016808874905109,
0.99607843160629272, 0.99607843160629272), (0.0084033617749810219,
0.99215686321258545, 0.99215686321258545), (0.012605042196810246,
0.98823529481887817, 0.98823529481887817), (0.016806723549962044,
0.9843137264251709, 0.9843137264251709), (0.021008403971791267,
0.98039215803146362, 0.98039215803146362), (0.025210084393620491,
0.97647058963775635, 0.97647058963775635), (0.029411764815449715,
0.97254902124404907, 0.97254902124404907), (0.033613447099924088,
0.96470588445663452, 0.96470588445663452), (0.037815127521753311,
0.96078431606292725, 0.96078431606292725), (0.042016807943582535,
0.95686274766921997, 0.95686274766921997), (0.046218488365411758,
0.9529411792755127, 0.9529411792755127), (0.050420168787240982,
0.94901961088180542, 0.94901961088180542), (0.054621849209070206,
0.94509804248809814, 0.94509804248809814), (0.058823529630899429,
0.94117647409439087, 0.94117647409439087), (0.063025213778018951,
0.93725490570068359, 0.93725490570068359), (0.067226894199848175,
0.93333333730697632, 0.93333333730697632), (0.071428574621677399,
0.92941176891326904, 0.92941176891326904), (0.075630255043506622,
0.92549020051956177, 0.92549020051956177), (0.079831935465335846,
0.92156863212585449, 0.92156863212585449), (0.08403361588716507,
0.91764706373214722, 0.91764706373214722), (0.088235296308994293,
0.91372549533843994, 0.91372549533843994), (0.092436976730823517,
0.90980392694473267, 0.90980392694473267), (0.09663865715265274,
0.90196079015731812, 0.90196079015731812), (0.10084033757448196,
0.89803922176361084, 0.89803922176361084), (0.10504201799631119,
0.89411765336990356, 0.89411765336990356), (0.10924369841814041,
0.89019608497619629, 0.89019608497619629), (0.11344537883996964,
0.88627451658248901, 0.88627451658248901), (0.11764705926179886,
0.88235294818878174, 0.88235294818878174), (0.12184873968362808,
0.87843137979507446, 0.87843137979507446), (0.1260504275560379,
0.87450981140136719, 0.87450981140136719), (0.13025210797786713,
0.87058824300765991, 0.87058824300765991), (0.13445378839969635,
0.86666667461395264, 0.86666667461395264), (0.13865546882152557,
0.86274510622024536, 0.86274510622024536), (0.1428571492433548,
0.85882353782653809, 0.85882353782653809), (0.14705882966518402,
0.85490196943283081, 0.85490196943283081), (0.15126051008701324,
0.85098040103912354, 0.85098040103912354), (0.15546219050884247,
0.84705883264541626, 0.84705883264541626), (0.15966387093067169,
0.83921569585800171, 0.83921569585800171), (0.16386555135250092,
0.83529412746429443, 0.83529412746429443), (0.16806723177433014,
0.83137255907058716, 0.83137255907058716), (0.17226891219615936,
0.82745099067687988, 0.82745099067687988), (0.17647059261798859,
0.82352942228317261, 0.82352942228317261), (0.18067227303981781,
0.81960785388946533, 0.81960785388946533), (0.18487395346164703,
0.81568628549575806, 0.81568628549575806), (0.18907563388347626,
0.81176471710205078, 0.81176471710205078), (0.19327731430530548,
0.80784314870834351, 0.80784314870834351), (0.1974789947271347,
0.80392158031463623, 0.80392158031463623), (0.20168067514896393,
0.80000001192092896, 0.80000001192092896), (0.20588235557079315,
0.79607844352722168, 0.79607844352722168), (0.21008403599262238,
0.7921568751335144, 0.7921568751335144), (0.2142857164144516,
0.78823530673980713, 0.78823530673980713), (0.21848739683628082,
0.78431373834609985, 0.78431373834609985), (0.22268907725811005,
0.7764706015586853, 0.7764706015586853), (0.22689075767993927,
0.77254903316497803, 0.77254903316497803), (0.23109243810176849,
0.76862746477127075, 0.76862746477127075), (0.23529411852359772,
0.76470589637756348, 0.76470589637756348), (0.23949579894542694,
0.7607843279838562, 0.7607843279838562), (0.24369747936725616,
0.75686275959014893, 0.75686275959014893), (0.24789915978908539,
0.75294119119644165, 0.75294119119644165), (0.25210085511207581,
0.74901962280273438, 0.74901962280273438), (0.25630253553390503,
0.7450980544090271, 0.7450980544090271), (0.26050421595573425,
0.74117648601531982, 0.74117648601531982), (0.26470589637756348,
0.73725491762161255, 0.73725491762161255), (0.2689075767993927,
0.73333334922790527, 0.73333334922790527), (0.27310925722122192,
0.729411780834198, 0.729411780834198), (0.27731093764305115,
0.72549021244049072, 0.72549021244049072), (0.28151261806488037,
0.72156864404678345, 0.72156864404678345), (0.28571429848670959,
0.7137255072593689, 0.7137255072593689), (0.28991597890853882,
0.70980393886566162, 0.70980393886566162), (0.29411765933036804,
0.70588237047195435, 0.70588237047195435), (0.29831933975219727,
0.70196080207824707, 0.70196080207824707), (0.30252102017402649,
0.69803923368453979, 0.69803923368453979), (0.30672270059585571,
0.69411766529083252, 0.69411766529083252), (0.31092438101768494,
0.69019609689712524, 0.69019609689712524), (0.31512606143951416,
0.68627452850341797, 0.68627452850341797), (0.31932774186134338,
0.68235296010971069, 0.68235296010971069), (0.32352942228317261,
0.67843139171600342, 0.67843139171600342), (0.32773110270500183,
0.67450982332229614, 0.67450982332229614), (0.33193278312683105,
0.67058825492858887, 0.67058825492858887), (0.33613446354866028,
0.66666668653488159, 0.66666668653488159), (0.3403361439704895,
0.66274511814117432, 0.66274511814117432), (0.34453782439231873,
0.65882354974746704, 0.65882354974746704), (0.34873950481414795,
0.65098041296005249, 0.65098041296005249), (0.35294118523597717,
0.64705884456634521, 0.64705884456634521), (0.3571428656578064,
0.64313727617263794, 0.64313727617263794), (0.36134454607963562,
0.63921570777893066, 0.63921570777893066), (0.36554622650146484,
0.63529413938522339, 0.63529413938522339), (0.36974790692329407,
0.63137257099151611, 0.63137257099151611), (0.37394958734512329,
0.62745100259780884, 0.62745100259780884), (0.37815126776695251,
0.62352943420410156, 0.62352943420410156), (0.38235294818878174,
0.61960786581039429, 0.61960786581039429), (0.38655462861061096,
0.61568629741668701, 0.61568629741668701), (0.39075630903244019,
0.61176472902297974, 0.61176472902297974), (0.39495798945426941,
0.60784316062927246, 0.60784316062927246), (0.39915966987609863,
0.60392159223556519, 0.60392159223556519), (0.40336135029792786,
0.60000002384185791, 0.60000002384185791), (0.40756303071975708,
0.59607845544815063, 0.59607845544815063), (0.4117647111415863,
0.58823531866073608, 0.58823531866073608), (0.41596639156341553,
0.58431375026702881, 0.58431375026702881), (0.42016807198524475,
0.58039218187332153, 0.58039218187332153), (0.42436975240707397,
0.57647061347961426, 0.57647061347961426), (0.4285714328289032,
0.57254904508590698, 0.57254904508590698), (0.43277311325073242,
0.56862747669219971, 0.56862747669219971), (0.43697479367256165,
0.56470590829849243, 0.56470590829849243), (0.44117647409439087,
0.56078433990478516, 0.56078433990478516), (0.44537815451622009,
0.55686277151107788, 0.55686277151107788), (0.44957983493804932,
0.55294120311737061, 0.55294120311737061), (0.45378151535987854,
0.54901963472366333, 0.54901963472366333), (0.45798319578170776,
0.54509806632995605, 0.54509806632995605), (0.46218487620353699,
0.54117649793624878, 0.54117649793624878), (0.46638655662536621,
0.5372549295425415, 0.5372549295425415), (0.47058823704719543,
0.53333336114883423, 0.53333336114883423), (0.47478991746902466,
0.52549022436141968, 0.52549022436141968), (0.47899159789085388,
0.5215686559677124, 0.5215686559677124), (0.48319327831268311,
0.51764708757400513, 0.51764708757400513), (0.48739495873451233,
0.51372551918029785, 0.51372551918029785), (0.49159663915634155,
0.50980395078659058, 0.50980395078659058), (0.49579831957817078,
0.5058823823928833, 0.5058823823928833), (0.5, 0.50196081399917603,
0.50196081399917603), (0.50420171022415161, 0.49803921580314636,
0.49803921580314636), (0.50840336084365845, 0.49411764740943909,
0.49411764740943909), (0.51260507106781006, 0.49019607901573181,
0.49019607901573181), (0.51680672168731689, 0.48627451062202454,
0.48627451062202454), (0.52100843191146851, 0.48235294222831726,
0.48235294222831726), (0.52521008253097534, 0.47843137383460999,
0.47843137383460999), (0.52941179275512695, 0.47450980544090271,
0.47450980544090271), (0.53361344337463379, 0.47058823704719543,
0.47058823704719543), (0.5378151535987854, 0.46274510025978088,
0.46274510025978088), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.45490196347236633,
0.45490196347236633), (0.55042016506195068, 0.45098039507865906,
0.45098039507865906), (0.55462187528610229, 0.44705882668495178,
0.44705882668495178), (0.55882352590560913, 0.44313725829124451,
0.44313725829124451), (0.56302523612976074, 0.43921568989753723,
0.43921568989753723), (0.56722688674926758, 0.43529412150382996,
0.43529412150382996), (0.57142859697341919, 0.43137255311012268,
0.43137255311012268), (0.57563024759292603, 0.42745098471641541,
0.42745098471641541), (0.57983195781707764, 0.42352941632270813,
0.42352941632270813), (0.58403360843658447, 0.41960784792900085,
0.41960784792900085), (0.58823531866073608, 0.41568627953529358,
0.41568627953529358), (0.59243696928024292, 0.4117647111415863,
0.4117647111415863), (0.59663867950439453, 0.40784314274787903,
0.40784314274787903), (0.60084033012390137, 0.40000000596046448,
0.40000000596046448), (0.60504204034805298, 0.3960784375667572,
0.3960784375667572), (0.60924369096755981, 0.39215686917304993,
0.39215686917304993), (0.61344540119171143, 0.38823530077934265,
0.38823530077934265), (0.61764705181121826, 0.38431373238563538,
0.38431373238563538), (0.62184876203536987, 0.3803921639919281,
0.3803921639919281), (0.62605041265487671, 0.37647059559822083,
0.37647059559822083), (0.63025212287902832, 0.37254902720451355,
0.37254902720451355), (0.63445377349853516, 0.36862745881080627,
0.36862745881080627), (0.63865548372268677, 0.364705890417099,
0.364705890417099), (0.6428571343421936, 0.36078432202339172,
0.36078432202339172), (0.64705884456634521, 0.35686275362968445,
0.35686275362968445), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.3490196168422699,
0.3490196168422699), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.33725491166114807,
0.33725491166114807), (0.66806721687316895, 0.3333333432674408,
0.3333333432674408), (0.67226892709732056, 0.32941177487373352,
0.32941177487373352), (0.67647057771682739, 0.32549020648002625,
0.32549020648002625), (0.680672287940979, 0.32156863808631897,
0.32156863808631897), (0.68487393856048584, 0.31764706969261169,
0.31764706969261169), (0.68907564878463745, 0.31372550129890442,
0.31372550129890442), (0.69327729940414429, 0.30980393290519714,
0.30980393290519714), (0.6974790096282959, 0.30588236451148987,
0.30588236451148987), (0.70168066024780273, 0.30196079611778259,
0.30196079611778259), (0.70588237047195435, 0.29803922772407532,
0.29803922772407532), (0.71008402109146118, 0.29411765933036804,
0.29411765933036804), (0.71428573131561279, 0.29019609093666077,
0.29019609093666077), (0.71848738193511963, 0.28627452254295349,
0.28627452254295349), (0.72268909215927124, 0.28235295414924622,
0.28235295414924622), (0.72689074277877808, 0.27450981736183167,
0.27450981736183167), (0.73109245300292969, 0.27058824896812439,
0.27058824896812439), (0.73529410362243652, 0.26666668057441711,
0.26666668057441711), (0.73949581384658813, 0.26274511218070984,
0.26274511218070984), (0.74369746446609497, 0.25882354378700256,
0.25882354378700256), (0.74789917469024658, 0.25490197539329529,
0.25490197539329529), (0.75210082530975342, 0.25098040699958801,
0.25098040699958801), (0.75630253553390503, 0.24705882370471954,
0.24705882370471954), (0.76050418615341187, 0.24313725531101227,
0.24313725531101227), (0.76470589637756348, 0.23921568691730499,
0.23921568691730499), (0.76890754699707031, 0.23529411852359772,
0.23529411852359772), (0.77310925722122192, 0.23137255012989044,
0.23137255012989044), (0.77731090784072876, 0.22745098173618317,
0.22745098173618317), (0.78151261806488037, 0.22352941334247589,
0.22352941334247589), (0.78571426868438721, 0.21960784494876862,
0.21960784494876862), (0.78991597890853882, 0.21176470816135406,
0.21176470816135406), (0.79411762952804565, 0.20784313976764679,
0.20784313976764679), (0.79831933975219727, 0.20392157137393951,
0.20392157137393951), (0.8025209903717041, 0.20000000298023224,
0.20000000298023224), (0.80672270059585571, 0.19607843458652496,
0.19607843458652496), (0.81092435121536255, 0.19215686619281769,
0.19215686619281769), (0.81512606143951416, 0.18823529779911041,
0.18823529779911041), (0.819327712059021, 0.18431372940540314,
0.18431372940540314), (0.82352942228317261, 0.18039216101169586,
0.18039216101169586), (0.82773107290267944, 0.17647059261798859,
0.17647059261798859), (0.83193278312683105, 0.17254902422428131,
0.17254902422428131), (0.83613443374633789, 0.16862745583057404,
0.16862745583057404), (0.8403361439704895, 0.16470588743686676,
0.16470588743686676), (0.84453779458999634, 0.16078431904315948,
0.16078431904315948), (0.84873950481414795, 0.15686275064945221,
0.15686275064945221), (0.85294115543365479, 0.14901961386203766,
0.14901961386203766), (0.8571428656578064, 0.14509804546833038,
0.14509804546833038), (0.86134451627731323, 0.14117647707462311,
0.14117647707462311), (0.86554622650146484, 0.13725490868091583,
0.13725490868091583), (0.86974787712097168, 0.13333334028720856,
0.13333334028720856), (0.87394958734512329, 0.12941177189350128,
0.12941177189350128), (0.87815123796463013, 0.12549020349979401,
0.12549020349979401), (0.88235294818878174, 0.12156862765550613,
0.12156862765550613), (0.88655459880828857, 0.11764705926179886,
0.11764705926179886), (0.89075630903244019, 0.11372549086809158,
0.11372549086809158), (0.89495795965194702, 0.10980392247438431,
0.10980392247438431), (0.89915966987609863, 0.10588235408067703,
0.10588235408067703), (0.90336132049560547, 0.10196078568696976,
0.10196078568696976), (0.90756303071975708, 0.098039217293262482,
0.098039217293262482), (0.91176468133926392, 0.094117648899555206,
0.094117648899555206), (0.91596639156341553, 0.086274512112140656,
0.086274512112140656), (0.92016804218292236, 0.08235294371843338,
0.08235294371843338), (0.92436975240707397, 0.078431375324726105,
0.078431375324726105), (0.92857140302658081, 0.074509806931018829,
0.074509806931018829), (0.93277311325073242, 0.070588238537311554,
0.070588238537311554), (0.93697476387023926, 0.066666670143604279,
0.066666670143604279), (0.94117647409439087, 0.062745101749897003,
0.062745101749897003), (0.94537812471389771, 0.058823529630899429,
0.058823529630899429), (0.94957983493804932, 0.054901961237192154,
0.054901961237192154), (0.95378148555755615, 0.050980392843484879,
0.050980392843484879), (0.95798319578170776, 0.047058824449777603,
0.047058824449777603), (0.9621848464012146, 0.043137256056070328,
0.043137256056070328), (0.96638655662536621, 0.039215687662363052,
0.039215687662363052), (0.97058820724487305, 0.035294119268655777,
0.035294119268655777), (0.97478991746902466, 0.031372550874948502,
0.031372550874948502), (0.97899156808853149, 0.023529412224888802,
0.023529412224888802), (0.98319327831268311, 0.019607843831181526,
0.019607843831181526), (0.98739492893218994, 0.015686275437474251,
0.015686275437474251), (0.99159663915634155, 0.011764706112444401,
0.011764706112444401), (0.99579828977584839, 0.0078431377187371254,
0.0078431377187371254), (1.0, 0.0039215688593685627,
0.0039215688593685627)], 'green': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)], 'red': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)]}
Accent = colors.LinearSegmentedColormap('Accent', _Accent_data, LUTSIZE)
Blues = colors.LinearSegmentedColormap('Blues', _Blues_data, LUTSIZE)
BrBG = colors.LinearSegmentedColormap('BrBG', _BrBG_data, LUTSIZE)
BuGn = colors.LinearSegmentedColormap('BuGn', _BuGn_data, LUTSIZE)
BuPu = colors.LinearSegmentedColormap('BuPu', _BuPu_data, LUTSIZE)
Dark2 = colors.LinearSegmentedColormap('Dark2', _Dark2_data, LUTSIZE)
GnBu = colors.LinearSegmentedColormap('GnBu', _GnBu_data, LUTSIZE)
Greens = colors.LinearSegmentedColormap('Greens', _Greens_data, LUTSIZE)
Greys = colors.LinearSegmentedColormap('Greys', _Greys_data, LUTSIZE)
Oranges = colors.LinearSegmentedColormap('Oranges', _Oranges_data, LUTSIZE)
OrRd = colors.LinearSegmentedColormap('OrRd', _OrRd_data, LUTSIZE)
Paired = colors.LinearSegmentedColormap('Paired', _Paired_data, LUTSIZE)
Pastel1 = colors.LinearSegmentedColormap('Pastel1', _Pastel1_data, LUTSIZE)
Pastel2 = colors.LinearSegmentedColormap('Pastel2', _Pastel2_data, LUTSIZE)
PiYG = colors.LinearSegmentedColormap('PiYG', _PiYG_data, LUTSIZE)
PRGn = colors.LinearSegmentedColormap('PRGn', _PRGn_data, LUTSIZE)
PuBu = colors.LinearSegmentedColormap('PuBu', _PuBu_data, LUTSIZE)
PuBuGn = colors.LinearSegmentedColormap('PuBuGn', _PuBuGn_data, LUTSIZE)
PuOr = colors.LinearSegmentedColormap('PuOr', _PuOr_data, LUTSIZE)
PuRd = colors.LinearSegmentedColormap('PuRd', _PuRd_data, LUTSIZE)
Purples = colors.LinearSegmentedColormap('Purples', _Purples_data, LUTSIZE)
RdBu = colors.LinearSegmentedColormap('RdBu', _RdBu_data, LUTSIZE)
RdGy = colors.LinearSegmentedColormap('RdGy', _RdGy_data, LUTSIZE)
RdPu = colors.LinearSegmentedColormap('RdPu', _RdPu_data, LUTSIZE)
RdYlBu = colors.LinearSegmentedColormap('RdYlBu', _RdYlBu_data, LUTSIZE)
RdYlGn = colors.LinearSegmentedColormap('RdYlGn', _RdYlGn_data, LUTSIZE)
Reds = colors.LinearSegmentedColormap('Reds', _Reds_data, LUTSIZE)
Set1 = colors.LinearSegmentedColormap('Set1', _Set1_data, LUTSIZE)
Set2 = colors.LinearSegmentedColormap('Set2', _Set2_data, LUTSIZE)
Set3 = colors.LinearSegmentedColormap('Set3', _Set3_data, LUTSIZE)
Spectral = colors.LinearSegmentedColormap('Spectral', _Spectral_data, LUTSIZE)
YlGn = colors.LinearSegmentedColormap('YlGn', _YlGn_data, LUTSIZE)
YlGnBu = colors.LinearSegmentedColormap('YlGnBu', _YlGnBu_data, LUTSIZE)
YlOrBr = colors.LinearSegmentedColormap('YlOrBr', _YlOrBr_data, LUTSIZE)
YlOrRd = colors.LinearSegmentedColormap('YlOrRd', _YlOrRd_data, LUTSIZE)
gist_earth = colors.LinearSegmentedColormap('gist_earth', _gist_earth_data, LUTSIZE)
gist_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data, LUTSIZE)
gist_heat = colors.LinearSegmentedColormap('gist_heat', _gist_heat_data, LUTSIZE)
gist_ncar = colors.LinearSegmentedColormap('gist_ncar', _gist_ncar_data, LUTSIZE)
gist_rainbow = colors.LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, LUTSIZE)
gist_stern = colors.LinearSegmentedColormap('gist_stern', _gist_stern_data, LUTSIZE)
gist_yarg = colors.LinearSegmentedColormap('gist_yarg', _gist_yarg_data, LUTSIZE)
datad['Accent']=_Accent_data
datad['Blues']=_Blues_data
datad['BrBG']=_BrBG_data
datad['BuGn']=_BuGn_data
datad['BuPu']=_BuPu_data
datad['Dark2']=_Dark2_data
datad['GnBu']=_GnBu_data
datad['Greens']=_Greens_data
datad['Greys']=_Greys_data
datad['Oranges']=_Oranges_data
datad['OrRd']=_OrRd_data
datad['Paired']=_Paired_data
datad['Pastel1']=_Pastel1_data
datad['Pastel2']=_Pastel2_data
datad['PiYG']=_PiYG_data
datad['PRGn']=_PRGn_data
datad['PuBu']=_PuBu_data
datad['PuBuGn']=_PuBuGn_data
datad['PuOr']=_PuOr_data
datad['PuRd']=_PuRd_data
datad['Purples']=_Purples_data
datad['RdBu']=_RdBu_data
datad['RdGy']=_RdGy_data
datad['RdPu']=_RdPu_data
datad['RdYlBu']=_RdYlBu_data
datad['RdYlGn']=_RdYlGn_data
datad['Reds']=_Reds_data
datad['Set1']=_Set1_data
datad['Set2']=_Set2_data
datad['Set3']=_Set3_data
datad['Spectral']=_Spectral_data
datad['YlGn']=_YlGn_data
datad['YlGnBu']=_YlGnBu_data
datad['YlOrBr']=_YlOrBr_data
datad['YlOrRd']=_YlOrRd_data
datad['gist_earth']=_gist_earth_data
datad['gist_gray']=_gist_gray_data
datad['gist_heat']=_gist_heat_data
datad['gist_ncar']=_gist_ncar_data
datad['gist_rainbow']=_gist_rainbow_data
datad['gist_stern']=_gist_stern_data
datad['gist_yarg']=_gist_yarg_data
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def revcmap(data):
data_r = {}
for key, val in data.iteritems():
valnew = [(1.-a, b, c) for a, b, c in reversed(val)]
data_r[key] = valnew
return data_r
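# For example, reversing a minimal two-anchor ramp swaps the anchor values
# and mirrors their positions:
#     revcmap({'red': [(0., 0., 0.), (1., 1., 1.)]})
#     -> {'red': [(0.0, 1.0, 1.0), (1.0, 0.0, 0.0)]}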
cmapnames = datad.keys()
for cmapname in cmapnames:
cmapname_r = cmapname+'_r'
cmapdat_r = revcmap(datad[cmapname])
datad[cmapname_r] = cmapdat_r
locals()[cmapname_r] = colors.LinearSegmentedColormap(cmapname_r, cmapdat_r, LUTSIZE)
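# After this loop every map has a reversed twin available both as a module
# attribute and as an entry in datad, e.g.:
#     'Blues_r' in datad  # -> True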
| agpl-3.0 |
jstrobl/rts2 | scripts/rts2saf/rts2saf/analyze.py | 3 | 14329 | #!/usr/bin/python
# (C) 2013, Markus Wildi, [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
"""Analysis defines the extremes of FWHM and optionally flux.
"""
__author__ = '[email protected]'
import os
import numpy as np
import copy
from rts2saf.fitfunction import FitFunction
from rts2saf.fitdisplay import FitDisplay
from rts2saf.data import DataFitFwhm,DataFitFlux,ResultFit, ResultMeans
from rts2saf.ds9region import Ds9DisplayThread
class SimpleAnalysis(object):
"""Analysis of extremes of FWHM and optionally of flux.
:var debug: enable more debug output with --debug and --level
:var date: date string as it appears on plot
:var dataSxtr: list of :py:mod:`rts2saf.data.DataSxtr`
:var Ds9Display: start ``DS9`` and display FITS files with regions
:var FitDisplay: display fit result
:var ftwName: filter wheel name
:var ftName: filter name
:var focRes: focuser resolution as given in FOCUSER_RESOLUTION
:var ev: helper module for house keeping, :py:mod:`rts2saf.environ.Environment`
:var rt: run time configuration, :py:mod:`rts2saf.config.Configuration`, usually read from /usr/local/etc/rts2/rts2saf/rts2saf.cfg
:var logger: :py:mod:`rts2saf.log`
"""
def __init__(self,
debug=False,
date=None,
dataSxtr=None,
Ds9Display=False,
FitDisplay=False,
xdisplay = None,
ftwName=None,
ftName=None,
focRes=None,
ev=None,
rt=None,
logger=None):
self.debug=debug
self.date=date
self.dataSxtr=dataSxtr
self.Ds9Display=Ds9Display
self.FitDisplay=FitDisplay
self.xdisplay = xdisplay
self.ftwName=ftwName
self.ftName=ftName
self.focRes=focRes
self.ev=ev
self.rt= rt
self.logger=logger
self.dataFitFwhm=None
self.resultFitFwhm=None
self.resultMeansFwhm = None
#
self.dataFitFlux=None
self.resultFitFlux=None
self.resultMeansFlux = None
# ToDo must reside outside
self.fd=None
self.i_flux=None
def _fitFwhm(self):
minFitPos, minFitFwhm, fitPar, fitFlag= FitFunction(
dataFit=self.dataFitFwhm,
logger=self.logger,
).fitData()
if minFitPos:
self.logger.info('_fitFwhm: FWHM FOC_DEF: {0:5d} : fitted minimum position, {1:4.1f}px FWHM, {2} ambient temperature'.format(int(minFitPos), minFitFwhm, self.dataFitFwhm.ambientTemp))
else:
self.logger.warn('_fitFwhm: fit failed')
self.resultFitFwhm=ResultFit()
return
self.resultFitFwhm=ResultFit(
ambientTemp = self.dataFitFwhm.ambientTemp,
ftName = self.dataFitFwhm.ftName,
extrFitPos = minFitPos,
extrFitVal = minFitFwhm,
fitPar = fitPar,
fitFlag = fitFlag,
color = 'blue',
ylabel = 'FWHM [px]: blue',
titleResult = 'fwhm:{0:5d}'.format(int(minFitPos))
)
def _fitFlux(self):
maxFitPos, maxFitFlux, fitPar, fitFlag = FitFunction(
dataFit = self.dataFitFlux,
logger = self.logger,
).fitData()
if fitFlag:
self.logger.info('_fitFlux: Flux FOC_DEF: {0:5d} : fitted maximum position, {1:4.1f}[a.u.] Flux, {2} ambient temperature'.format(int(maxFitPos), maxFitFlux, self.dataFitFlux.ambientTemp))
else:
self.logger.warn('_fitFlux: fit failed')
self.resultFitFlux=ResultFit()
return
self.resultFitFlux=ResultFit(
ambientTemp = self.dataFitFlux.ambientTemp,
ftName = self.dataFitFlux.ftName,
extrFitPos = maxFitPos,
extrFitVal= maxFitFlux,
fitPar = fitPar,
fitFlag = fitFlag,
color = 'red',
ylabel = 'FWHM [px]: blue Flux [a.u.]: red',
titleResult = 'fwhm:{0:5d}, flux: {1:5d}'
.format(int(self.resultFitFwhm.extrFitPos), int(maxFitPos))
)
def analyze(self):
"""Fit function to data and calculate weighted means.
:return: :py:mod:`rts2saf.data.ResultFit`, :py:mod:`rts2saf.data.ResultMeans`
"""
# ToDo lazy !!!!!!!!!!
# create an average and std
# ToDo decide which ftName from which ftw!!
if len(self.dataSxtr)>0:
bPth,fn=os.path.split(self.dataSxtr[0].fitsFn)
ftName=self.dataSxtr[0].ftName
else:
bPth='/tmp'
ftName='noFtName'
if len(self.dataSxtr)>0:
ambientTemp = self.dataSxtr[0].ambientTemp
else:
ambientTemp='noAmbientTemp'
plotFn = self.ev.expandToPlotFileName(plotFn='{0}/{1}.png'.format(bPth,ftName))
# fwhm
self.dataFitFwhm = DataFitFwhm(
dataSxtr = self.dataSxtr,
plotFn = plotFn,
ambientTemp = ambientTemp,
ftName = ftName,
)
self._fitFwhm()
# weighted means
if self.rt.cfg['WEIGHTED_MEANS']:
self.resultMeansFwhm = ResultMeans(dataFit=self.dataFitFwhm, logger=self.logger)
self.resultMeansFwhm.calculate(var='FWHM')
try:
self.i_flux = self.dataSxtr[0].fields.index('FLUX_MAX')
except (ValueError, IndexError):
# no 'FLUX_MAX' column among the extracted fields; skip the flux fit
pass
if self.i_flux is not None:
self.dataFitFlux= DataFitFlux(
dataSxtr=self.dataSxtr,
dataFitFwhm=self.dataFitFwhm,
plotFn=plotFn,
ambientTemp=self.dataSxtr[0].ambientTemp,
ftName=self.dataSxtr[0].ftName
)
self._fitFlux()
# weighted means
if self.rt.cfg['WEIGHTED_MEANS']:
self.resultMeansFlux=ResultMeans(dataFit=self.dataFitFlux, logger=self.logger)
self.resultMeansFlux.calculate(var='Flux')
return self.resultFitFwhm, self.resultMeansFwhm, self.resultFitFlux, self.resultMeansFlux
def display(self):
"""Plot data, fitted function for FWHM and optionally flux.
"""
# plot them through ds9 in parallel to the fit
ds9DisplayThread = None
if self.Ds9Display and self.xdisplay:
# start thread
ds9DisplayThread = Ds9DisplayThread(debug=self.debug, dataSxtr=self.dataSxtr, logger= self.logger)
ds9DisplayThread.start()
elif self.Ds9Display and not self.xdisplay:
self.logger.warn('analyze: OOOOOOOOPS, no ds9 display available')
if self.dataSxtr[0].assocFn is not None:
ft=FitDisplay(date = self.date, comment='ASSOC', logger=self.logger)
else:
ft=FitDisplay(date = self.date, logger=self.logger)
if self.i_flux is None:
ft.fitDisplay(dataFit=self.dataFitFwhm, resultFit=self.resultFitFwhm, display=self.FitDisplay, xdisplay = self.xdisplay)
else:
# plot FWHM but don't show
ft.fitDisplay(dataFit=self.dataFitFwhm, resultFit=self.resultFitFwhm, show=False, display=self.FitDisplay, xdisplay = self.xdisplay)
ft.fitDisplay(dataFit=self.dataFitFlux, resultFit=self.resultFitFlux, display=self.FitDisplay, xdisplay = self.xdisplay)
# very important (otherwise all plots show up in next show())
ft.ax1=None
# http://stackoverflow.com/questions/741877/how-do-i-tell-matplotlib-that-i-am-done-with-a-plot
ft.fig.clf()
ft.fig=None
ft=None
# stop ds9 display thread
if self.Ds9Display and self.xdisplay:
ds9DisplayThread.join(timeout=1.)
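# Typical call sequence (illustrative sketch only; the focuser resolution and
# the variable names for the prepared inputs are assumptions, not values taken
# from this module):
#     an = SimpleAnalysis(dataSxtr=dataSxtr, focRes=2.0, ev=ev, rt=rt, logger=logger)
#     fitFwhm, meansFwhm, fitFlux, meansFlux = an.analyze()
#     if fitFwhm.fitFlag: an.display()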
import numpy
from itertools import ifilterfalse
from itertools import ifilter
# ToDo: at the moment this class is a demonstrator
class CatalogAnalysis(object):
"""Analysis of extremes of FWHM and optionally of flux restricted to additional criteria based on SExtractor parameters.
:var debug: enable more debug output with --debug and --level
:var date: date string as it appears on plot
:var dataSxtr: list of :py:mod:`rts2saf.data.DataSxtr`
:var Ds9Display: start ``DS9`` and display FITS files with regions
:var FitDisplay: display fit result
:var ftwName: filter wheel name
:var ftName: filter name
:var moduleName: name of the module of type :py:mod:`rts2saf.criteria_radius.Criteria` or similar
:var focRes: focuser resolution as given in FOCUSER_RESOLUTION
:var rt: run time configuration, :py:mod:`rts2saf.config.Configuration`, usually read from /usr/local/etc/rts2/rts2saf/rts2saf.cfg
:var ev: helper module for house keeping, :py:mod:`rts2saf.environ.Environment`
:var logger: :py:mod:`rts2saf.log`
"""
def __init__(self,
debug=False,
date = None,
dataSxtr=None,
Ds9Display=False,
FitDisplay=False,
xdisplay = None,
ftwName=None,
ftName=None,
focRes=None,
moduleName=None,
ev=None,
rt=None,
logger=None):
self.debug=debug
self.date = date
self.dataSxtr=dataSxtr
self.Ds9Display=Ds9Display
self.FitDisplay=FitDisplay
self.xdisplay = xdisplay
self.ftwName=ftwName
self.ftName=ftName
self.focRes=focRes
self.moduleName=moduleName
self.ev=ev
self.rt=rt
self.logger=logger
self.criteriaModule=None
self.cr=None
self.i_flux=None
self.anAcc = None
self.anRej = None
self.anAll = None
def _loadCriteria(self):
# http://stackoverflow.com/questions/951124/dynamic-loading-of-python-modules
# Giorgio Gelardi ["*"]!
self.criteriaModule=__import__(self.moduleName, fromlist=["*"])
self.cr=self.criteriaModule.Criteria(dataSxtr=self.dataSxtr, rt=self.rt)
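# With a non-empty fromlist, __import__ returns the submodule itself rather
# than the top-level package, e.g. __import__('rts2saf.criteria_radius',
# fromlist=["*"]) yields the criteria_radius module.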
def selectAndAnalyze(self):
"""Fit function for accepted, rejected and all data and calculate resp. weighted means .
:return: :py:mod:`rts2saf.data.ResultFit` for accepted, rejected and all data, resp. :py:mod:`rts2saf.data.ResultMeans`
"""
self._loadCriteria()
# ToDo glitch
i_f = self.dataSxtr[0].fields.index('FWHM_IMAGE')
acceptedDataSxtr=list()
rejectedDataSxtr=list()
for dSx in self.dataSxtr:
adSx=copy.deepcopy(dSx)
acceptedDataSxtr.append(adSx)
adSx.catalog= list(ifilter(self.cr.decide, adSx.catalog))
nsFwhm=np.asarray([x[i_f] for x in adSx.catalog])
adSx.fwhm=numpy.median(nsFwhm)
adSx.stdFwhm=numpy.std(nsFwhm)
try:
self.i_flux = adSx.fields.index('FLUX_MAX')
except ValueError:
# 'FLUX_MAX' not among the extracted fields
pass
if self.i_flux is not None:
adSx.fillFlux(i_flux= self.i_flux, logger=self.logger)
rdSx=copy.deepcopy(dSx)
rejectedDataSxtr.append(rdSx)
rdSx.catalog= list(ifilterfalse(self.cr.decide, rdSx.catalog))
nsFwhm=np.asarray([ x[i_f] for x in rdSx.catalog])
rdSx.fwhm=numpy.median(nsFwhm)
rdSx.stdFwhm=numpy.std(nsFwhm)
if self.i_flux is not None:
rdSx.fillFlux(i_flux= self.i_flux, logger=self.logger)
self.anAcc=SimpleAnalysis(
debug=self.debug,
date=self.date,
dataSxtr=acceptedDataSxtr,
Ds9Display=self.Ds9Display,
FitDisplay=self.FitDisplay,
xdisplay = self.xdisplay,
focRes=self.focRes,
ev=self.ev,
rt=self.rt,
logger=self.logger)
accRFtFwhm, accRMnsFwhm, accRFtFlux, accRMnsFlux=self.anAcc.analyze()
if self.Ds9Display or self.FitDisplay:
if accRFtFwhm.fitFlag:
self.anAcc.display()
self.anRej=SimpleAnalysis(
debug=self.debug,
date=self.date,
dataSxtr=rejectedDataSxtr,
Ds9Display=self.Ds9Display,
FitDisplay=self.FitDisplay,
focRes=self.focRes,
ev=self.ev,
rt=self.rt,
logger=self.logger)
rejRFtFwhm, rejRMnsFwhm, rejRFtFlux, rejRMnsFlux = self.anRej.analyze()
# if self.Ds9Display or self.FitDisplay:
# if accRFtFwhm.fitFlag:
# an.display()
#
self.anAll=SimpleAnalysis(
debug=self.debug,
date=self.date,
dataSxtr=self.dataSxtr,
Ds9Display=self.Ds9Display,
FitDisplay=self.FitDisplay,
focRes=self.focRes,
ev=self.ev,
rt=self.rt,
logger=self.logger)
allRFtFwhm, allRMnsFwhm, allRFtFlux, allRMnsFlux=self.anAll.analyze()
#
# if self.Ds9Display or self.FitDisplay:
# if accRFtFwhm.fitFlag:
# an.display()
# ToDo here are three objects
# ToDo expand to Flux
return accRFtFwhm, rejRFtFwhm, allRFtFwhm, accRMnsFwhm, rejRMnsFwhm, allRMnsFwhm
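# Typical call sequence (illustrative sketch only; the inputs are assumptions,
# not values taken from this module):
#     ca = CatalogAnalysis(dataSxtr=dataSxtr, moduleName='rts2saf.criteria_radius',
#                          focRes=2.0, ev=ev, rt=rt, logger=logger)
#     accFit, rejFit, allFit, accMns, rejMns, allMns = ca.selectAndAnalyze()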
| lgpl-3.0 |
liam2/larray | larray/core/axis.py | 2 | 136849 | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
import fnmatch
import re
import sys
import warnings
from itertools import product
import numpy as np
import pandas as pd
from larray.core.abstractbases import ABCAxis, ABCAxisReference, ABCArray
from larray.core.expr import ExprNode
from larray.core.group import (Group, LGroup, IGroup, IGroupMaker, _to_tick, _to_ticks, _to_key, _seq_summary,
_range_to_slice, _seq_group_to_name, _translate_group_key_hdf, remove_nested_groups)
from larray.util.oset import *
from larray.util.misc import (duplicates, array_lookup2, ReprString, index_by_id, renamed_to, common_type, LHDFStore,
lazy_attribute, _isnoneslice, unique_list, unique_multi, Product)
from larray.util.compat import (basestring, PY2, unicode, long)
np_frompyfunc = np.frompyfunc
class Axis(ABCAxis):
r"""
Represents an axis. It consists of a name and a list of labels.
Parameters
----------
labels : array-like or int
collection of values usable as labels, i.e. numbers or strings or the size of the axis.
In the last case, a wildcard axis is created.
name : str or Axis, optional
name of the axis or another instance of Axis. In the second case, the name of the other axis is simply copied.
By default None.
Attributes
----------
labels : array-like or int
collection of values usable as labels, i.e. numbers or strings
name : str
name of the axis. None in the case of an anonymous axis.
Examples
--------
>>> gender = Axis(['M', 'F'], 'gender')
>>> gender
Axis(['M', 'F'], 'gender')
>>> gender.name
'gender'
>>> list(gender.labels)
['M', 'F']
using a string definition
>>> gender = Axis('gender=M,F')
>>> gender
Axis(['M', 'F'], 'gender')
>>> age = Axis('age=0..9')
>>> age
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'age')
>>> code = Axis('code=A,C..E,F..G,Z')
>>> code
Axis(['A', 'C', 'D', 'E', 'F', 'G', 'Z'], 'code')
a wildcard axis only needs a length
>>> row = Axis(10, 'row')
>>> row
Axis(10, 'row')
>>> row.labels
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
axes can also be defined without name
>>> anonymous = Axis('0..4')
>>> anonymous
Axis([0, 1, 2, 3, 4], None)
"""
__slots__ = ('name', '__mapping', '__sorted_keys', '__sorted_values', '_labels', '_length', '_iswildcard')
# ticks instead of labels?
def __init__(self, labels, name=None):
if isinstance(labels, Group) and name is None:
name = labels.axis
if isinstance(name, Axis):
name = name.name
if isinstance(labels, basestring):
if '=' in labels:
name, labels = [o.strip() for o in labels.split('=')]
elif '..' not in labels and ',' not in labels:
warnings.warn("Arguments 'name' and 'labels' of Axis constructor have been inverted in "
"version 0.22 of larray. Please check you are passing labels first and name "
"as second argument.", FutureWarning, stacklevel=2)
name, labels = labels, name
# make sure we do not have np.str_ as it causes problems down the
# line with xlwings. Cannot use isinstance to check that though.
name_is_python_str = type(name) is unicode or type(name) is bytes
if isinstance(name, basestring) and not name_is_python_str:
name = unicode(name)
if name is not None and not isinstance(name, (int, basestring)):
raise TypeError("Axis name should be None, int or str but is: %s (%s)" % (name, type(name).__name__))
self.name = name
self._labels = None
self.__mapping = None
self.__sorted_keys = None
self.__sorted_values = None
self._length = None
self._iswildcard = False
# set _labels, _length and _iswildcard via the property
self.labels = labels
@property
def _mapping(self):
# To map labels with their positions
mapping = self.__mapping
if mapping is None:
labels = self._labels
# TODO: this would be more efficient for wildcard axes but does not work in all cases
# mapping = labels
mapping = {label: i for i, label in enumerate(labels)}
if not self._iswildcard:
# we have no choice but to do that, otherwise we could not make geo['Brussels'] work efficiently
# (we could have to traverse the whole mapping checking for each name, which is not an option)
# TODO: only do this if labels.dtype is object, or add "contains_lgroup" flag in above code
# (if any(...))
mapping.update({label.name: i for i, label in enumerate(labels) if isinstance(label, Group)})
self.__mapping = mapping
return mapping
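# e.g. for Axis(['M', 'F'], 'gender') this lazily builds and caches
# {'M': 0, 'F': 1}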
def _update_key_values(self):
mapping = self._mapping
if mapping:
sorted_keys, sorted_values = tuple(zip(*sorted(mapping.items())))
else:
sorted_keys, sorted_values = (), ()
keys, values = np.array(sorted_keys), np.array(sorted_values)
self.__sorted_keys = keys
self.__sorted_values = values
return keys, values
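# e.g. for Axis(['M', 'F'], 'gender') this yields the parallel sorted arrays
# ['F', 'M'] and [1, 0], used for vectorized label-to-index lookups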
@property
def _sorted_keys(self):
if self.__sorted_keys is None:
keys, _ = self._update_key_values()
return self.__sorted_keys
@property
def _sorted_values(self):
values = self.__sorted_values
if values is None:
_, values = self._update_key_values()
return values
@lazy_attribute
def i(self):
r"""
Allows to define a subset using positions along the axis
instead of labels.
Examples
--------
>>> from larray import ndtest
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> arr = ndtest([sex, time])
>>> arr
sex\\time 2007 2008 2009 2010
M 0 1 2 3
F 4 5 6 7
>>> arr[time.i[0, -1]]
sex\\time 2007 2010
M 0 3
F 4 7
"""
return IGroupMaker(self)
@property
def labels(self):
r"""
labels of the axis.
"""
return self._labels
@labels.setter
def labels(self, labels):
if labels is None:
raise TypeError("labels should be a sequence or a single int, not None")
if isinstance(labels, (int, long, np.integer)):
length = labels
labels = np.arange(length)
iswildcard = True
else:
labels = _to_ticks(labels, parse_single_int=True)
length = len(labels)
iswildcard = False
self._length = length
self._labels = labels
self._iswildcard = iswildcard
def by(self, length, step=None, template=None):
r"""Split axis into several groups of specified length.
Parameters
----------
length : int
length of groups
step : int, optional
step between groups. Defaults to length.
template : str, optional
template describing how group names are generated. It is a string containing specific arguments
written inside brackets {}. Available arguments are {start} and {end} representing the first and last label
of each group. By default, template is defined as '{start}:{end}'.
Notes
-----
step can be smaller than length, in which case, this will produce overlapping groups.
Returns
-------
list of Group
Examples
--------
>>> age = Axis('age=0..6')
>>> age
Axis([0, 1, 2, 3, 4, 5, 6], 'age')
>>> age.by(3)
(age.i[0:3] >> '0:2', age.i[3:6] >> '3:5', age.i[6:7] >> '6')
>>> age.by(3, step=2)
(age.i[0:3] >> '0:2', age.i[2:5] >> '2:4', age.i[4:7] >> '4:6', age.i[6:7] >> '6')
>>> age.by(3, template='{start}-{end}')
(age.i[0:3] >> '0-2', age.i[3:6] >> '3-5', age.i[6:7] >> '6')
"""
return self[:].by(length, step, template)
def extend(self, labels):
r"""
Append new labels to an axis or increase its length in case of wildcard axis.
Note that `extend` does not occur in-place: a new axis object is allocated, filled and returned.
Parameters
----------
labels : int, iterable or Axis
New labels to append to the axis. Passing directly another Axis is also possible.
If the current axis is a wildcard axis, passing a length is enough.
Returns
-------
Axis
A copy of the axis with new labels appended to it or with increased length (if wildcard).
Examples
--------
>>> time = Axis([2007, 2008], 'time')
>>> time
Axis([2007, 2008], 'time')
>>> time.extend([2009, 2010])
Axis([2007, 2008, 2009, 2010], 'time')
>>> waxis = Axis(10, 'wildcard_axis')
>>> waxis
Axis(10, 'wildcard_axis')
>>> waxis.extend(5)
Axis(15, 'wildcard_axis')
>>> waxis.extend([11, 12, 13, 14])
Traceback (most recent call last):
...
ValueError: Axis to append must (not) be wildcard if self is (not) wildcard
"""
other = labels if isinstance(labels, Axis) else Axis(labels)
if self.iswildcard != other.iswildcard:
raise ValueError("Axis to append must (not) be wildcard if self is (not) wildcard")
labels = self._length + other._length if self.iswildcard else np.append(self.labels, other.labels)
return Axis(labels, self.name)
def split(self, sep='_', names=None, regex=None, return_labels=False):
r"""Split axis and returns a list of Axis.
Parameters
----------
sep : str, optional
Delimiter to use for splitting. Defaults to '_'.
When `regex` is provided, the delimiter is only used on `names` if given as one string or on axis name if
`names` is None.
names : str or list of str, optional
Names of resulting axes. Defaults to None.
regex : str, optional
Use regex instead of delimiter to split labels. Defaults to None.
return_labels : bool, optional
Whether or not split labels must be returned (as a tuple of tuples). These labels are suitable for indexing
via array.points[labels]. Defaults to False.
Returns
-------
list of Axis or (list of Axis, array-like)
Examples
--------
>>> a_b = Axis('a_b=a0_b0,a0_b1,a0_b2,a1_b0,a1_b1,a1_b2')
>>> a_b.split()
[Axis(['a0', 'a1'], 'a'), Axis(['b0', 'b1', 'b2'], 'b')]
"""
if names is None:
if self.name is None:
names = None
elif sep not in self.name:
raise ValueError('{} not found in self name ({})'.format(sep, self.name))
else:
names = self.name.split(sep)
elif isinstance(names, str):
if sep not in names:
raise ValueError('{} not found in names ({})'.format(sep, names))
else:
names = names.split(sep)
else:
assert all(isinstance(name, str) for name in names)
if not regex:
# np.char.split does not work on arrays with object dtype
labels = self.labels if self.labels.dtype.kind != 'O' else self.labels.astype(str)
# gives us an array of lists
split_labels = np.char.split(labels, sep)
else:
match = re.compile(regex).match
split_labels = [match(l).groups() for l in self.labels]
if names is None:
names = [None] * len(split_labels)
indexing_labels = zip(*split_labels)
if return_labels:
indexing_labels = tuple(indexing_labels)
# not using np.unique because we want to keep the original order
split_axes = [Axis(unique_list(ax_labels), name) for ax_labels, name in zip(indexing_labels, names)]
if return_labels:
indexing_labels = tuple(axis[labels] for axis, labels in zip(split_axes, indexing_labels))
return split_axes, indexing_labels
else:
return split_axes
def insert(self, new_labels, before=None, after=None):
r"""
Return a new axis with `new_labels` inserted before `before` or after `after`.
Parameters
----------
new_labels : scalar, tuple/list/array of scalars, Group or Axis
New label(s) to append to the axis.
before : scalar or Group, optional
Label or group before which to insert `new_labels`.
after : scalar or Group, optional
Label or group after which to insert `new_labels`.
Returns
-------
Axis
A copy of the axis with the new labels inserted.
Examples
--------
>>> time = Axis([2007, 2009], 'time')
>>> time.insert(2008, before=2009)
Axis([2007, 2008, 2009], 'time')
>>> time.insert(2008, after=2007)
Axis([2007, 2008, 2009], 'time')
>>> time.insert(2008, before=time.i[1])
Axis([2007, 2008, 2009], 'time')
>>> time.insert(2008, after=time.i[0])
Axis([2007, 2008, 2009], 'time')
>>> b = Axis(['b1', 'b2'], 'b')
>>> b.insert('b1.5', before='b2')
Axis(['b1', 'b1.5', 'b2'], 'b')
>>> b.insert(['b1.1', 'b1.2'], before='b2')
Axis(['b1', 'b1.1', 'b1.2', 'b2'], 'b')
>>> c = Axis(['c1', 'c2'], 'c')
>>> b.insert(c, before='b2')
Axis(['b1', 'c1', 'c2', 'b2'], 'b')
"""
if sum([before is not None, after is not None]) != 1:
raise ValueError("must specify exactly one of before or after")
if before is not None:
before = self.index(before)
else:
assert after is not None
before = self.index(after) + 1
if isinstance(new_labels, Axis):
new_labels = new_labels.labels
elif isinstance(new_labels, Group):
new_labels = new_labels.eval()
else:
if np.isscalar(new_labels):
new_labels = [new_labels]
new_labels = np.asarray(new_labels)
current_labels = self.labels
labels_type = common_type((current_labels, new_labels))
if labels_type is object:
# astype always copies, while asarray only copies if necessary
current_labels = np.asarray(current_labels, dtype=object)
new_labels = np.asarray(new_labels, dtype=object)
# not using np.insert to avoid inserted string labels being truncated (because of current_labels.dtype)
res_labels = np.concatenate((current_labels[:before], new_labels, current_labels[before:]))
return Axis(res_labels, self.name)
@property
def iswildcard(self):
return self._iswildcard
def _group(self, *args, **kwargs):
r"""
Deprecated.
Parameters
----------
*args
(collection of) selected label(s) to form a group.
**kwargs
name of the group. There is no other accepted keywords.
Examples
--------
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> odd_years = time._group([2007, 2009], name='odd_years')
>>> odd_years
time[2007, 2009] >> 'odd_years'
"""
name = kwargs.pop('name', None)
if kwargs:
raise ValueError("invalid keyword argument(s): %s" % list(kwargs.keys()))
key = args[0] if len(args) == 1 else args
return self[key] >> name if name else self[key]
def group(self, *args, **kwargs):
group_name = kwargs.pop('name', None)
key = args[0] if len(args) == 1 else args
syntax = '{}[{}]'.format(self.name if self.name else 'axis', key)
if group_name is not None:
syntax += ' >> {}'.format(repr(group_name))
raise NotImplementedError('Axis.group is deprecated. Use {} instead.'.format(syntax))
def all(self, name=None):
r"""
(Deprecated) Returns a group containing all labels.
Parameters
----------
name : str, optional
Name of the group. If not provided, name is set to 'all'.
"""
axis_name = self.name if self.name else 'axis'
group_name = name if name else 'all'
raise NotImplementedError('Axis.all is deprecated. Use {}[:] >> {} instead.'
.format(axis_name, repr(group_name)))
# TODO: make this method private
def subaxis(self, key):
r"""
Returns an axis for a sub-array.
Parameters
----------
key : int, or collection (list, slice, array, Array) of them
Indices-based key to use for the new axis.
Returns
-------
Axis
Subaxis. If key is a None slice, the original Axis is returned.
If key is an Array, the list of axes is returned.
Examples
--------
>>> age = Axis(range(100), 'age')
>>> age.subaxis(range(10, 19))
Axis([10, 11, 12, 13, 14, 15, 16, 17, 18], 'age')
"""
if isinstance(key, slice) and key.start is None and key.stop is None and key.step is None:
return self
if isinstance(key, ABCArray):
return key.axes
# TODO: compute length for wildcard axes more efficiently
labels = len(self.labels[key]) if self.iswildcard else self.labels[key]
# we must NOT modify the axis name, even though this creates a new axis that is independent from the original
# one because the original name is probably what users will want to use to filter
return Axis(labels, self.name)
def iscompatible(self, other):
r"""
Checks if self is compatible with another axis.
* Two non-wildcard axes are compatible if they have the same name and labels.
* A wildcard axis of length 1 is compatible with any other axis sharing the same name.
* A wildcard axis of length > 1 is compatible with any axis of the same length or length 1 and sharing the
same name.
Parameters
----------
other : Axis
Axis to compare with.
Returns
-------
bool
True if input axis is compatible with self, False otherwise.
Examples
--------
>>> a10 = Axis(range(10), 'a')
>>> wa10 = Axis(10, 'a')
>>> wa1 = Axis(1, 'a')
>>> b10 = Axis(range(10), 'b')
>>> a10.iscompatible(b10)
False
>>> a10.iscompatible(wa10)
True
>>> a10.iscompatible(wa1)
True
>>> wa1.iscompatible(b10)
False
"""
if self is other:
return True
if not isinstance(other, Axis):
return False
if self.name is not None and other.name is not None and self.name != other.name:
return False
if self.iswildcard or other.iswildcard:
# wildcard axes of length 1 match with anything
# wildcard axes of length > 1 match with equal len or len 1
return len(self) == 1 or len(other) == 1 or len(self) == len(other)
else:
return np.array_equal(self.labels, other.labels)
def equals(self, other):
r"""
Checks if self is equal to another axis.
Two axes are equal if they have the same name and label(s).
Parameters
----------
other : Axis
Axis to compare with.
Returns
-------
bool
True if input axis is equal to self, False otherwise.
Examples
--------
>>> age = Axis(range(5), 'age')
>>> age_2 = Axis(5, 'age')
>>> age_3 = Axis(range(5), 'young children')
>>> age_4 = Axis([0, 1, 2, 3, 4], 'age')
>>> age.equals(age_2)
False
>>> age.equals(age_3)
False
>>> age.equals(age_4)
True
"""
if self is other:
return True
# this might need to change if we ever support wildcard axes with real labels
return isinstance(other, Axis) and self.name == other.name and self.iswildcard == other.iswildcard and \
(len(self) == len(other) if self.iswildcard else np.array_equal(self.labels, other.labels))
def matching(self, deprecated=None, pattern=None, regex=None):
r"""
Returns a group with all the labels matching the specified pattern or regular expression.
Parameters
----------
pattern : str or Group, optional
Pattern to match.
* `?` matches any single character
* `*` matches any number of characters
* [seq] matches any character in seq
* [!seq] matches any character not in seq
To match any of the special characters above, wrap the character in brackets. For example, `[?]` matches
the character `?`.
regex : str or Group, optional
Regular expression pattern to match. Regular expressions are more powerful than what the simple patterns
supported by the `pattern` argument but are also more complex to write.
See `Regular Expression <https://docs.python.org/3/library/re.html>`_ for more details about how to build
a regular expression pattern.
Returns
-------
LGroup
Group containing all the labels matching the pattern.
Examples
--------
>>> people = Axis(['Bruce Wayne', 'Bruce Willis', 'Waldo', 'Arthur Dent', 'Harvey Dent'], 'people')
>>> # All labels starting with "A" and ending with "t"
>>> people.matching(pattern='A*t')
people['Arthur Dent']
>>> # All labels containing "W" and ending with "s"
>>> people.matching(pattern='*W*s')
people['Bruce Willis']
>>> # All labels with exactly 5 characters
>>> people.matching(pattern='?????')
people['Waldo']
>>> # All labels starting with either "A" or "B"
>>> people.matching(pattern='[AB]*')
people['Bruce Wayne', 'Bruce Willis', 'Arthur Dent']
Regular expressions are more powerful but usually harder to write and less readable
>>> # All labels starting with "W" and ending with "o"
>>> people.matching(regex='A.*t')
people['Arthur Dent']
>>> # All labels not containing character "a"
>>> people.matching(regex='^[^a]*$')
people['Bruce Willis', 'Arthur Dent']
"""
if deprecated is not None:
assert pattern is None and regex is None
regex = deprecated
warnings.warn("Axis.matching() first argument will change to `pattern` in a later release. "
"If your pattern is a regular expression, use Axis.matching(regex='yourpattern')."
"If your pattern is a 'simple pattern', use Axis.matching(pattern='yourpattern').",
FutureWarning, stacklevel=2)
if pattern is not None and regex is not None:
raise ValueError("Cannot use both `pattern` and `regex` arguments at the same time in Axis.matching()")
if pattern is None and regex is None:
raise ValueError("Must provide either `pattern` or `regex` argument in Axis.matching()")
if isinstance(regex, Group):
regex = regex.eval()
if pattern is not None:
if isinstance(pattern, Group):
pattern = pattern.eval()
regex = fnmatch.translate(pattern)
match = re.compile(regex).match
return LGroup([v for v in self.labels if match(v)], axis=self)
matches = renamed_to(matching, 'matches')
def startingwith(self, prefix):
r"""
Returns a group with the labels starting with the specified string.
Parameters
----------
prefix : str or Group
The prefix to search for.
Returns
-------
LGroup
Group containing all the labels starting with the given string.
Examples
--------
>>> people = Axis(['Bruce Wayne', 'Bruce Willis', 'Waldo', 'Arthur Dent', 'Harvey Dent'], 'people')
>>> people.startingwith('Bru')
people['Bruce Wayne', 'Bruce Willis']
"""
if isinstance(prefix, Group):
prefix = prefix.eval()
return LGroup([v for v in self.labels if v.startswith(prefix)], axis=self)
startswith = renamed_to(startingwith, 'startswith')
def endingwith(self, suffix):
r"""
Returns a group with the labels ending with the specified string.
Parameters
----------
suffix : str or Group
The suffix to search for.
Returns
-------
LGroup
Group containing all the labels ending with the given string.
Examples
--------
>>> people = Axis(['Bruce Wayne', 'Bruce Willis', 'Waldo', 'Arthur Dent', 'Harvey Dent'], 'people')
>>> people.endingwith('Dent')
people['Arthur Dent', 'Harvey Dent']
"""
if isinstance(suffix, Group):
suffix = suffix.eval()
return LGroup([v for v in self.labels if v.endswith(suffix)], axis=self)
endswith = renamed_to(endingwith, 'endswith')
def containing(self, substring):
r"""
Returns a group with all the labels containing the specified substring.
Parameters
----------
substring : str or Group
The substring to search for.
Returns
-------
LGroup
Group containing all the labels containing the substring.
Examples
--------
>>> people = Axis(['Bruce Wayne', 'Bruce Willis', 'Arthur Dent'], 'people')
>>> people.containing('Will')
people['Bruce Willis']
"""
if isinstance(substring, Group):
substring = substring.eval()
return LGroup([v for v in self.labels if substring in v], axis=self)
def __len__(self):
return self._length
def __iter__(self):
return iter([IGroup(i, None, self) for i in range(self._length)])
def __getitem__(self, key):
r"""
Returns a group (list or single element) of label(s) usable in .sum or .filter.
`key` is a label-based key (other axes, slices and fancy indexing are supported).
Returns
-------
Group
group containing selected label(s)/position(s).
Notes
-----
key is label-based (slice and fancy indexing are supported)
"""
# if isinstance(key, basestring):
# key = to_keys(key)
def isscalar(k):
return np.isscalar(k) or (isinstance(k, Group) and np.isscalar(k.key))
if isinstance(key, Axis):
key = key.labels
# the not all(np.isscalar) part is necessary to support axis[a, b, c] and axis[[a, b, c]]
if isinstance(key, (tuple, list)) and not all(isscalar(k) for k in key):
# this creates a group for each key if it wasn't and retargets IGroup
list_res = [self[k] for k in key]
return list_res if isinstance(key, list) else tuple(list_res)
# allow targeting a label from an aggregated axis with the group which created it
elif (not isinstance(self, AxisReference) and
isinstance(key, Group) and
isinstance(key.axis, Axis) and
key.axis.name == self.name and
key.name in self):
return LGroup(key.name, None, self)
# elif isinstance(key, basestring) and key in self:
# TODO: this is an awful workaround to avoid the "processing" of string keys which exist as is in the axis
# (probably because the string was used in an aggregate function to create the label)
# res = LGroup(slice(None), None, self)
# res.key = key
# return res
name = key.name if isinstance(key, Group) else None
return LGroup(key, name, self)
def _ipython_key_completions_(self):
return list(self.labels)
def __contains__(self, key):
# TODO: ideally, _to_tick shouldn't be necessary, the __hash__ and __eq__ of Group should include this
return _to_tick(key) in self._mapping
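# illustrative sketch: 'M' in Axis('sex=M,F') is True (keys are normalized through _to_tick first)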
def __hash__(self):
return id(self)
# needed for Python 2 only (on Python2 all classes defining __slots__ need to define __getstate__/__setstate__ too)
def __getstate__(self):
return self.name, self._length if self._iswildcard else self._labels
# needed for Python 2 only
def __setstate__(self, state):
name, labels = state
self.name = name
self._labels = None
self._length = None
self._iswildcard = False
self.__mapping = None
self.__sorted_keys = None
self.__sorted_values = None
# set _labels, _length and _iswildcard via the property
self.labels = labels
def _is_key_type_compatible(self, key):
key_kind = np.dtype(type(key)).kind
label_kind = self.labels.dtype.kind
# on Python2, ascii-only unicode string can match byte strings (and vice versa), so we shouldn't be more picky
# here than dict hashing
str_key = key_kind in ('S', 'U')
str_label = label_kind in ('S', 'U')
py2_str_match = PY2 and str_key and str_label
# object kind can match anything
return key_kind == label_kind or key_kind == 'O' or label_kind == 'O' or py2_str_match
def index(self, key):
r"""
Translates a label key to its numerical index counterpart.
Parameters
----------
key : key
Everything usable as a key.
Returns
-------
(array of) int
Numerical index(ices) of (all) label(s) represented by the key
Notes
-----
Fancy indexes with boolean vectors are passed through unmodified
Examples
--------
>>> people = Axis(['John Doe', 'Bruce Wayne', 'Bruce Willis', 'Waldo', 'Arthur Dent', 'Harvey Dent'], 'people')
>>> people.index('Waldo')
3
>>> people.index(people.containing('Bruce'))
array([1, 2])
"""
mapping = self._mapping
if isinstance(key, Group) and key.axis is not self and key.axis is not None:
try:
# XXX: this is potentially very expensive if key.key is an array or list and should be tried as a last
# resort
potential_tick = _to_tick(key)
# avoid matching 0 against False or 0.0; note that None has object dtype and so always passes this test
if self._is_key_type_compatible(potential_tick):
return mapping[potential_tick]
# we must catch TypeError because key might not be hashable (eg slice)
# IndexError is for when mapping is an ndarray
except (KeyError, TypeError, IndexError):
pass
if isinstance(key, basestring):
# try the key as-is to allow getting at ticks with special characters (",", ":", ...)
try:
# avoid matching 0 against False or 0.0; note that Group keys have object dtype and so always pass
# this test
if self._is_key_type_compatible(key):
return mapping[key]
# we must catch TypeError because key might not be hashable (eg slice)
# IndexError is for when mapping is an ndarray
except (KeyError, TypeError, IndexError):
pass
# transform "specially formatted strings" for slices, lists, LGroup and IGroup to actual objects
key = _to_key(key)
if not PY2 and isinstance(key, range):
key = list(key)
# this can happen when key was passed as a string and converted to a Group via _to_key
if isinstance(key, Group) and isinstance(key.axis, basestring) and key.axis != self.name:
raise KeyError(key)
if isinstance(key, IGroup):
if isinstance(key.axis, Axis):
assert key.axis is self
return key.key
if isinstance(key, LGroup):
# at this point we do not care about the axis nor the name
key = key.key
if isinstance(key, slice):
start = mapping[key.start] if key.start is not None else None
# stop is inclusive in the input key and exclusive in the output !
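# (illustrative sketch: with labels ['a0', 'a1', 'a2'], index(slice('a0', 'a1')) returns slice(0, 2, None))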
stop = mapping[key.stop] + 1 if key.stop is not None else None
return slice(start, stop, key.step)
elif isinstance(key, (tuple, list, OrderedSet)):
# TODO: the result should be cached
# Note that this is faster than array_lookup(np.array(key), mapping)
res = np.empty(len(key), int)
try:
for i, label in enumerate(_seq_group_to_name(key)):
res[i] = mapping[label]
except KeyError:
for i, label in enumerate(key):
res[i] = mapping[label]
return res
elif isinstance(key, np.ndarray):
# handle fancy indexing with a ndarray of labels
# TODO: the result should be cached
# TODO: benchmark this against the tuple/list version above when mapping is large
# array_lookup is O(len(key) * log(len(mapping)))
# vs
# tuple/list version is O(len(key)) (dict.getitem is O(1))
# XXX: we might want to special case dtype bool, because in that case the mapping will in most cases be
# {False: 0, True: 1} or {False: 1, True: 0} and in those case key.astype(int) and (~key).astype(int)
# are MUCH faster
# see C:\Users\gdm\devel\lookup_methods.py and C:\Users\gdm\Desktop\lookup_methods.html
try:
return array_lookup2(_seq_group_to_name(key), self._sorted_keys, self._sorted_values)
except KeyError:
return array_lookup2(key, self._sorted_keys, self._sorted_values)
elif isinstance(key, ABCArray):
from .array import Array
return Array(self.index(key.data), key.axes)
else:
# the first mapping[key] above will cover most cases.
# This code path is only used if the key was given in "non-normalized" form
assert np.isscalar(key), "%s (%s) is not scalar" % (key, type(key))
# key is scalar (integer, float, string, ...)
if self._is_key_type_compatible(key):
return mapping[key]
else:
# print("diff dtype", )
raise KeyError(key)
translate = renamed_to(index, 'translate')
# FIXME: remove id
@property
def id(self):
if self.name is not None:
return self.name
else:
raise ValueError('Axis has no name, so no id')
def __str__(self):
name = str(self.name) if self.name is not None else '{?}'
return (name + '*') if self.iswildcard else name
def __repr__(self):
labels = len(self) if self.iswildcard else list(self.labels)
return 'Axis(%r, %r)' % (labels, self.name)
def labels_summary(self):
r"""
Returns a short representation of the labels.
Examples
--------
>>> Axis(100, 'age').labels_summary()
'0 1 2 ... 97 98 99'
"""
def repr_on_strings(v):
return repr(v) if isinstance(v, str) else str(v)
return _seq_summary(self.labels, repr_func=repr_on_strings)
# method factory
def _binop(opname):
r"""
Method factory to create binary operators special methods.
"""
fullname = '__%s__' % opname
def opmethod(self, other):
# give a chance to AxisCollection.__rXXX__ ops to trigger
if isinstance(other, AxisCollection):
# in this case we must indeed return NotImplemented, not raise NotImplementedError!
return NotImplemented
from .array import labels_array
self_array = labels_array(self)
if isinstance(other, Axis):
other = labels_array(other)
return getattr(self_array, fullname)(other)
opmethod.__name__ = fullname
return opmethod
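# note: since the comparison operators below are generated via _binop, comparing an Axis does not return a
# bool but an Array, e.g. (illustrative) Axis('a=a0,a1') == 'a0' evaluates element-wise against the labels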
__lt__ = _binop('lt')
__le__ = _binop('le')
__eq__ = _binop('eq')
__ne__ = _binop('ne')
__gt__ = _binop('gt')
__ge__ = _binop('ge')
__add__ = _binop('add')
__radd__ = _binop('radd')
__sub__ = _binop('sub')
__rsub__ = _binop('rsub')
__mul__ = _binop('mul')
__rmul__ = _binop('rmul')
if sys.version < '3':
__div__ = _binop('div')
__rdiv__ = _binop('rdiv')
__truediv__ = _binop('truediv')
__rtruediv__ = _binop('rtruediv')
__floordiv__ = _binop('floordiv')
__rfloordiv__ = _binop('rfloordiv')
__mod__ = _binop('mod')
__rmod__ = _binop('rmod')
__divmod__ = _binop('divmod')
__rdivmod__ = _binop('rdivmod')
__pow__ = _binop('pow')
__rpow__ = _binop('rpow')
__lshift__ = _binop('lshift')
__rlshift__ = _binop('rlshift')
__rshift__ = _binop('rshift')
__rrshift__ = _binop('rrshift')
__and__ = _binop('and')
__rand__ = _binop('rand')
__xor__ = _binop('xor')
__rxor__ = _binop('rxor')
__or__ = _binop('or')
__ror__ = _binop('ror')
__matmul__ = _binop('matmul')
def __larray__(self):
r"""
Returns axis as Array.
"""
from .array import labels_array
return labels_array(self)
def copy(self):
r"""
Returns a copy of the axis.
"""
new_axis = Axis([], self.name)
# XXX: I wonder if we should make a copy of the labels + mapping. There should at least be an option.
new_axis._labels = self._labels
new_axis.__mapping = self.__mapping
new_axis._length = self._length
new_axis._iswildcard = self._iswildcard
new_axis.__sorted_keys = self.__sorted_keys
new_axis.__sorted_values = self.__sorted_values
return new_axis
def replace(self, old, new=None):
r"""
Returns a new axis with some labels replaced.
Parameters
----------
old : any scalar (bool, int, str, ...), tuple/list/array of scalars, or a mapping.
the label(s) to be replaced. Old can be a mapping {old1: new1, old2: new2, ...}
new : any scalar (bool, int, str, ...) or tuple/list/array of scalars, optional
the new label(s). This argument must not be used if old is a mapping.
Returns
-------
Axis
a new Axis with the old labels replaced by new labels.
Examples
--------
>>> sex = Axis('sex=M,F')
>>> sex
Axis(['M', 'F'], 'sex')
>>> sex.replace('M', 'Male')
Axis(['Male', 'F'], 'sex')
>>> sex.replace({'M': 'Male', 'F': 'Female'})
Axis(['Male', 'Female'], 'sex')
>>> sex.replace(['M', 'F'], ['Male', 'Female'])
Axis(['Male', 'Female'], 'sex')
"""
if isinstance(old, dict):
new = list(old.values())
old = list(old.keys())
elif np.isscalar(old):
assert new is not None and np.isscalar(new), "%s is not a scalar but a %s" % (new, type(new).__name__)
old = [old]
new = [new]
else:
seq = (tuple, list, np.ndarray)
assert isinstance(old, seq), "%s is not a sequence but a %s" % (old, type(old).__name__)
assert isinstance(new, seq), "%s is not a sequence but a %s" % (new, type(new).__name__)
assert len(old) == len(new)
# using object dtype because the new labels can be longer than the fixed string length of the self.labels array
labels = self.labels.astype(object)
indices = self.index(old)
labels[indices] = new
return Axis(labels, self.name)
def apply(self, func):
r"""
Returns a new axis with the labels transformed by func.
Parameters
----------
func : callable
A callable which takes a single argument and returns a single value.
Returns
-------
Axis
a new Axis with the transformed labels.
Examples
--------
>>> sex = Axis('sex=MALE,FEMALE')
>>> sex.apply(str.capitalize)
Axis(['Male', 'Female'], 'sex')
"""
return Axis(np_frompyfunc(func, 1, 1)(self.labels), self.name)
# XXX: rename to named like Group?
def rename(self, name):
r"""
Renames the axis.
Parameters
----------
name : str
the new name for the axis.
Returns
-------
Axis
a new Axis with the same labels but a different name.
Examples
--------
>>> sex = Axis('sex=M,F')
>>> sex
Axis(['M', 'F'], 'sex')
>>> sex.rename('gender')
Axis(['M', 'F'], 'gender')
"""
res = self.copy()
if isinstance(name, Axis):
name = name.name
res.name = name
return res
def _rename(self, name):
raise TypeError("Axis._rename is deprecated, use Axis.rename instead")
def union(self, other):
r"""Returns axis with the union of this axis labels and other labels.
The relative order of labels will be kept intact, but only unique labels will be returned. Labels from this
axis will come before labels from other.
Parameters
----------
other : Axis or any sequence of labels
other labels
Returns
-------
Axis
Examples
--------
>>> a = Axis('a=a0..a2')
>>> a.union('a1')
Axis(['a0', 'a1', 'a2'], 'a')
>>> a.union('a3')
Axis(['a0', 'a1', 'a2', 'a3'], 'a')
>>> a.union(Axis('a=a1..a3'))
Axis(['a0', 'a1', 'a2', 'a3'], 'a')
>>> a.union('a1..a3')
Axis(['a0', 'a1', 'a2', 'a3'], 'a')
>>> a.union(['a1', 'a2', 'a3'])
Axis(['a0', 'a1', 'a2', 'a3'], 'a')
"""
if isinstance(other, basestring):
# TODO: remove the `[other] if ...` fallback when the FutureWarning raised in Axis.__init__ is removed
other = _to_ticks(other, parse_single_int=True) if '..' in other or ',' in other else [other]
if isinstance(other, Axis):
other = other.labels
return Axis(unique_multi((self.labels, other)), self.name)
def intersection(self, other):
r"""Returns axis with the (set) intersection of this axis labels and other labels.
In other words, this will use labels from this axis if they are also in other. The relative order of labels
will be kept intact.
Parameters
----------
other : Axis or any sequence of labels
other labels
Returns
-------
Axis
Examples
--------
>>> a = Axis('a=a0..a2')
>>> a.intersection('a1')
Axis(['a1'], 'a')
>>> a.intersection('a3')
Axis([], 'a')
>>> a.intersection(Axis('a=a1..a3'))
Axis(['a1', 'a2'], 'a')
>>> a.intersection('a1..a3')
Axis(['a1', 'a2'], 'a')
>>> a.intersection(['a1', 'a2', 'a3'])
Axis(['a1', 'a2'], 'a')
"""
if isinstance(other, basestring):
# TODO: remove the `[other] if ...` fallback when the FutureWarning raised in Axis.__init__ is removed
other = _to_ticks(other, parse_single_int=True) if '..' in other or ',' in other else [other]
if isinstance(other, Axis):
other = other.labels
to_keep = set(other)
return Axis([l for l in self.labels if l in to_keep], self.name)
def difference(self, other):
r"""Returns axis with the (set) difference of this axis labels and other labels.
In other words, this will use labels from this axis if they are not in other. The relative order of labels
will be kept intact.
Parameters
----------
other : Axis or any sequence of labels
other labels
Returns
-------
Axis
Examples
--------
>>> a = Axis('a=a0..a2')
>>> a.difference('a1')
Axis(['a0', 'a2'], 'a')
>>> a.difference('a3')
Axis(['a0', 'a1', 'a2'], 'a')
>>> a.difference(Axis('a=a1..a3'))
Axis(['a0'], 'a')
>>> a.difference('a1..a3')
Axis(['a0'], 'a')
>>> a.difference(['a1', 'a2', 'a3'])
Axis(['a0'], 'a')
"""
if isinstance(other, basestring):
# TODO: remove the `[other] if ...` fallback when the FutureWarning raised in Axis.__init__ is removed
other = _to_ticks(other, parse_single_int=True) if '..' in other or ',' in other else [other]
if isinstance(other, Axis):
other = other.labels
to_drop = set(other)
return Axis([l for l in self.labels if l not in to_drop], self.name)
def align(self, other, join='outer'):
r"""Align axis with other object using specified join method.
Parameters
----------
other : Axis or label sequence
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Defaults to 'outer'.
Returns
-------
Axis
Aligned axis
See Also
--------
Array.align
Examples
--------
>>> axis1 = Axis('a=a0..a2')
>>> axis2 = Axis('a=a1..a3')
>>> axis1.align(axis2)
Axis(['a0', 'a1', 'a2', 'a3'], 'a')
>>> axis1.align(axis2, join='inner')
Axis(['a1', 'a2'], 'a')
>>> axis1.align(axis2, join='left')
Axis(['a0', 'a1', 'a2'], 'a')
>>> axis1.align(axis2, join='right')
Axis(['a1', 'a2', 'a3'], 'a')
>>> axis1.align(axis2, join='exact') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: align method with join='exact' expected
Axis(['a0', 'a1', 'a2'], 'a') to be equal to Axis(['a1', 'a2', 'a3'], 'a')
"""
assert join in {'outer', 'inner', 'left', 'right', 'exact'}
if join == 'outer':
return self.union(other)
elif join == 'inner':
return self.intersection(other)
elif join == 'left':
return self
elif join == 'right':
if not isinstance(other, Axis):
other = Axis(other)
return other
elif join == 'exact':
if not self.equals(other):
raise ValueError("align method with join='exact' "
"expected {!r} to be equal to {!r}".format(self, other))
else:
return self
def to_hdf(self, filepath, key=None):
r"""
Writes axis to an HDF file.
An HDF file can contain multiple axes.
The 'key' parameter is a unique identifier for the axis.
Parameters
----------
filepath : str
Path where the hdf file has to be written.
key : str or Group, optional
Key (path) of the axis within the HDF file (see Notes below).
If None, the name of the axis is used.
Defaults to None.
Notes
-----
Objects stored in an HDF file can be grouped together in `HDF groups`.
If an object 'my_obj' is stored in an HDF group 'my_group',
the key associated with this object is then 'my_group/my_obj'.
Be aware that an HDF group can have subgroups.
Examples
--------
>>> a = Axis("a=a0..a2")
Save axis
>>> # by default, the key is the name of the axis
>>> a.to_hdf('test.h5') # doctest: +SKIP
Save axis with a specific key
>>> a.to_hdf('test.h5', 'a') # doctest: +SKIP
Save axis in a specific HDF group
>>> a.to_hdf('test.h5', 'axes/a') # doctest: +SKIP
"""
if key is None:
if self.name is None:
raise ValueError("Argument key must be provided explicitly in case of anonymous axis")
key = self.name
key = _translate_group_key_hdf(key)
dtype_kind = self.labels.dtype.kind
data = np.char.encode(self.labels, 'utf-8') if dtype_kind == 'U' else self.labels
s = pd.Series(data=data, name=self.name)
with LHDFStore(filepath) as store:
store.put(key, s)
store.get_storer(key).attrs.type = 'Axis'
store.get_storer(key).attrs.dtype_kind = dtype_kind
store.get_storer(key).attrs.wildcard = self.iswildcard
@property
def dtype(self):
return self._labels.dtype
def ignore_labels(self):
r"""Returns a wildcard axis with the same name and length than this axis.
Useful when you want to apply operations between two arrays with the same shape but incompatible axes
(different labels).
Returns
-------
Axis
Examples
--------
>>> a = Axis('a=a1,a2')
>>> a
Axis(['a1', 'a2'], 'a')
>>> a.ignore_labels()
Axis(2, 'a')
"""
return Axis(len(self), self.name)
def _make_axis(obj):
if isinstance(obj, Axis):
return obj
elif isinstance(obj, tuple):
assert len(obj) == 2
labels, name = obj
return Axis(labels, name)
elif isinstance(obj, Group):
return Axis(obj.eval(), obj.axis)
else:
# int, str, list, ndarray
return Axis(obj)
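# a minimal sketch of the inputs _make_axis accepts (assuming the Axis constructor semantics documented above):
# _make_axis(3) -> Axis(3, None) (anonymous wildcard axis)
# _make_axis((['M', 'F'], 'sex')) -> Axis(['M', 'F'], 'sex')
# _make_axis('sex=M,F') -> Axis(['M', 'F'], 'sex')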
# not using OrderedDict because it does not support indices-based getitem
# not using namedtuple because we have to know the fields in advance (it is a one-off class) and we need more
# functionality than just a named tuple
class AxisCollection(object):
r"""
Represents a collection of axes.
Parameters
----------
axes : sequence of Axis or int or tuple or str, optional
An axis can be given as an Axis object, an int or a
tuple (labels, name) or a string of the kind
'name=label_1,label_2,label_3'.
Raises
------
ValueError
Cannot have multiple occurrences of the same axis object in a collection.
Notes
-----
Multiple occurrences of the same axis object are not allowed.
However, several axes with the same name are allowed but this is not recommended.
Examples
--------
>>> age = Axis(range(10), 'age')
>>> AxisCollection([3, age, (['M', 'F'], 'sex'), 'time = 2007, 2008, 2009, 2010'])
AxisCollection([
Axis(3, None),
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'age'),
Axis(['M', 'F'], 'sex'),
Axis([2007, 2008, 2009, 2010], 'time')
])
>>> AxisCollection('age=0..9; sex=M,F; time=2007..2010')
AxisCollection([
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'age'),
Axis(['M', 'F'], 'sex'),
Axis([2007, 2008, 2009, 2010], 'time')
])
"""
def __init__(self, axes=None):
if axes is None:
axes = []
elif isinstance(axes, (int, long, Group, Axis)):
axes = [axes]
elif isinstance(axes, str):
axes = [axis.strip() for axis in axes.split(';')]
axes = [_make_axis(axis) for axis in axes]
assert all(isinstance(a, Axis) for a in axes)
# check for duplicate axes
dupe_axes = list(duplicates(axes))
if dupe_axes:
axis = dupe_axes[0]
raise ValueError("Cannot have multiple occurrences of the same axis object in a collection ('%s' -- %s "
"with id %d). Several axes with the same name are allowed though (but not recommended)."
% (axis.name, axis.labels_summary(), id(axis)))
self._list = axes
self._map = {axis.name: axis for axis in axes if axis.name is not None}
# # check dupes on each axis
# for axis in axes:
# axis_dupes = list(duplicates(axis.labels))
# if axis_dupes:
# dupe_labels = ', '.join(str(l) for l in axis_dupes)
# warnings.warn("duplicate labels found for axis %s: %s" % (axis.name, dupe_labels),
# category=UserWarning, stacklevel=2)
#
# # check dupes between axes. Using unique to not spot the dupes within the same axis that we just displayed.
# all_labels = chain(*[np.unique(axis.labels) for axis in axes])
# dupe_labels = list(duplicates(all_labels))
# if dupe_labels:
# label_axes = [(label, ', '.join(display_name for axis, display_name in zip(axes, self.display_names)
# if label in axis))
# for label in dupe_labels]
# dupes = '\n'.join("{} is valid in {{{}}}".format(label, axes) for label, axes in label_axes)
# warnings.warn("ambiguous labels found:\n%s" % dupes, category=UserWarning, stacklevel=5)
def __dir__(self):
# called by dir() and tab-completion at the interactive prompt, must return a list of any valid getattr key.
# dir() takes care of sorting but not uniqueness, so we must ensure that.
names = set(axis.name for axis in self._list if axis.name is not None)
return list(set(dir(self.__class__)) | names)
def __iter__(self):
return iter(self._list)
# TODO: move a few doctests to unit tests
def iter_labels(self, axes=None, ascending=True):
r"""Returns a view of the axes labels.
Parameters
----------
axes : int, str or Axis or tuple of them, optional
Axis or axes along which to iterate and in which order. Defaults to None (all axes in the order they are
in the collection).
ascending : bool, optional
Whether or not to iterate the axes in ascending order (from start to end). Defaults to True.
Returns
-------
Sequence
An object you can iterate (loop) on and index by position.
Examples
--------
>>> from larray import ndtest
>>> axes = ndtest((2, 2)).axes
>>> axes
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1'], 'b')
])
>>> axes.iter_labels()[0]
(a.i[0], b.i[0])
>>> for index in axes.iter_labels():
... print(index)
(a.i[0], b.i[0])
(a.i[0], b.i[1])
(a.i[1], b.i[0])
(a.i[1], b.i[1])
>>> axes.iter_labels(ascending=False)[0]
(a.i[1], b.i[1])
>>> for index in axes.iter_labels(ascending=False):
... print(index)
(a.i[1], b.i[1])
(a.i[1], b.i[0])
(a.i[0], b.i[1])
(a.i[0], b.i[0])
>>> axes.iter_labels(('b', 'a'))[0]
(b.i[0], a.i[0])
>>> for index in axes.iter_labels(('b', 'a')):
... print(index)
(b.i[0], a.i[0])
(b.i[0], a.i[1])
(b.i[1], a.i[0])
(b.i[1], a.i[1])
>>> axes.iter_labels('b')[0]
(b.i[0],)
>>> for index in axes.iter_labels('b'):
... print(index)
(b.i[0],)
(b.i[1],)
"""
axes = self if axes is None else self[axes]
if not isinstance(axes, AxisCollection):
axes = (axes,)
# we need .i because Product uses len and [] on axes and not iter; and [] creates LGroup and not IGroup
p = Product([axis.i for axis in axes])
if not ascending:
p = p[::-1]
return p
def __getattr__(self, key):
try:
return self._map[key]
except KeyError:
return self.__getattribute__(key)
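# __getattr__ gives attribute-style access to named axes, e.g. (illustrative):
# AxisCollection('sex=M,F').sex returns the sex Axis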
# needed to make *un*pickling work (because otherwise, __getattr__ is called before _map exists, which leads to
# an infinite recursion)
def __getstate__(self):
return self._list
def __setstate__(self, state):
self._list = state
self._map = {axis.name: axis for axis in state if axis.name is not None}
def __getitem__(self, key):
if isinstance(key, Axis):
try:
key = self.index(key)
# transform ValueError to KeyError
except ValueError:
if key.name is None:
raise KeyError("axis '%s' not found in %s" % (key, self))
else:
# we should NOT check that the object is the same, so that we can use AxisReference objects to
# target real axes
key = key.name
if isinstance(key, (int, np.integer)):
return self._list[key]
elif isinstance(key, (tuple, list)):
if any(axis is Ellipsis for axis in key):
ellipsis_idx = index_by_id(key, Ellipsis)
# going through lists (instead of doing self[key[:ellipsis_idx]]) to avoid problems with anonymous axes
before_ellipsis = [self[k] for k in key[:ellipsis_idx]]
after_ellipsis = [self[k] for k in key[ellipsis_idx + 1:]]
ellipsis_axes = list(self - before_ellipsis - after_ellipsis)
return AxisCollection(before_ellipsis + ellipsis_axes + after_ellipsis)
# XXX: also use get_by_pos if tuple/list of Axis?
return AxisCollection([self[k] for k in key])
elif isinstance(key, AxisCollection):
return AxisCollection([self.get_by_pos(k, i)
for i, k in enumerate(key)])
elif isinstance(key, slice):
return AxisCollection(self._list[key])
elif key is None:
raise KeyError("axis '%s' not found in %s" % (key, self))
else:
assert isinstance(key, basestring), type(key)
if key in self._map:
return self._map[key]
else:
raise KeyError("axis '%s' not found in %s" % (key, self))
def _ipython_key_completions_(self):
return list(self._map.keys())
# XXX: I wonder if this whole positional crap should really be part of AxisCollection or the default behavior.
# It could either be moved to make_numpy_broadcastable or made non default
def get_by_pos(self, key, i):
r"""
Returns the axis corresponding to key, or the axis at position i if the key has no name and the key object was not found.
Parameters
----------
key : key
Key corresponding to an axis.
i : int
Position of the axis (used only if search by key failed).
Returns
-------
Axis
Axis corresponding to the key or the position i.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> col = AxisCollection([age, sex, time])
>>> col.get_by_pos('sex', 1)
Axis(['M', 'F'], 'sex')
"""
if isinstance(key, Axis) and key.name is None:
try:
# try by object
return self[key]
except KeyError:
if i in self:
res = self[i]
if res.iscompatible(key):
return res
else:
raise ValueError("axis %s is not compatible with %s" % (res, key))
# XXX: KeyError instead?
raise ValueError("axis %s not found in %s" % (key, self))
else:
return self[key]
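# __setitem__ below replaces axes in place; a few illustrative forms (assuming col is an AxisCollection):
# col['sex'] = Axis('gender=M,F') # replace a single axis by name
# col[0] = Axis('a=a0,a1') # replace a single axis by position
# col[1:2] = [axis1, axis2] # replace a slice of axes (a plain slice may change the number of axes)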
def __setitem__(self, key, value):
if isinstance(key, slice):
assert isinstance(value, (tuple, list, AxisCollection))
def slice_bound(bound):
if bound is None or isinstance(bound, int):
# out of bounds integer bounds are allowed in slice setitem so we cannot use .index
return bound
else:
return self.index(bound)
start_idx = slice_bound(key.start)
# XXX: we might want to make the stop bound inclusive, which makes more sense for label bounds (but
# prevents inserts via setitem)
stop_idx = slice_bound(key.stop)
old = self._list[start_idx:stop_idx:key.step]
for axis in old:
if axis.name is not None:
del self._map[axis.name]
for axis in value:
if axis.name is not None:
self._map[axis.name] = axis
self._list[start_idx:stop_idx:key.step] = value
elif isinstance(key, (tuple, list, AxisCollection)):
assert isinstance(value, (tuple, list, AxisCollection))
if len(key) != len(value):
raise ValueError('must have as many old axes as new axes')
for k, v in zip(key, value):
self[k] = v
else:
if not isinstance(value, Axis):
value = Axis(value)
idx = self.index(key)
step = 1 if idx >= 0 else -1
self[idx:idx + step:step] = [value]
def __delitem__(self, key):
if isinstance(key, slice):
self[key] = []
else:
idx = self.index(key)
axis = self._list.pop(idx)
if axis.name is not None:
del self._map[axis.name]
def union(self, *args, **kwargs):
validate = kwargs.pop('validate', True)
replace_wildcards = kwargs.pop('replace_wildcards', True)
result = self[:]
for a in args:
if not isinstance(a, AxisCollection):
a = AxisCollection(a)
result.extend(a, validate=validate, replace_wildcards=replace_wildcards)
return result
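# union is set-like: axes of the arguments which are not already present are appended, e.g. (illustrative)
# AxisCollection('a=a0,a1').union(AxisCollection('b=b0,b1')) contains both the a and b axes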
__or__ = union
# TODO: deprecate method (should use | or union instead)
__add__ = union
# TODO: deprecate method (should use | or union instead) (but implement __ror__ !)
def __radd__(self, other):
result = AxisCollection(other)
result.extend(self)
return result
def __and__(self, other):
r"""
Returns the intersection of this collection and other.
"""
if not isinstance(other, AxisCollection):
other = AxisCollection(other)
# XXX: add iscompatible when matching by position?
# TODO: move this to a class method (possibly private) so that we are sure we use the same heuristic as in .extend
def contains(col, i, axis):
return axis in col or (axis.name is None and i in col)
return AxisCollection([axis for i, axis in enumerate(self) if contains(other, i, axis)])
def __eq__(self, other):
r"""
Another collection compares equal if all axes compare equal and are in the same order. Also works with a plain list.
"""
if self is other:
return True
if not isinstance(other, list):
other = list(other)
return len(self._list) == len(other) and all(a.equals(b) for a, b in zip(self._list, other))
# for python2, we need to define it explicitly
def __ne__(self, other):
return not self == other
def __contains__(self, key):
if isinstance(key, int):
return -len(self) <= key < len(self)
elif isinstance(key, Axis):
# the special case is just a performance optimization to avoid scanning through the whole list
if key.name is not None:
return key.name in self._map
else:
try:
self.index(key)
return True
except ValueError:
return False
# key can be anything, it should just return False in case of weird types
return key in self._map
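# illustrative sketch: with col = AxisCollection('sex=M,F'), both 'sex' in col and 0 in col are True
# (ints are interpreted as positions, strings as axis names)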
def isaxis(self, value):
r"""
Tests if input is an Axis object or the name of an axis contained in self.
Parameters
----------
value : Axis or str
Input axis or string
Returns
-------
bool
True if input is an Axis object or the name of an axis contained in the current AxisCollection instance,
False otherwise.
Examples
--------
>>> a = Axis('a=a0,a1')
>>> b = Axis('b=b0,b1')
>>> col = AxisCollection([a, b])
>>> col.isaxis(a)
True
>>> col.isaxis('b')
True
>>> col.isaxis('c')
False
"""
# this is tricky. 0 and 1 can be both axes indices and axes ticks.
# not sure what's worse:
# 1) disallow aggregates(axis_num): users could still use arr.sum(arr.axes[0])
# we could also provide an explicit kwarg (ie this would effectively forbid having an axis named "axis").
# arr.sum(axis=0). I think this is the sanest option. The error message in case we use it without the
# keyword needs to be clearer though.
# 2) slightly inconsistent API: allow aggregate over single labels if they are string, but not int
# arr.sum(0) would sum on the first axis, but arr.sum('M') would
# sum a single tick. I don't like this option.
# 3) disallow single tick aggregates. Single labels make little sense in the context of an aggregate,
# but you don't always know/want to differentiate the code in that case anyway.
# It would be annoying for e.g. Brussels
# 4) give priority to axes,
# arr.sum(0) would sum on the first axis but arr.sum(5) would
# sum a single tick (assuming there is an int axis and less than six axes).
# return value in self
return isinstance(value, Axis) or (isinstance(value, basestring) and value in self)
def __len__(self):
return len(self._list)
ndim = property(__len__)
def __str__(self):
return "{%s}" % ', '.join(self.display_names)
def __repr__(self):
if len(self):
repr_per_axis = [repr(axis) for axis in self._list]
axes_repr = "\n {}\n".format(',\n '.join(repr_per_axis))
else:
axes_repr = ""
return "AxisCollection([{}])".format(axes_repr)
# TODO: kill name argument (does not seem to be used anywhere)
def get(self, key, default=None, name=None):
r"""
Returns axis corresponding to key. If not found, the argument `name` is used to create a new Axis.
If `name` is None, the `default` axis is then returned.
Parameters
----------
key : key
Key corresponding to an axis of the current AxisCollection.
default : axis, optional
Default axis to return if key doesn't correspond to any axis of the collection and argument `name` is None.
name : str, optional
If key doesn't correspond to any axis of the collection, a new Axis with this name is created and returned.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> col = AxisCollection([age, time])
>>> col.get('time')
Axis([2007, 2008, 2009, 2010], 'time')
>>> col.get('sex', sex)
Axis(['M', 'F'], 'sex')
>>> col.get('nb_children', None, 'nb_children')
Axis(1, 'nb_children')
"""
# XXX: use if key in self?
try:
return self[key]
except KeyError:
if name is None:
return default
else:
return Axis(1, name)
def get_all(self, key):
r"""
Returns all axes from key if present in self, and length-1 wildcard axes otherwise.
Parameters
----------
key : AxisCollection
Returns
-------
AxisCollection
Raises
------
AssertionError
Raised if the input key is not an AxisCollection object.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> city = Axis(['London', 'Paris', 'Rome'], 'city')
>>> col = AxisCollection([age, sex, time])
>>> col2 = AxisCollection([age, city, time])
>>> col.get_all(col2)
AxisCollection([
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], 'age'),
Axis(1, 'city'),
Axis([2007, 2008, 2009, 2010], 'time')
])
"""
assert isinstance(key, AxisCollection)
def get_pos_default(k, i):
try:
return self.get_by_pos(k, i)
except (ValueError, KeyError):
# XXX: does having i as the name really help?
if len(k) == 1:
return k
else:
return Axis(1, k.name if k.name is not None else i)
return AxisCollection([get_pos_default(k, i) for i, k in enumerate(key)])
def keys(self):
r"""
Returns list of all axis names.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> AxisCollection([age, sex, time]).keys()
['age', 'sex', 'time']
"""
# XXX: include id/num for anonymous axes? I think I should
return [a.name for a in self._list]
def pop(self, axis=-1):
r"""
Removes and returns an axis.
Parameters
----------
axis : key, optional
Axis to remove and return. Default value is -1 (last axis).
Returns
-------
Axis
If no argument is provided, the last axis is removed and returned.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> col = AxisCollection([age, sex, time])
>>> col.pop('age')
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], 'age')
>>> col
AxisCollection([
Axis(['M', 'F'], 'sex'),
Axis([2007, 2008, 2009, 2010], 'time')
])
>>> col.pop()
Axis([2007, 2008, 2009, 2010], 'time')
"""
axis = self[axis]
del self[axis]
return axis
def append(self, axis):
r"""
Appends axis at the end of the collection.
Parameters
----------
axis : Axis
Axis to append.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> col = AxisCollection([age, sex])
>>> col.append(time)
>>> col
AxisCollection([
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], 'age'),
Axis(['M', 'F'], 'sex'),
Axis([2007, 2008, 2009, 2010], 'time')
])
"""
self[len(self):len(self)] = [axis]
def check_compatible(self, axes):
r"""
Checks if axes passed as argument are compatible with those contained in the collection.
Raises ValueError if not.
See Also
--------
Axis.iscompatible
"""
for i, axis in enumerate(axes):
local_axis = self.get_by_pos(axis, i)
if not local_axis.iscompatible(axis):
raise ValueError("incompatible axes:\n{!r}\nvs\n{!r}".format(axis, local_axis))
# XXX: deprecate method (functionality is duplicated in union)?
# I am not so sure anymore we need to actually deprecate the method: having both methods with the same
# semantic like we currently have is useless indeed but I think we should have both a set-like method (union)
# and the possibility to add an axis unconditionally (append or extend). That is, add an axis, even if that
# name already exists. This is especially important for anonymous axes (see my comments in stack for example)
# TODO: deprecate validate argument (unused)
# TODO: deprecate replace_wildcards argument (unused)
def extend(self, axes, validate=True, replace_wildcards=False):
r"""
Extends the collection by appending the axes from `axes`.
Parameters
----------
axes : sequence of Axis (list, tuple, AxisCollection)
validate : bool, optional
replace_wildcards : bool, optional
Raises
------
TypeError
Raised if `axes` is not a sequence of Axis (list, tuple or AxisCollection)
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> col = AxisCollection(age)
>>> col.extend([sex, time])
>>> col
AxisCollection([
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], 'age'),
Axis(['M', 'F'], 'sex'),
Axis([2007, 2008, 2009, 2010], 'time')
])
"""
# axes should be a sequence
if not isinstance(axes, (tuple, list, AxisCollection)):
raise TypeError("AxisCollection can only be extended by a sequence of Axis, not %s" % type(axes).__name__)
# check that common axes are the same
# if validate:
# self.check_compatible(axes)
# TODO: factorize with get_by_pos
def get_axis(col, i, axis):
if axis in col:
return col[axis]
elif axis.name is None and i in col:
return col[i]
else:
return None
for i, axis in enumerate(axes):
old_axis = get_axis(self, i, axis)
if old_axis is None:
# append axis
self[len(self):len(self)] = [axis]
# elif replace_wildcards and old_axis.iswildcard:
# self[old_axis] = axis
else:
# check that common axes are the same
if validate and not old_axis.iscompatible(axis):
raise ValueError("incompatible axes:\n%r\nvs\n%r" % (axis, old_axis))
if replace_wildcards and old_axis.iswildcard:
self[old_axis] = axis
def index(self, axis, compatible=False):
r"""
Returns the index of axis.
`axis` can be a name or an Axis object (or an index). If the Axis object itself exists in the list, index()
will return its position. Otherwise, it will return the index of the local axis with the same name as the key
(whether it is compatible or not).
Parameters
----------
axis : Axis or int or str
Can be the axis itself or its position (returned if represents a valid index) or its name.
compatible : bool, optional
If axis is an Axis, whether to find an exact match (using Axis.equals) or any
compatible axis (using Axis.iscompatible)
Returns
-------
int
Index of the axis.
Raises
------
ValueError
Raised if the axis is not present.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> col = AxisCollection([age, sex, time])
>>> col.index(time)
2
>>> col.index('sex')
1
"""
if isinstance(axis, int):
if -len(self) <= axis < len(self):
return axis
else:
raise ValueError("axis %d is not in collection" % axis)
elif isinstance(axis, Axis):
try:
# 1) first look for that particular axis object
# This avoids testing labels of each axis and makes sure the result is correct even if there are
# several axes with the same name and labels.
return index_by_id(self._list, axis)
except ValueError:
# 2) look for an axis with the same name and labels using axis.equals
# This makes sure that if there are several axes with the same name but different labels, it returns
# the index of the one with the correct labels. This is especially important for anonymous axes but is
# also useful for other axes. Note that this shouldn't be too slow as labels will only be actually
# checked if the names match.
if compatible:
for i, item in enumerate(self._list):
if item.iscompatible(axis):
return i
else:
# We cannot use self._list.index because it uses Axis.__eq__ which produces an Array
for i, item in enumerate(self._list):
if item.equals(axis):
return i
# 3) otherwise look for any axis with the same name
name = axis.name
else:
name = axis
if name is None:
raise ValueError("%r is not in collection" % axis)
return self.names.index(name)
# XXX: we might want to return a new AxisCollection (same question for other inplace operations:
# append, extend, pop, __delitem__, __setitem__)
def insert(self, index, axis):
r"""
Inserts axis before index.
Parameters
----------
index : int
position of the inserted axis.
axis : Axis
axis to insert.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> col = AxisCollection([age, time])
>>> col.insert(1, sex)
>>> col
AxisCollection([
Axis([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], 'age'),
Axis(['M', 'F'], 'sex'),
Axis([2007, 2008, 2009, 2010], 'time')
])
"""
self[index:index] = [axis]
def copy(self):
r"""
Returns a copy.
"""
return self[:]
def rename(self, renames=None, to=None, **kwargs):
r"""Renames axes of the collection.
Parameters
----------
renames : axis ref or dict {axis ref: str} or list of tuple (axis ref, str), optional
Renames to apply. If a single axis reference is given, the `to` argument must be used.
to : str or Axis, optional
New name if `renames` contains a single axis reference.
**kwargs : str or Axis
New name for each axis given as a keyword argument.
Returns
-------
AxisCollection
collection with axes renamed.
Examples
--------
>>> nat = Axis('nat=BE,FO')
>>> sex = Axis('sex=M,F')
>>> axes = AxisCollection([nat, sex])
>>> axes
AxisCollection([
Axis(['BE', 'FO'], 'nat'),
Axis(['M', 'F'], 'sex')
])
>>> axes.rename(nat, 'nat2')
AxisCollection([
Axis(['BE', 'FO'], 'nat2'),
Axis(['M', 'F'], 'sex')
])
>>> axes.rename(nat='nat2', sex='sex2')
AxisCollection([
Axis(['BE', 'FO'], 'nat2'),
Axis(['M', 'F'], 'sex2')
])
>>> axes.rename([('nat', 'nat2'), ('sex', 'sex2')])
AxisCollection([
Axis(['BE', 'FO'], 'nat2'),
Axis(['M', 'F'], 'sex2')
])
>>> axes.rename({'nat': 'nat2', 'sex': 'sex2'})
AxisCollection([
Axis(['BE', 'FO'], 'nat2'),
Axis(['M', 'F'], 'sex2')
])
"""
if isinstance(renames, dict):
items = list(renames.items())
elif isinstance(renames, list):
items = renames[:]
elif isinstance(renames, (str, Axis, int)):
items = [(renames, to)]
else:
items = []
items += kwargs.items()
renames = {self[k]: v for k, v in items}
return AxisCollection([a.rename(renames[a]) if a in renames else a
for a in self])
# XXX: what's the point in supporting a list of Axis or AxisCollection in axes_to_replace?
# it is used in Array.set_axes but if it is only there, shouldn't the support for that be
# moved there?
def replace(self, axes_to_replace=None, new_axis=None, inplace=False, **kwargs):
r"""Replace one, several or all axes of the collection.
Parameters
----------
axes_to_replace : axis ref or dict {axis ref: axis} or list of tuple (axis ref, axis) \
or list of Axis or AxisCollection, optional
Axes to replace. If a single axis reference is given, the `new_axis` argument must be provided.
If a list of Axis or an AxisCollection is given, all axes will be replaced by the new ones.
In that case, the number of new axes must match the number of the old ones. Defaults to None.
new_axis : axis ref, optional
New axis if `axes_to_replace` contains a single axis reference. Defaults to None.
inplace : bool, optional
Whether or not to modify the original object or return a new AxisCollection and leave the original intact.
Defaults to False.
**kwargs : Axis
New axis for each axis to replace given as a keyword argument.
Returns
-------
AxisCollection
AxisCollection with axes replaced.
Examples
--------
>>> from larray import ndtest
>>> axes = ndtest((2, 3)).axes
>>> axes
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1', 'b2'], 'b')
])
>>> row = Axis(['r0', 'r1'], 'row')
>>> column = Axis(['c0', 'c1', 'c2'], 'column')
Replace one axis (second argument `new_axis` must be provided)
>>> axes.replace(X.a, row) # doctest: +SKIP
>>> # or
>>> axes.replace(X.a, "row=r0,r1")
AxisCollection([
Axis(['r0', 'r1'], 'row'),
Axis(['b0', 'b1', 'b2'], 'b')
])
Replace several axes (keywords, list of tuple or dictionary)
>>> axes.replace(a=row, b=column) # doctest: +SKIP
>>> # or
>>> axes.replace(a="row=r0,r1", b="column=c0,c1,c2") # doctest: +SKIP
>>> # or
>>> axes.replace([(X.a, row), (X.b, column)]) # doctest: +SKIP
>>> # or
>>> axes.replace({X.a: row, X.b: column})
AxisCollection([
Axis(['r0', 'r1'], 'row'),
Axis(['c0', 'c1', 'c2'], 'column')
])
Replace all axes (list of axes or AxisCollection)
>>> axes.replace([row, column])
AxisCollection([
Axis(['r0', 'r1'], 'row'),
Axis(['c0', 'c1', 'c2'], 'column')
])
>>> arr = ndtest([row, column])
>>> axes.replace(arr.axes)
AxisCollection([
Axis(['r0', 'r1'], 'row'),
Axis(['c0', 'c1', 'c2'], 'column')
])
"""
if not PY2 and isinstance(axes_to_replace, zip):
axes_to_replace = list(axes_to_replace)
if isinstance(axes_to_replace, (list, AxisCollection)) and \
all([isinstance(axis, Axis) for axis in axes_to_replace]):
if len(axes_to_replace) != len(self):
raise ValueError('{} axes given as argument, expected {}'.format(len(axes_to_replace), len(self)))
axes = axes_to_replace
else:
axes = self if inplace else self[:]
if isinstance(axes_to_replace, dict):
items = list(axes_to_replace.items())
elif isinstance(axes_to_replace, list):
assert all([isinstance(item, tuple) and len(item) == 2 for item in axes_to_replace])
items = axes_to_replace[:]
elif isinstance(axes_to_replace, (basestring, Axis, int)):
items = [(axes_to_replace, new_axis)]
else:
items = []
items += kwargs.items()
for old, new in items:
axes[old] = new
if inplace:
return self
else:
return AxisCollection(axes)
def _guess_axis(self, axis_key):
if isinstance(axis_key, Group):
group_axis = axis_key.axis
if group_axis is not None:
# we have axis information but not necessarily an Axis object from self.axes
real_axis = self[group_axis]
if group_axis is not real_axis:
axis_key = axis_key.with_axis(real_axis)
return axis_key
# TODO: instead of checking all axes, we should have a big mapping
# (in AxisCollection or Array):
# label -> (axis, index)
# or possibly (for ambiguous labels)
# label -> {axis: index}
# but for Pandas, this wouldn't work, we'd need label -> axis
valid_axes = []
for axis in self:
try:
axis.index(axis_key)
valid_axes.append(axis)
except KeyError:
continue
if not valid_axes:
raise ValueError("%s is not a valid label for any axis" % axis_key)
elif len(valid_axes) > 1:
valid_axes = ', '.join(a.name if a.name is not None else '{{{}}}'.format(self.index(a))
for a in valid_axes)
raise ValueError('%s is ambiguous (valid in %s)' % (axis_key, valid_axes))
return valid_axes[0][axis_key]
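# illustrative sketch: with axes {nat=BE,FO; sex=M,F}, _guess_axis('M') returns the LGroup sex['M']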
def set_labels(self, axis=None, labels=None, inplace=False, **kwargs):
r"""Replaces the labels of one or several axes.
Parameters
----------
axis : string or Axis or dict
Axis for which we want to replace labels, or mapping {axis: changes} where changes can either be the
complete list of labels, a mapping {old_label: new_label} or a function to transform labels.
If there is no ambiguity (i.e. no two axes share the same labels), `axis` can be a direct mapping
{old_label: new_label}.
labels : int, str, iterable or mapping or function, optional
Integer or list of values usable as the collection of labels for an Axis. If this is a mapping, it must be
{old_label: new_label}. If it is a function, it must be a function accepting a single argument (a
label) and returning a single value. This argument must not be used if axis is a mapping.
inplace : bool, optional
Whether or not to modify the original object or return a new AxisCollection and leave the original intact.
Defaults to False.
**kwargs :
`axis`=`labels` for each axis you want to set labels.
Returns
-------
AxisCollection
AxisCollection with modified labels.
Warnings
--------
Passing the complete list of new labels (rather than a mapping) as the 'labels' argument must be done with
caution. Make sure that the order of the new labels corresponds to the exact same order of the previous labels.
Examples
--------
>>> from larray import ndtest
>>> axes = AxisCollection('nat=BE,FO;sex=M,F')
>>> axes
AxisCollection([
Axis(['BE', 'FO'], 'nat'),
Axis(['M', 'F'], 'sex')
])
>>> axes.set_labels('sex', ['Men', 'Women'])
AxisCollection([
Axis(['BE', 'FO'], 'nat'),
Axis(['Men', 'Women'], 'sex')
])
when passing a single string as labels, it will be interpreted to create the list of labels, so that one can
use the same syntax as during axis creation.
>>> axes.set_labels('sex', 'Men,Women')
AxisCollection([
Axis(['BE', 'FO'], 'nat'),
Axis(['Men', 'Women'], 'sex')
])
to replace only some labels, one must give a mapping giving the new label for each label to replace
>>> axes.set_labels('sex', {'M': 'Men'})
AxisCollection([
Axis(['BE', 'FO'], 'nat'),
Axis(['Men', 'F'], 'sex')
])
to transform labels by a function, use any function accepting and returning a single argument:
>>> axes.set_labels('nat', str.lower)
AxisCollection([
Axis(['be', 'fo'], 'nat'),
Axis(['M', 'F'], 'sex')
])
to replace labels for several axes at the same time, one should give a mapping giving the new labels for each
changed axis
>>> axes.set_labels({'sex': 'Men,Women', 'nat': 'Belgian,Foreigner'})
AxisCollection([
Axis(['Belgian', 'Foreigner'], 'nat'),
Axis(['Men', 'Women'], 'sex')
])
or use keyword arguments
>>> axes.set_labels(sex='Men,Women', nat='Belgian,Foreigner')
AxisCollection([
Axis(['Belgian', 'Foreigner'], 'nat'),
Axis(['Men', 'Women'], 'sex')
])
one can also replace some labels in several axes by giving a mapping of mappings
>>> axes.set_labels({'sex': {'M': 'Men'}, 'nat': {'BE': 'Belgian'}})
AxisCollection([
Axis(['Belgian', 'FO'], 'nat'),
Axis(['Men', 'F'], 'sex')
])
when there is no ambiguity (i.e. no two axes share the same labels), it is possible to give a mapping
between old and new labels
>>> axes.set_labels({'M': 'Men', 'BE': 'Belgian'})
AxisCollection([
Axis(['Belgian', 'FO'], 'nat'),
Axis(['Men', 'F'], 'sex')
])
"""
if axis is None:
changes = {}
elif isinstance(axis, dict):
changes = axis
elif isinstance(axis, (basestring, Axis, int)):
changes = {axis: labels}
else:
raise ValueError("Expected None or a string/int/Axis/dict instance for axis argument")
changes.update(kwargs)
# TODO: we should implement the non-dict behavior in Axis.replace, so that we can simplify this code to:
# new_axes = [self[old_axis].replace(axis_changes) for old_axis, axis_changes in changes.items()]
new_axes = []
for old_axis, axis_changes in changes.items():
try:
real_axis = self[old_axis]
except KeyError:
axis_changes = {old_axis: axis_changes}
real_axis = self._guess_axis(old_axis).axis
if isinstance(axis_changes, dict):
new_axis = real_axis.replace(axis_changes)
elif callable(axis_changes):
new_axis = real_axis.apply(axis_changes)
else:
new_axis = Axis(axis_changes, real_axis.name)
new_axes.append((real_axis, new_axis))
return self.replace(new_axes, inplace=inplace)
# TODO: deprecate method (should use __sub__ instead)
def without(self, axes):
r"""
Returns a new collection without some axes.
You can use a comma separated list of names.
Parameters
----------
axes : int, str, Axis or sequence of those
Axes to not include in the returned AxisCollection.
In case of string, axes are separated by a comma and no whitespace is accepted.
Returns
-------
AxisCollection
New collection without some axes.
Notes
-----
This is a set operation, so `axes` can contain axes not present in self.
Examples
--------
>>> age = Axis('age=0..5')
>>> sex = Axis('sex=M,F')
>>> time = Axis('time=2015..2017')
>>> col = AxisCollection([age, sex, time])
>>> col.without([age, sex])
AxisCollection([
Axis([2015, 2016, 2017], 'time')
])
>>> col.without(0)
AxisCollection([
Axis(['M', 'F'], 'sex'),
Axis([2015, 2016, 2017], 'time')
])
>>> col.without('sex,time')
AxisCollection([
Axis([0, 1, 2, 3, 4, 5], 'age')
])
"""
return self - axes
def __sub__(self, axes):
r"""
See Also
--------
without
"""
if isinstance(axes, basestring):
axes = axes.split(',')
elif isinstance(axes, (int, Axis)):
axes = [axes]
def index_first_compatible(axis):
try:
return self.index(axis, compatible=True)
except ValueError:
return -1
# only keep indices (as this works for unnamed axes too)
to_remove = set(index_first_compatible(axis) for axis in axes)
# -1 values in to_remove are not a problem since enumerate starts at 0
return AxisCollection([axis for i, axis in enumerate(self) if i not in to_remove])
def _translate_axis_key_chunk(self, axis_key):
"""
Translates *single axis* label-based key to an IGroup
Parameters
----------
axis_key : any kind of key
Key to select axis.
Returns
-------
IGroup
Indices group with a valid axis (from self)
"""
axis_key = remove_nested_groups(axis_key)
if isinstance(axis_key, IGroup) and axis_key.axis is not None:
# retarget to real axis, if needed
# only retarget IGroup and not LGroup to give the opportunity for axis.translate to try the "ticks"
# version of the group ONLY if key.axis is not real_axis (for performance reasons)
if axis_key.axis in self:
axis_key = axis_key.retarget_to(self[axis_key.axis])
else:
# axis associated with axis_key may not belong to self.
# In that case, we translate IGroup to labels and search for a compatible axis
# (see end of this method)
axis_key = axis_key.to_label()
# already positional
if isinstance(axis_key, IGroup):
if axis_key.axis is None:
raise ValueError("positional groups without axis are not supported")
return axis_key
# labels but known axis
if isinstance(axis_key, LGroup) and axis_key.axis is not None:
try:
real_axis = self[axis_key.axis]
try:
axis_pos_key = real_axis.index(axis_key)
except KeyError:
raise ValueError("%r is not a valid label for any axis" % axis_key)
return real_axis.i[axis_pos_key]
except KeyError:
# axis associated with axis_key may not belong to self.
# In that case, we translate LGroup to labels and search for a compatible axis
# (see end of this method)
axis_key = axis_key.to_label()
# otherwise we need to guess the axis
# TODO: instead of checking all axes, we should have a big mapping (in AxisCollection):
# label -> (axis, index) but for sparse/multi-index, this would not work, we'd need label -> axis
valid_axes = []
# TODO: use axis_key dtype to only check compatible axes
for axis in self:
try:
axis_pos_key = axis.index(axis_key)
valid_axes.append(axis)
except KeyError:
continue
if not valid_axes:
raise ValueError("%r is not a valid label for any axis" % axis_key)
elif len(valid_axes) > 1:
# TODO: make an AxisCollection.display_name(axis) method out of this
# valid_axes = ', '.join(self.display_name(axis) for a in valid_axes)
valid_axes = ', '.join(a.name if a.name is not None else '{{{}}}'.format(self.index(a))
for a in valid_axes)
raise ValueError('%s is ambiguous (valid in %s)' % (axis_key, valid_axes))
return valid_axes[0].i[axis_pos_key]
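# illustrative sketch: with axes {sex=M,F}, _translate_axis_key_chunk('M') returns the IGroup sex.i[0]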
def _translate_axis_key(self, axis_key):
"""
Translates single axis label-based key to IGroup
Parameters
----------
axis_key : any valid key
Key to select axis.
Returns
-------
IGroup
Indices group with a valid axis (from self)
"""
# called from _key_to_igroups
from .array import Array
# Need to convert string keys to groups otherwise command like
# >>> ndtest((5, 5)).drop('1[a0]')
# will work although it shouldn't
if isinstance(axis_key, basestring):
key = _to_key(axis_key)
if isinstance(key, Group):
axis_key = key
# translate Axis keys to LGroup keys
# FIXME: this should be simply:
# if isinstance(axis_key, Axis):
# axis_key = axis_key[:]
# but it does not work for some reason (the retarget does not seem to happen)
if isinstance(axis_key, Axis):
real_axis = self[axis_key]
if isinstance(axis_key, AxisReference) or axis_key.equals(real_axis):
axis_key = real_axis[:]
else:
axis_key = axis_key.labels
# TODO: do it for Group without axis too
if isinstance(axis_key, (tuple, list, np.ndarray, Array)):
axis = None
# TODO: I should actually do some benchmarks to see if this is useful, and estimate which numbers to use
# FIXME: check that size is smaller than the key size
for size in (1, 10, 100, 1000):
# TODO: do not recheck already checked elements
key_chunk = axis_key.i[:size] if isinstance(axis_key, Array) else axis_key[:size]
try:
tkey = self._translate_axis_key_chunk(key_chunk)
axis = tkey.axis
break
# TODO: we should only continue when ValueError is caused by an ambiguous key, otherwise we only delay
# an inevitable failure
except ValueError:
continue
# the (start of the) key matches a single axis
if axis is not None:
# make sure we have an Axis object
# TODO: we should make sure the tkey returned from _translate_axis_key_chunk always contains a
# real Axis (and thus kill this line)
axis = self[axis]
# wrap key in LGroup
axis_key = axis[axis_key]
# XXX: reuse tkey chunks and only translate the rest?
return self._translate_axis_key_chunk(axis_key)
else:
return self._translate_axis_key_chunk(axis_key)
def _key_to_igroups(self, key):
"""
Translates any key to an IGroups tuple.
Parameters
----------
key : scalar, list/array of scalars, Group or tuple or dict of them
any key supported by Array.__get|setitem__
Returns
-------
tuple
tuple of IGroup, each IGroup having a real axis from this array.
The order of the IGroups is *not* guaranteed to be the same as the order of axes.
See Also
--------
Axis.index
"""
from .array import Array
if isinstance(key, dict):
# key axes could be strings or axis references and we want real axes
key = tuple(self[axis][axis_key] for axis, axis_key in key.items())
elif not isinstance(key, tuple):
# convert scalar keys to 1D keys
key = (key,)
# handle ExprNode
key = tuple(axis_key.evaluate(self) if isinstance(axis_key, ExprNode) else axis_key
for axis_key in key)
nonboolkey = []
for axis_key in key:
if isinstance(axis_key, np.ndarray) and np.issubdtype(axis_key.dtype, np.bool_):
if axis_key.shape != self.shape:
raise ValueError("boolean key with a different shape ({}) than array ({})"
.format(axis_key.shape, self.shape))
axis_key = Array(axis_key, self)
if isinstance(axis_key, Array) and np.issubdtype(axis_key.dtype, np.bool_):
extra_key_axes = axis_key.axes - self
if extra_key_axes:
raise ValueError("boolean subset key contains more axes ({}) than array ({})"
.format(axis_key.axes, self))
# nonzero (currently) returns a tuple of IGroups containing 1D Arrays (one IGroup per axis)
nonboolkey.extend(axis_key.nonzero())
else:
nonboolkey.append(axis_key)
key = tuple(nonboolkey)
# drop slice(None) and Ellipsis since they are meaningless because of guess_axis.
# XXX: we might want to raise an exception when we find Ellipses or (most) slice(None) because except for
# a single slice(None) a[:], I don't think there is any point.
key = [axis_key for axis_key in key
if not _isnoneslice(axis_key) and axis_key is not Ellipsis]
# translate all keys to IGroup
return tuple(self._translate_axis_key(axis_key) for axis_key in key)
def _translated_key(self, key):
"""
Transforms any key (from Array.__get|setitem__) to a complete indices-based key.
Parameters
----------
key : scalar, list/array of scalars, Group or tuple or dict of them
any key supported by Array.__get|setitem__
Returns
-------
tuple
len(tuple) == self.ndim
This key is not yet usable as is in a numpy array as it can still contain Array parts and the advanced key
parts are not broadcasted together yet.
"""
# any key -> (IGroup, IGroup, ...)
igroup_key = self._key_to_igroups(key)
# extract axis from Group keys
key_items = [(k.axis, k) for k in igroup_key]
# even keys given as dict can contain duplicates (if the same axis was
# given under different forms, e.g. name and AxisReference).
dupe_axes = list(duplicates(axis for axis, axis_key in key_items))
if dupe_axes:
dupe_axes = ', '.join(str(axis) for axis in dupe_axes)
raise ValueError("key has several values for axis: %s\n%s" % (dupe_axes, key_items))
# IGroup -> raw positional
dict_key = {axis: axis.index(axis_key) for axis, axis_key in key_items}
# dict -> tuple (complete and order key)
assert all(isinstance(k, Axis) for k in dict_key)
return tuple(dict_key[axis] if axis in dict_key else slice(None)
for axis in self)
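    # Minimal sketch of the translation (written as comments rather than a
    # doctest since the exact repr of positional keys is an implementation
    # detail; assumes the larray ``ndtest`` helper):
    #
    #     >>> arr = ndtest((2, 3))                    # axes a=a0,a1 ; b=b0..b2
    #     >>> arr.axes._translated_key(('a0', 'b2'))  # -> (0, 2)
    #     >>> arr.axes._translated_key(('b1',))       # -> (slice(None), 1)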
def _key_to_raw_and_axes(self, key, collapse_slices=False, translate_key=True):
r"""
Transforms any key (from Array.__getitem__) to a raw numpy key, the resulting axes, and potentially a tuple
of indices to transpose axes back to where they were.
Parameters
----------
key : scalar, list/array of scalars, Group or tuple or dict of them
any key supported by Array.__getitem__
collapse_slices : bool, optional
Whether or not to convert ranges to slices. Defaults to False.
Returns
-------
raw_key, res_axes, transposed_indices
"""
from .array import make_numpy_broadcastable, Array, sequence
if translate_key:
key = self._translated_key(key)
assert isinstance(key, tuple) and len(key) == self.ndim
# scalar array
if not self.ndim:
return key, AxisCollection([]), None
# transform ranges to slices if needed
if collapse_slices:
# isinstance(np.ndarray, collections.Sequence) is False but it behaves like one
seq_types = (tuple, list, np.ndarray)
# TODO: we should only do this if there are no Array key (with axes corresponding to the range)
# otherwise we will be translating them back to a range afterwards
key = [_range_to_slice(axis_key, len(axis)) if isinstance(axis_key, seq_types) else axis_key
for axis_key, axis in zip(key, self)]
# transform non-Array advanced keys (list and ndarray) to Array
def to_la_ikey(axis, axis_key):
if isinstance(axis_key, (int, np.integer, slice, Array)):
return axis_key
else:
assert isinstance(axis_key, (list, np.ndarray))
res_axis = axis.subaxis(axis_key)
# TODO: for perf reasons, we should bypass creating an actual Array by returning axes and key_data
# but then we will need to implement a function similar to make_numpy_broadcastable which works on axes
# and rawdata instead of arrays
return Array(axis_key, res_axis)
key = tuple(to_la_ikey(axis, axis_key) for axis, axis_key in zip(self, key))
# transform slice keys to Array too IF they refer to axes present in advanced key (so that those axes
# broadcast together instead of being duplicated, which is not what we want)
def get_axes(value):
return value.axes if isinstance(value, Array) else AxisCollection([])
def slice_to_sequence(axis, axis_key):
if isinstance(axis_key, slice) and axis in la_key_axes:
# TODO: sequence assumes the axis in the la_key is in the same order. It will be easier to solve when
# make_numpy_broadcastable automatically aligns all arrays
start, stop, step = axis_key.indices(len(axis))
return sequence(axis.subaxis(axis_key), initial=start, inc=step)
else:
return axis_key
# XXX: can we avoid computing this twice? (here and in make_numpy_broadcastable)
la_key_axes = AxisCollection.union(*[get_axes(k) for k in key])
key = tuple(slice_to_sequence(axis, axis_key) for axis, axis_key in zip(self, key))
# start with the simple (slice) keys
# scalar keys are ignored since they do not produce any resulting axis
res_axes = AxisCollection([axis.subaxis(axis_key)
for axis, axis_key in zip(self, key)
if isinstance(axis_key, slice)])
transpose_indices = None
# if there are only simple keys, do not bother going via the "advanced indexing" code path
if all(isinstance(axis_key, (int, np.integer, slice)) for axis_key in key):
bcasted_adv_keys = key
else:
            # Now that we know advanced indexing comes into play, we need to compute where the subspace created by the
# advanced indexes will be inserted. Note that there is only ever a SINGLE combined subspace (even if it
# has multiple axes) because all the non slice indexers MUST broadcast together to a single
# "advanced indexer"
# to determine where the "subspace" axes will be inserted, a scalar key counts as "advanced" indexing
adv_axes_indices = [i for i, axis_key in enumerate(key)
if not isinstance(axis_key, slice)]
diff = np.diff(adv_axes_indices)
if np.any(diff > 1):
# insert advanced indexing subspace in front
adv_key_subspace_pos = 0
# If all (non scalar) adv_keys are 1D and have a different axis name, we will index the cross product.
# In that case, store their original order so that we can transpose them back to where they were.
adv_keys = [axis_key for axis_key in key if not isinstance(axis_key, (int, np.integer, slice))]
if all(axis_key.ndim == 1 for axis_key in adv_keys):
# we can only handle the non-anonymous axes case since anonymous axes will not broadcast to the
# cross product anyway
if len(set(axis_key.axes[0].name for axis_key in adv_keys)) == len(adv_keys):
# 0, 1, 2, 3, 4, 5 <- original axes indices
# A X A S S A <- key (A = adv, X = scalar/remove, S = slice)
# 0, 2, 5, 3, 4 <- result
# 0, 2, 3, 4, 5 <- desired result
# 0, 1, 3, 4, 2 <- what I need to feed to transpose to get the correct result
adv_axes_indices = [i for i, axis_key in enumerate(key)
if not isinstance(axis_key, (int, np.integer, slice))]
# not taking scalar axes since they will disappear
slice_axes_indices = [i for i, axis_key in enumerate(key)
if isinstance(axis_key, slice)]
result_axes_indices = adv_axes_indices + slice_axes_indices
transpose_indices = tuple(np.array(result_axes_indices).argsort())
else:
                # the advanced indexing subspace keeps its position (insert at position of first concerned axis)
adv_key_subspace_pos = adv_axes_indices[0]
# scalar/slice keys are ignored by make_numpy_broadcastable, which is exactly what we need
bcasted_adv_keys, adv_key_dest_axes = make_numpy_broadcastable(key)
# insert advanced indexing subspace
res_axes[adv_key_subspace_pos:adv_key_subspace_pos] = adv_key_dest_axes
# transform to raw numpy arrays
raw_broadcasted_key = tuple(k.data if isinstance(k, Array) else k
for k in bcasted_adv_keys)
return raw_broadcasted_key, res_axes, transpose_indices
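    # Usage sketch (hypothetical, this is an internal API): Array.__getitem__
    # essentially does
    #
    #     raw_key, res_axes, t_indices = self.axes._key_to_raw_and_axes(key)
    #     raw_data = self.data[raw_key]
    #     if t_indices is not None:
    #         raw_data = raw_data.transpose(t_indices)
    #     return Array(raw_data, res_axes)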
@property
def labels(self):
r"""
Returns the list of labels of the axes.
Returns
-------
list
List of labels of the axes.
Examples
--------
>>> age = Axis(range(10), 'age')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> AxisCollection([age, time]).labels # doctest: +NORMALIZE_WHITESPACE
[array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
array([2007, 2008, 2009, 2010])]
"""
return [axis.labels for axis in self._list]
@property
def names(self):
r"""
Returns the list of (raw) names of the axes.
Returns
-------
list
List of names of the axes.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> AxisCollection([age, sex, time]).names
['age', 'sex', 'time']
"""
return [axis.name for axis in self._list]
@property
def display_names(self):
r"""
Returns the list of (display) names of the axes.
Returns
-------
list
List of names of the axes. Wildcard axes are displayed with an attached \*.
Anonymous axes (name = None) are replaced by their position wrapped in braces.
Examples
--------
>>> a = Axis(['a1', 'a2'], 'a')
>>> b = Axis(2, 'b')
>>> c = Axis(['c1', 'c2'])
>>> d = Axis(3)
>>> AxisCollection([a, b, c, d]).display_names
['a', 'b*', '{2}', '{3}*']
"""
def display_name(i, axis):
name = axis.name if axis.name is not None else '{%d}' % i
return (name + '*') if axis.iswildcard else name
return [display_name(i, axis) for i, axis in enumerate(self._list)]
@property
def ids(self):
r"""
Returns the list of ids of the axes.
Returns
-------
list
List of ids of the axes.
See Also
--------
axis_id
Examples
--------
>>> a = Axis(2, 'a')
>>> b = Axis(2)
>>> c = Axis(2, 'c')
>>> AxisCollection([a, b, c]).ids
['a', 1, 'c']
"""
return [axis.name if axis.name is not None else i
for i, axis in enumerate(self._list)]
def axis_id(self, axis):
r"""
Returns the id of an axis.
Returns
-------
str or int
Id of axis, which is its name if defined and its position otherwise.
Examples
--------
>>> a = Axis(2, 'a')
>>> b = Axis(2)
>>> c = Axis(2, 'c')
>>> col = AxisCollection([a, b, c])
>>> col.axis_id(a)
'a'
>>> col.axis_id(b)
1
>>> col.axis_id(c)
'c'
"""
axis = self[axis]
return axis.name if axis.name is not None else self.index(axis)
@property
def shape(self):
r"""
Returns the shape of the collection.
Returns
-------
tuple
Tuple of lengths of axes.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> AxisCollection([age, sex, time]).shape
(20, 2, 4)
"""
return tuple(len(axis) for axis in self._list)
@property
def size(self):
r"""
Returns the size of the collection, i.e.
the number of elements of the array.
Returns
-------
int
Number of elements of the array.
Examples
--------
>>> age = Axis(range(20), 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> AxisCollection([age, sex, time]).size
160
"""
return np.prod(self.shape)
@property
def info(self):
r"""
Describes the collection (shape and labels for each axis).
Returns
-------
str
Description of the AxisCollection (shape and labels for each axis).
Examples
--------
>>> age = Axis(20, 'age')
>>> sex = Axis('sex=M,F')
>>> time = Axis([2007, 2008, 2009, 2010], 'time')
>>> AxisCollection([age, sex, time]).info
20 x 2 x 4
age* [20]: 0 1 2 ... 17 18 19
sex [2]: 'M' 'F'
time [4]: 2007 2008 2009 2010
"""
lines = [" %s [%d]: %s" % (name, len(axis), axis.labels_summary())
for name, axis in zip(self.display_names, self._list)]
shape = " x ".join(str(s) for s in self.shape)
return ReprString('\n'.join([shape] + lines))
# XXX: instead of front_if_spread, we might want to require axes to be contiguous
# (ie the caller would have to transpose axes before calling this)
def combine_axes(self, axes=None, sep='_', wildcard=False, front_if_spread=False):
r"""Combine several axes into one.
Parameters
----------
axes : tuple, list, AxisCollection of axes or list of combination of those or dict, optional
axes to combine. Tuple, list or AxisCollection will combine several axes into one. To chain several axes
combinations, pass a list of tuple/list/AxisCollection of axes. To set the name(s) of resulting axis(es),
use a {(axes, to, combine): 'new_axis_name'} dictionary. Defaults to all axes.
sep : str, optional
delimiter to use for combining. Defaults to '_'.
wildcard : bool, optional
whether or not to produce a wildcard axis even if the axes to combine are not.
            This is much faster, but loses axes labels.
front_if_spread : bool, optional
whether or not to move the combined axis at the front (it will be the first axis) if the combined axes are
not next to each other.
Returns
-------
AxisCollection
New AxisCollection with combined axes.
Examples
--------
>>> axes = AxisCollection('a=a0,a1;b=b0..b2')
>>> axes
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1', 'b2'], 'b')
])
>>> axes.combine_axes()
AxisCollection([
Axis(['a0_b0', 'a0_b1', 'a0_b2', 'a1_b0', 'a1_b1', 'a1_b2'], 'a_b')
])
>>> axes.combine_axes(sep='/')
AxisCollection([
Axis(['a0/b0', 'a0/b1', 'a0/b2', 'a1/b0', 'a1/b1', 'a1/b2'], 'a/b')
])
>>> axes += AxisCollection('c=c0..c2;d=d0,d1')
>>> axes.combine_axes(('a', 'c'))
AxisCollection([
Axis(['a0_c0', 'a0_c1', 'a0_c2', 'a1_c0', 'a1_c1', 'a1_c2'], 'a_c'),
Axis(['b0', 'b1', 'b2'], 'b'),
Axis(['d0', 'd1'], 'd')
])
>>> axes.combine_axes({('a', 'c'): 'ac'})
AxisCollection([
Axis(['a0_c0', 'a0_c1', 'a0_c2', 'a1_c0', 'a1_c1', 'a1_c2'], 'ac'),
Axis(['b0', 'b1', 'b2'], 'b'),
Axis(['d0', 'd1'], 'd')
])
# make several combinations at once
>>> axes.combine_axes([('a', 'c'), ('b', 'd')])
AxisCollection([
Axis(['a0_c0', 'a0_c1', 'a0_c2', 'a1_c0', 'a1_c1', 'a1_c2'], 'a_c'),
Axis(['b0_d0', 'b0_d1', 'b1_d0', 'b1_d1', 'b2_d0', 'b2_d1'], 'b_d')
])
>>> axes.combine_axes({('a', 'c'): 'ac', ('b', 'd'): 'bd'})
AxisCollection([
Axis(['a0_c0', 'a0_c1', 'a0_c2', 'a1_c0', 'a1_c1', 'a1_c2'], 'ac'),
Axis(['b0_d0', 'b0_d1', 'b1_d0', 'b1_d1', 'b2_d0', 'b2_d1'], 'bd')
])
"""
if axes is None:
axes = {tuple(self): None}
elif isinstance(axes, AxisCollection):
axes = {tuple(self[axes]): None}
elif isinstance(axes, (list, tuple)):
# checks for nested tuple/list
if all(isinstance(axis, (list, tuple, AxisCollection)) for axis in axes):
axes = {tuple(self[axes_to_combine]): None for axes_to_combine in axes}
else:
axes = {tuple(self[axes]): None}
# axes should be a dict at this time
assert isinstance(axes, dict)
new_axes = self[:]
for _axes, name in axes.items():
_axes = new_axes[_axes]
axes_indices = [new_axes.index(axis) for axis in _axes]
diff = np.diff(axes_indices)
# combined axes in front
if front_if_spread and np.any(diff > 1):
combined_axis_pos = 0
else:
combined_axis_pos = min(axes_indices)
if name is not None:
combined_name = name
# all anonymous axes => anonymous combined axis
elif all(axis.name is None for axis in _axes):
combined_name = None
else:
combined_name = sep.join(str(id_) for id_ in _axes.ids)
if wildcard:
combined_axis = Axis(_axes.size, combined_name)
else:
# TODO: the combined keys should be objects which display as: (axis1_label, axis2_label, ...) but
# which should also store the axes names)
# Q: Should it be the same object as the NDLGroup?/NDKey?
# A: yes. On the Pandas backend, we could/should have separate axes. On the numpy backend we cannot.
if len(_axes) == 1:
# Q: if axis is a wildcard axis, should the result be a wildcard axis (and axes_labels discarded?)
combined_labels = _axes[0].labels
else:
sepjoin = sep.join
axes_labels = [np.array(l, np.str, copy=False) for l in _axes.labels]
combined_labels = [sepjoin(p) for p in product(*axes_labels)]
combined_axis = Axis(combined_labels, combined_name)
new_axes = new_axes - _axes
new_axes.insert(combined_axis_pos, combined_axis)
return new_axes
def split_axes(self, axes=None, sep='_', names=None, regex=None):
r"""Split axes and returns a new collection
The split axes are inserted where the combined axis was.
Parameters
----------
axes : int, str, Axis or any combination of those, optional
axes to split. All labels *must* contain the given delimiter string. To split several axes at once, pass
a list or tuple of axes to split. To set the names of resulting axes, use a {'axis_to_split': (new, axes)}
dictionary. Defaults to all axes whose name contains the `sep` delimiter.
sep : str, optional
delimiter to use for splitting. Defaults to '_'. When `regex` is provided, the delimiter is only used on
`names` if given as one string or on axis name if `names` is None.
names : str or list of str, optional
names of resulting axes. Defaults to None.
regex : str, optional
use the `regex` regular expression to split labels instead of the `sep` delimiter. Defaults to None.
See Also
--------
Axis.split
Array.split_axes
Returns
-------
AxisCollection
Examples
--------
>>> col = AxisCollection('a=a0,a1;b=b0..b2')
>>> col
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1', 'b2'], 'b')
])
>>> combined = col.combine_axes()
>>> combined
AxisCollection([
Axis(['a0_b0', 'a0_b1', 'a0_b2', 'a1_b0', 'a1_b1', 'a1_b2'], 'a_b')
])
>>> combined.split_axes()
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1', 'b2'], 'b')
])
Split labels using a regular expression
>>> combined = AxisCollection('a_b = a0b0..a1b2')
>>> combined
AxisCollection([
Axis(['a0b0', 'a0b1', 'a0b2', 'a1b0', 'a1b1', 'a1b2'], 'a_b')
])
>>> combined.split_axes('a_b', regex=r'(\w{2})(\w{2})')
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1', 'b2'], 'b')
])
Split several axes at once
>>> combined = AxisCollection('a_b = a0_b0..a1_b1; c_d = c0_d0..c1_d1')
>>> combined
AxisCollection([
Axis(['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1'], 'a_b'),
Axis(['c0_d0', 'c0_d1', 'c1_d0', 'c1_d1'], 'c_d')
])
>>> # equivalent to combined.split_axes() which split all axes
>>> # containing the delimiter defined by the argument `sep`
>>> combined.split_axes(['a_b', 'c_d'])
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1'], 'b'),
Axis(['c0', 'c1'], 'c'),
Axis(['d0', 'd1'], 'd')
])
>>> combined.split_axes({'a_b': ('A', 'B'), 'c_d': ('C', 'D')})
AxisCollection([
Axis(['a0', 'a1'], 'A'),
Axis(['b0', 'b1'], 'B'),
Axis(['c0', 'c1'], 'C'),
Axis(['d0', 'd1'], 'D')
])
"""
if axes is None:
axes = {axis: None for axis in self if sep in axis.name}
elif isinstance(axes, (int, basestring, Axis)):
axes = {axes: None}
elif isinstance(axes, (list, tuple)):
if all(isinstance(axis, (int, basestring, Axis)) for axis in axes):
axes = {axis: None for axis in axes}
else:
raise ValueError("Expected tuple or list of int, string or Axis instances")
# axes should be a dict at this time
assert isinstance(axes, dict)
new_axes = self[:]
for axis, names in axes.items():
axis = new_axes[axis]
axis_index = new_axes.index(axis)
split_axes = axis.split(sep, names, regex)
new_axes = new_axes[:axis_index] + split_axes + new_axes[axis_index + 1:]
return new_axes
split_axis = renamed_to(split_axes, 'split_axis')
def align(self, other, join='outer', axes=None):
r"""Align this axis collection with another.
This ensures all common axes are compatible.
Parameters
----------
other : AxisCollection
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Defaults to 'outer'.
axes : AxisReference or sequence of them, optional
Axes to align. Need to be valid in both arrays. Defaults to None (all common axes). This must be specified
when mixing anonymous and non-anonymous axes.
Returns
-------
(left, right) : (AxisCollection, AxisCollection)
Aligned collections
See Also
--------
Array.align
Examples
--------
>>> col1 = AxisCollection("a=a0..a1;b=b0..b2")
>>> col1
AxisCollection([
Axis(['a0', 'a1'], 'a'),
Axis(['b0', 'b1', 'b2'], 'b')
])
>>> col2 = AxisCollection("a=a0..a2;c=c0..c0;b=b0..b1")
>>> col2
AxisCollection([
Axis(['a0', 'a1', 'a2'], 'a'),
Axis(['c0'], 'c'),
Axis(['b0', 'b1'], 'b')
])
>>> aligned1, aligned2 = col1.align(col2)
>>> aligned1
AxisCollection([
Axis(['a0', 'a1', 'a2'], 'a'),
Axis(['b0', 'b1', 'b2'], 'b')
])
>>> aligned2
AxisCollection([
Axis(['a0', 'a1', 'a2'], 'a'),
Axis(['c0'], 'c'),
Axis(['b0', 'b1', 'b2'], 'b')
])
Using anonymous axes
>>> col1 = AxisCollection("a0..a1;b0..b2")
>>> col1
AxisCollection([
Axis(['a0', 'a1'], None),
Axis(['b0', 'b1', 'b2'], None)
])
>>> col2 = AxisCollection("a0..a2;b0..b1;c0..c0")
>>> col2
AxisCollection([
Axis(['a0', 'a1', 'a2'], None),
Axis(['b0', 'b1'], None),
Axis(['c0'], None)
])
>>> aligned1, aligned2 = col1.align(col2)
>>> aligned1
AxisCollection([
Axis(['a0', 'a1', 'a2'], None),
Axis(['b0', 'b1', 'b2'], None)
])
>>> aligned2
AxisCollection([
Axis(['a0', 'a1', 'a2'], None),
Axis(['b0', 'b1', 'b2'], None),
Axis(['c0'], None)
])
"""
if join not in {'outer', 'inner', 'left', 'right', 'exact'}:
raise ValueError("join should be one of 'outer', 'inner', 'left', 'right' or 'exact'")
other = other if isinstance(other, AxisCollection) else AxisCollection(other)
# if axes not specified
if axes is None:
# and we have only anonymous axes on both sides
if all(name is None for name in self.names) and all(name is None for name in other.names):
# use N first axes by position
join_axes = list(range(min(len(self), len(other))))
elif any(name is None for name in self.names) or any(name is None for name in other.names):
raise ValueError("axes collections with mixed anonymous/non anonymous axes are not supported by align"
"without specifying axes explicitly")
else:
assert all(name is not None for name in self.names) and all(name is not None for name in other.names)
# use all common axes
join_axes = list(OrderedSet(self.names) & OrderedSet(other.names))
else:
if isinstance(axes, (int, str, Axis)):
axes = [axes]
join_axes = axes
new_axes = [self_axis.align(other_axis, join=join)
for self_axis, other_axis in zip(self[join_axes], other[join_axes])]
axes_changes = list(zip(join_axes, new_axes))
return self.replace(axes_changes), other.replace(axes_changes)
# XXX: make this into a public method/property? axes_col.flat_labels[flat_indices]?
def _flat_lookup(self, flat_indices):
r"""Return labels corresponding to indices into the flattened axes
Parameters
----------
flat_indices : array-like
indices to get
Examples
--------
>>> from larray import ndtest, Array
>>> arr = ndtest((2, 3))
>>> arr
a\b b0 b1 b2
a0 0 1 2
a1 3 4 5
>>> indices = Array([2, 5, 0], 'draw=d0..d2')
>>> indices
draw d0 d1 d2
2 5 0
>>> arr.axes._flat_lookup(indices)
draw\axis a b
d0 a0 b2
d1 a1 b2
d2 a0 b0
"""
from larray.core.array import asarray, Array, stack
flat_indices = asarray(flat_indices)
axes_indices = np.unravel_index(flat_indices, self.shape)
# This could return an Array with object dtype because axes labels can have different types (but not length)
# TODO: this should be:
# return stack([(axis.name, axis.i[inds]) for axis, inds in zip(axes, axes_indices)], axis='axis')
flat_axes = flat_indices.axes
return stack([(axis.name, Array(axis.labels[inds], flat_axes)) for axis, inds in zip(self, axes_indices)],
axes='axis')
def _adv_keys_to_combined_axis_la_keys(self, key, wildcard=False, sep='_'):
r"""
Returns key with the non-Array "advanced indexing" key parts transformed to Arrays with a combined axis.
Scalar, slice and Array key parts are just left as is.
Parameters
----------
key : tuple
Complete (len(key) == self.ndim) indices-based key.
wildcard : bool, optional
Whether or not to produce a wildcard axis. Defaults to False.
sep : str, optional
Separator to use for creating combined axis name and labels (when wildcard is False). Defaults to '_'.
Returns
-------
tuple
"""
from larray.core.array import Array
assert isinstance(key, tuple) and len(key) == self.ndim
# 1) first compute combined axis
# ==============================
# TODO: use/factorize with AxisCollection.combine_axes. The problem is that it uses product(*axes_labels)
# while here we need zip(*axes_labels)
ignored_types = (int, np.integer, slice, Array)
adv_keys = [(axis_key, axis) for axis_key, axis in zip(key, self)
if not isinstance(axis_key, ignored_types)]
if not adv_keys:
return key
# axes with a scalar key are not taken, since we want to kill them
# all anonymous axes => anonymous combined axis
if all(axis.name is None for axis_key, axis in adv_keys):
combined_name = None
else:
# using axis_id instead of name to allow combining a mix of anonymous & non anonymous axes
combined_name = sep.join(str(self.axis_id(axis)) for axis_key, axis in adv_keys)
# explicitly check that all combined keys have the same length
first_key, first_axis = adv_keys[0]
combined_axis_len = len(first_key)
if not all(len(axis_key) == combined_axis_len for axis_key, axis in adv_keys[1:]):
raise ValueError("all combined keys should have the same length")
if wildcard:
combined_axis = Axis(combined_axis_len, combined_name)
else:
# TODO: the combined keys should be objects which display as:
# (axis1_label, axis2_label, ...) but which should also store
# the axis (names?)
# Q: Should it be the same object as the NDLGroup?/NDKey?
# A: yes, probably. On the Pandas backend, we could/should have
# separate axes. On the numpy backend we cannot.
# TODO: only convert if
if len(adv_keys) == 1:
# we do not convert to string when there is only a single axis
axes_labels = [axis.labels[axis_key]
for axis_key, axis in adv_keys]
# Q: if axis is a wildcard axis, should the result be a
# wildcard axis (and axes_labels discarded?)
combined_labels = axes_labels[0]
else:
axes_labels = [axis.labels.astype(np.str, copy=False)[axis_key].tolist()
for axis_key, axis in adv_keys]
sepjoin = sep.join
combined_labels = [sepjoin(comb) for comb in zip(*axes_labels)]
combined_axis = Axis(combined_labels, combined_name)
combined_axes = AxisCollection(combined_axis)
# 2) transform all advanced non-Array keys to Array with the combined axis
# ========================================================================
return tuple(axis_key if isinstance(axis_key, ignored_types) else Array(axis_key, combined_axes)
for axis_key in key)
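    # Illustrative sketch (hypothetical labels): with axes a=a0,a1 and b=b0..b2,
    # the advanced key parts ([0, 1], [0, 2]) are zipped label-wise (not taken
    # as a cross product), yielding a single combined axis 'a_b' with labels
    # ['a0_b0', 'a1_b2']; scalar, slice and Array parts are returned untouched.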
class AxisReference(ABCAxisReference, ExprNode, Axis):
def __init__(self, name):
self.name = name
self._labels = None
self._iswildcard = False
def index(self, key):
raise NotImplementedError("an AxisReference (X.) cannot translate labels")
def __repr__(self):
return 'AxisReference(%r)' % self.name
def evaluate(self, context):
r"""
Parameters
----------
context : AxisCollection
Use axes from this collection
"""
return context[self]
# needed because ExprNode.__hash__ (which is object.__hash__) takes precedence over Axis.__hash__
def __hash__(self):
return id(self)
class AxisReferenceFactory(object):
# needed to make pickle work (because we have a __getattr__ which does not return AttributeError on __getstate__)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
def __getattr__(self, key):
return AxisReference(key)
def __getitem__(self, key):
return AxisReference(key)
X = AxisReferenceFactory()
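# Usage sketch: attribute and item access both build lazy references which are
# only resolved against a concrete collection later (output is illustrative):
#
#     >>> X.age
#     AxisReference('age')
#     >>> X['time'].evaluate(AxisCollection([Axis(3, 'time')]))
#     Axis(3, 'time')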
class DeprecatedAxisReferenceFactory(object):
# needed to make pickle work (because we have a __getattr__ which does not return AttributeError on __getstate__)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
def __getattr__(self, key):
warnings.warn("Special variable 'x' is deprecated, use 'X' instead", FutureWarning, stacklevel=2)
return AxisReference(key)
def __getitem__(self, key):
warnings.warn("Special variable 'x' is deprecated, use 'X' instead", FutureWarning, stacklevel=2)
return AxisReference(key)
x = DeprecatedAxisReferenceFactory()
| gpl-3.0 |
tttor/csipb-jamu-prj | predictor/connectivity/classifier/imbalance/ensembled_svm.py | 1 | 5679 | # ensembled_svm.py
import os
import sys
import pickle
import json
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.decomposition import PCA, KernelPCA
from sklearn import svm
from scoop import futures as fu
sys.path.append('../../utility')
import classifier_util as cutil
from esvm_config import config as cfg
class EnsembledSVM:
def __init__(self,simMat):
self._kernel = cfg['kernel']
self._mode = cfg['mode']
        self._bootstrap = cfg['bootstrap']
self._maxTrainingSamplesPerBatch = cfg['maxTrainingSamplesPerBatch']
self._maxTestingSamplesPerBatch = cfg['maxTestingSamplesPerBatch']
self._maxNumberOfTrainingBatches = cfg['maxNumberOfTrainingBatches']
self._simMat = simMat
self._svmList = []
self._labels = []
def writeSVM(self,outDir):
fpath = os.path.join(outDir,'esvm.pkl')
with open(fpath,'w') as f: pickle.dump(self._svmList,f)
def labels(self):
assert len(self._labels)!=0
return self._labels
def nSVM(self):
return len(self._svmList)
def xtrDimAllBatches(self):
dims = []
for _,xtr,_ in self._svmList:
dims.append( np.asarray(xtr).shape )
return dims
## Fit #########################################################################################
def fit(self,ixtr,iytr):
xyTrList = cutil.divideSamples(ixtr,iytr,self._maxTrainingSamplesPerBatch)
if self._maxNumberOfTrainingBatches != 0:
xyTrList = xyTrList[0:self._maxNumberOfTrainingBatches]
self._svmList = list( fu.map(self._fit,
[xytr[0] for xytr in xyTrList],
[xytr[1] for xytr in xyTrList]) )
assert len(self._svmList)!=0,'empty _svmList in fit()'
self._labels = self._svmList[0][0].classes_.tolist()
for svm in self._svmList: assert svm[0].classes_.tolist()==self._labels
def _fit(self,xtr,ytr):
## dimred
if cfg['dimred']=='pca':
dimred = PCA(n_components=cfg['dimredNComponents'],svd_solver=cfg['dimredSolver'])
elif cfg['dimred']=='kpca':
dimred = KernelPCA(n_components=cfg['dimredNComponents'],kernel=cfg['kernel'],n_jobs=-1)
        elif cfg['dimred'] == 'none':
dimred = None
else:
assert False,'FATAL: unknown dimred'
if dimred is not None:
dimred.fit(xtr)
xtr = dimred.transform(np.asarray(xtr))
## tuning
clf = svm.SVC(kernel=self._kernel,probability=True)
## train
if self._kernel=='precomputed':
assert self._simMat is not None
simMatTr = cutil.makeComProKernelMatFromSimMat(xtr,xtr,self._simMat)
clf.fit(simMatTr,ytr)
else:
clf.fit(xtr,ytr)
return (clf,xtr,dimred)
## Predict #####################################################################################
def predict(self,ixte):
assert len(self._svmList)!=0,'empty _svmList in predict()'
xyTeList = cutil.divideSamples(ixte,None,self._maxTestingSamplesPerBatch)
xTeList = [i[0] for i in xyTeList]; n = len(xTeList)
ypredList = list( fu.map(self._predict,
xTeList,[self._mode]*n,[self._svmList]*n,[self._labels]*n) )
        ypredMerged = []; yscoreMerged = []
for i in ypredList:
ypredMerged += [j[0] for j in i]
yscoreMerged += [j[1] for j in i]
assert len(ypredMerged)==len(ixte),str(len(ypredMerged))+'!='+str(len(ixte))
return (ypredMerged,yscoreMerged)
def _predict(self,xte,mode,svmList,labels):
ypred2 = list( fu.map(self._predict2,
svmList,[xte]*len(svmList)) )
ypred3 = [] # ypred merged from all classifiers
for i in range(len(xte)):# for each member/sample of the vector xte
ypred3i = [ypred2[j][i] for j in range(len(svmList))]# of each sample from all classifiers
ypred3i = self._merge(ypred3i,mode,labels)
ypred3.append(ypred3i)
return ypred3
def _predict2(self,iclf,xte):
clf,xtr,dimred = iclf
xte = dimred.transform(np.asarray(xte))
if self._kernel=='precomputed':
assert self._simMat is not None
simMatTe = cutil.makeComProKernelMatFromSimMat(xte,xtr,self._simMat)
ypred2iRaw = clf.predict(simMatTe) # remember: ypred2i is a vector
ypredproba2iRaw = clf.predict_proba(simMatTe)
else:
ypred2iRaw = clf.predict(xte) # remember: ypred2i is a vector
ypredproba2iRaw = clf.predict_proba(xte)
        ypred2i = list(zip(ypred2iRaw, ypredproba2iRaw))  # list() so results stay indexable on Python 3
return ypred2i
def _merge(self,yListRaw,mode,labels):
y = None; yscore = None
yList,yprobaList = zip(*yListRaw)
if mode=='hard':
counters = [0]*len(labels)
for i in yList: counters[ labels.index(i) ] += 1
assert sum(counters)==len(yList),'sum(counters)!=len(yList)'
y = labels[ counters.index(max(counters)) ]
yscore = max(counters)/float(sum(counters))
elif mode=='soft':
avgProbaList = [] # from all labels, averaged over all classifiers
for i in range(len(labels)): avgProbaList.append(np.mean([ j[i] for j in yprobaList ]))
yscore = max(avgProbaList)
y = labels[ avgProbaList.index(yscore) ]
else:
            assert False,'FATAL: unknown mode'
return (y,yscore)
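# Minimal usage sketch (hypothetical data; assumes cfg in esvm_config is set up
# and the script is launched through scoop, e.g. `python -m scoop script.py`):
#
#     esvm = EnsembledSVM(simMat=None)        # plain (non-precomputed) kernel
#     esvm.fit(xtr, ytr)                      # feature vectors + labels
#     ypred, yscores = esvm.predict(xte)      # majority ('hard') voting or
#                                             # averaged-probability ('soft')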
| mit |
KellyChan/Python | python/sklearn/examples/general/clustering_text_documents_using_k-means.py | 3 | 6625 | #---------------------------------------------------------------#
# Project: Clustering text documents using k-means
# Author: Kelly Chan
# Date: Apr 25 2014
#---------------------------------------------------------------#
from __future__ import print_function
import sys
import logging
from time import time
from optparse import OptionParser
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import metrics
from sklearn.pipeline import Pipeline
from sklearn.cluster import KMeans, MiniBatchKMeans
# disply progress logs on stdout
logging.basicConfig(level=logging.INFO, \
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa", \
dest="n_components", \
type="int", \
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch", \
action="store_false", \
dest="minibatch", \
default=True, \
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf", \
action="store_false", \
dest="use_idf", \
default=True, \
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing", \
action="store_true", \
default=False, \
help="Use a hashing feature vectorizer")
op.add_option("--n-features", \
type=int, \
default=10000, \
help="Maximum number of features (dimensions) to extract from text.")
op.add_option("--verbose", \
action="store_true", \
dest="verbose", \
default=False, \
help="Print progress reports inside k-means algorithm.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
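# Example invocations (illustrative):
#   python clustering_text_documents_using_k-means.py --lsa 100
#   python clustering_text_documents_using_k-means.py --no-minibatch --use-hashing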
def loadCategories():
# load some categories from the training set
categories = ['alt.atheism', \
'talk.religion.misc', \
'comp.graphics', \
'sci.space', \
]
return categories
def loadData(categories):
# uncomment the following for all categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', \
categories=categories, \
shuffle=True, \
random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
return dataset
def extractFeatures(dataset):
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features, \
stop_words='english', \
non_negative=True, \
norm=None, \
binary=False)
            vectorizer = Pipeline([('hasher', hasher), \
                                   ('tf_idf', TfidfTransformer())])
else:
vectorizer = HashingVectorizer(n_features=opts.n_features, \
stop_words='english', \
non_negative=False, \
                                           norm='l2', \
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, \
max_features=opts.n_features, \
stop_words='english', \
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
    return X, labels, true_k
def reduceDimensions(X):
print("Performing dimensionality reduction using LSA")
t0 = time()
lsa = TruncatedSVD(opts.n_components)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
print()
return X
def normalize(X):
print("Normalizing data for K-Means")
t0 = time()
X = Normalizer(copy=False).fit_transform(X)
print("done in %fs" % (time() - t0))
print()
return X
def clusteringKMeans(X, true_k):
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, \
init='k-means++', \
n_init=1, \
init_size=1000, \
batch_size=1000, \
verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, \
init='k-means++', \
max_iter=100, \
n_init=1, \
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
y_clust = km.fit(X)
print("done in %.3fs" % (time() - t0))
print()
return y_clust, km
def evaluate(km, X, labels):
print("Homogeneity: %.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %.3f" % metrics.silhouette_score(X, \
labels, \
sample_size=1000))
def main():
categories = loadCategories()
dataset = loadData(categories)
    X, labels, true_k = extractFeatures(dataset)
    # LSA and normalization only make sense when --lsa was requested
    if opts.n_components:
        X = reduceDimensions(X)
        X = normalize(X)
    y_clust, km = clusteringKMeans(X, true_k)
    evaluate(km, X, labels)
if __name__ == '__main__':
main()
| mit |
samuelshaner/openmc | tests/test_mgxs_library_mesh/test_mgxs_library_mesh.py | 1 | 2755 | #!/usr/bin/env python
import os
import sys
import glob
import hashlib
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
import openmc
import openmc.mgxs
class MGXSTestHarness(PyAPITestHarness):
def _build_inputs(self):
# Generate inputs using parent class routine
super(MGXSTestHarness, self)._build_inputs()
# Initialize a one-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 20.e6])
# Initialize MGXS Library for a few cross section types
# for one material-filled cell in the geometry
self.mgxs_lib = openmc.mgxs.Library(self._input_set.geometry)
self.mgxs_lib.by_nuclide = False
# Test all MGXS types
self.mgxs_lib.mgxs_types = openmc.mgxs.MGXS_TYPES + \
openmc.mgxs.MDGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.num_delayed_groups = 6
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'mesh'
# Instantiate a tally mesh
mesh = openmc.Mesh(mesh_id=1)
mesh.type = 'regular'
mesh.dimension = [2, 2]
mesh.lower_left = [-100., -100.]
mesh.width = [100., 100.]
self.mgxs_lib.domains = [mesh]
self.mgxs_lib.build_library()
# Initialize a tallies file
self._input_set.tallies = openmc.Tallies()
self.mgxs_lib.add_to_tallies_file(self._input_set.tallies, merge=False)
self._input_set.tallies.export_to_xml()
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
sp = openmc.StatePoint(statepoint)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Build a string from Pandas Dataframe for each 1-group MGXS
outstr = ''
for domain in self.mgxs_lib.domains:
for mgxs_type in self.mgxs_lib.mgxs_types:
mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def _cleanup(self):
super(MGXSTestHarness, self)._cleanup()
f = os.path.join(os.getcwd(), 'tallies.xml')
if os.path.exists(f): os.remove(f)
if __name__ == '__main__':
harness = MGXSTestHarness('statepoint.10.*', True)
harness.main()
| mit |
AdaptivePELE/AdaptivePELE | AdaptivePELE/analysis/selectOnPlot.py | 1 | 16789 | #!/usr/bin/env python
# title :selectOnPlot.py
# description :Generates a scatterplot where you can draw and select specific dots.
# author :Carles Perez Lopez
# date :20190219
# python_version :3.6.5
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import argparse
import glob
import multiprocessing as mp
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import LassoSelector
from matplotlib.path import Path
from AdaptivePELE.atomset import atomset
import AdaptivePELE.utilities.utilities as adapt_tools
avail_backend = adapt_tools.get_available_backend()
if avail_backend is not None:
plt.switch_backend(avail_backend)
try:
FileExistsError
except NameError:
FileExistsError = OSError
def parseArguments():
"""
Parse command line arguments
:returns: str, str str, str, str, bool, int, str, str, str, -- path to adaptive's results,
column to plot in the X axis, column to plot in the Y axis, column to plot in the Z axis,
path to the output's folder, whether to use a summary.csv already created, number of
processors, report prefix, trajectory prefix, separator of csvs
"""
desc = "Generates a scatterplot of Adaptive's results given two or three columns (X, Y, and Z if set).\n" \
"This plot allows the selection of desired points by drawing. Structures will be selected and \n" \
"stored into an output folder. Additionally, a report file of this selected structures will be created. \n" \
"To be run for example like: \n" \
"\">python selectOnPlot.py /home/usr/adaptiveresults -xcol 'Binding Energy' -ycol epoch\""
parser = argparse.ArgumentParser(description=desc)
required_named = parser.add_argument_group('required arguments')
required_named.add_argument("res_path", type=str,
help="Path to Adaptive results.")
parser.add_argument("-xcol", type=str, default="epoch",
help="Column name of the report file that will be used in the X axis.")
parser.add_argument("-ycol", type=str, default="Binding Energy",
help="Column name of the report file that will be used in the Y axis.")
parser.add_argument("-zcol", type=str, default=None,
help="If set, column name of the report file that will be used in the Z axis (colorbar).")
parser.add_argument("-outfol", type=str, default=None,
help="If set, path to the output's folder. By default it will be created in the Adaptive's \n"
"results path. WARNING: Take into account that if the folder already exists it will be \n"
"overwritten!!!")
parser.add_argument("--top", type=str, default=None,
help="If set, path to the topology folder. This can be a pkl object or a simple pdb file")
parser.add_argument("-done", action="store_true",
help="If this is not the first time that you run this script, it is strongly recommended to \n"
"set this parameter on. If it is set, instead of looking all the reports and create a new\n"
"one, the script will use the summary csv of previous usages, saving computational time.")
parser.add_argument("-cpus", type=int, default=4,
help="Number of processors that you want to use in order to save time.")
parser.add_argument("-report", type=str, default="report_",
help="PELE's report prefix.")
parser.add_argument("-traj", type=str, default="trajectory_",
help="Adaptive's trajectory prefix.")
parser.add_argument("-sep", type=str, default=";",
help="Separator string that will be used in the CSV files.")
parser.add_argument("-skip_first_row", action="store_true", help="Skip the first row in the reports (initial conformation)")
args = parser.parse_args()
return args.res_path, args.xcol, args.ycol, args.zcol, args.outfol, args.done, args.cpus, args.report, args.traj, args.sep, args.top, args.skip_first_row
class SelectFromCollection(object):
"""Select indices from a matplotlib collection using `LassoSelector`.
Selected indices are saved in the `ind` attribute. This tool fades out the
points that are not part of the selection (i.e., reduces their alpha
values). If your collection has alpha < 1, this tool will permanently
alter the alpha values.
Note that this tool selects collection objects based on their *origins*
(i.e., `offsets`).
Parameters
----------
ax : :class:`~matplotlib.axes.Axes`
Axes to interact with.
collection : :class:`matplotlib.collections.Collection` subclass
Collection you want to select from.
alpha_other : 0 <= float <= 1
To highlight a selection, this tool sets all selected points to an
alpha value of 1 and non-selected points to `alpha_other`.
"""
def __init__(self, ax, collection, alpha_other=0.1):
self.canvas = ax.figure.canvas
self.collection = collection
self.alpha_other = alpha_other
self.xys = collection.get_offsets()
self.Npts = len(self.xys)
# Ensure that we have separate colors for each object
self.fc = collection.get_facecolors()
if len(self.fc) == 0:
raise ValueError('Collection must have a facecolor')
elif len(self.fc) == 1:
self.fc = np.tile(self.fc, (self.Npts, 1))
self.lasso = LassoSelector(ax, onselect=self.onselect)
self.ind = []
def onselect(self, verts):
path = Path(verts)
self.ind = np.nonzero(path.contains_points(self.xys))[0]
self.fc[:, -1] = self.alpha_other
self.fc[self.ind, -1] = 1
self.collection.set_facecolors(self.fc)
self.canvas.draw_idle()
def disconnect(self):
self.lasso.disconnect_events()
self.fc[:, -1] = 1
self.collection.set_facecolors(self.fc)
self.canvas.draw_idle()
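# Standalone usage sketch for SelectFromCollection (illustrative, independent of
# any Adaptive results):
#
#     fig, ax = plt.subplots()
#     pts = ax.scatter(np.random.rand(50), np.random.rand(50))
#     selector = SelectFromCollection(ax, pts)
#     plt.show()               # draw a lasso, then inspect selector.ind
#     selector.disconnect()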
def concat_reports_in_csv(adaptive_results_path, output_file_path, report_prefix="report_",
trajectory_prefix="trajectory_", separator_out=";",
skip_first=False):
"""
    It searches report files in Adaptive's results folder and creates a csv file with everything concatenated, adding the
epoch and trajectory information.
:param adaptive_results_path: Path to the results folder of Adaptive.
:type adaptive_results_path: str
:param output_file_path: Path of the output file.
:type output_file_path: str
:param report_prefix: Prefix of PELE's reports.
:type report_prefix: str
:param trajectory_prefix: Prefix of PELE's trajectories.
:type trajectory_prefix: str
:param separator_out: Separator string used in the csv file.
:type separator_out: str
:param skip_first: Whether to skip the first row of the report
:type skip_first: bool
:return: Creates a csv file.
"""
dataframe_lists = []
for adaptive_epoch in range(0, 2000):
folder = os.path.join(adaptive_results_path, str(adaptive_epoch))
if os.path.exists(folder):
report_list = adapt_tools.getReportList("{}/*{}*".format(folder, report_prefix))
for n, report in enumerate(report_list):
if skip_first:
pandas_df = pd.read_csv(report, sep=" ", engine="python", index_col=False, header=0, skiprows=[1])
else:
pandas_df = pd.read_csv(report, sep=" ", engine="python", index_col=False, header=0)
pandas_df["epoch"] = adaptive_epoch
pandas_df["trajectory"] = glob.glob("{}/{}/*{}{}.*".format(adaptive_results_path, adaptive_epoch,
trajectory_prefix, n + 1))[0]
dataframe_lists.append(pandas_df)
else:
break
dataframe = pd.concat(dataframe_lists, ignore_index=True)
dataframe.to_csv(output_file_path, sep=separator_out, index=False)
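# The generated summary.csv concatenates every report and adds two bookkeeping
# columns, so a row looks roughly like this (values are illustrative):
#
#     Step;...;Binding Energy;epoch;trajectory
#     12;...;-45.2;3;/results/3/trajectory_7.xtc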
def trajectory_and_snapshot_to_pdb(trajectory_path, snapshot, output_path, topology_contents):
"""
    Given an absolute path to a trajectory of Adaptive and a snapshot (MODEL) in xtc format, the function transforms it
into a PDB format.
:param trajectory_path: Absolute path to a trajectory from Adaptive, in xtc format.
:type trajectory_path:str
:param snapshot: model of a trajectory that you want to transform.
:type snapshot: int
:param output_path: output path of the new pdb file.
:type output_path: str
:return: Creates a PDB file.
"""
# get the path where the adaptive simulation resides
topology_path_splited = trajectory_path.split(os.sep)
epoch = int(topology_path_splited[-2])
traj = adapt_tools.getTrajNum(topology_path_splited[-1])
trajectory = adapt_tools.getSnapshots(trajectory_path)
try:
single_model = trajectory[snapshot]
PDB = atomset.PDB()
PDB.initialise(single_model, topology=topology_contents.getTopology(epoch, traj))
except IndexError:
exit("You are selecting the model {} for a trajectory that has {} models, please, reselect the model index "
"(starting from 0).".format(snapshot, len(trajectory)))
with open(output_path, "w") as fw:
fw.write("MODEL %4d\n" % (snapshot + 1))
fw.write(PDB.pdb)
fw.write("ENDMDL\n")
fw.write("END\n")
def get_pdb_from_xtc(row, pdbs_output_path, column_file="trajectory", topology=None):
"""
Given a row of a dataframe (expected to come from a csv report) and a column name (that must contain the path to
    its corresponding trajectory), this function extracts the snapshot in PDB format to an output file.
:param row: row of a dataframe (Pandas object).
:type row: pandas.DataFrame
:param pdbs_output_path: output path for the PDB file.
:type pdbs_output_path: str
:param column_file: Column name of the dataframe that contains the path to the trajectory file.
:type column_file: str
:return:
"""
foldername = row[column_file]
filepath = glob.glob(foldername)[0]
epoch = filepath.split("/")[-2]
snapshot = row["numberOfAcceptedPeleSteps"]
new_file_name = os.path.basename(foldername.split("/")[-1])
new_file_name = new_file_name.split(".")[0]
out_path = os.path.join(pdbs_output_path, "{}_epoch_{}_snap_{}.pdb".format(new_file_name, epoch, snapshot))
trajectory_and_snapshot_to_pdb(filepath, snapshot, out_path, topology)
print(out_path)
def get_pdbs_from_df_in_xtc(df, pdbs_output_path, processors=4, column_file="trajectory", topology=None):
"""
It uses the function "get_pdb_from_xtc" for a whole dataframe using multiprocessing.
:param df: Dataframe object (Pandas)
:type df: pandas.DataFrame
:param pdbs_output_path: Output path for PDB files.
:type pdbs_output_path: str
:param processors: Number of processes to do with multiprocessing.
:type processors: int
:param column_file: Column name of the dataframe that contains the path to the trajectory file.
:type column_file: str
:return:
"""
pool = mp.Pool(processes=processors)
multiprocessing_list = []
for _, row in df.iterrows():
multiprocessing_list.append(pool.apply_async(get_pdb_from_xtc,
(row, pdbs_output_path, column_file, topology)))
for process in multiprocessing_list:
process.get()
def main(adaptive_results_folder, column_to_x="epoch", column_to_y="Binding Energy", column_to_z=None,
output_selection_folder=None, summary_done=False, processors=4, report_pref="report_",
trajectory_pref="trajectory_", separator=";",
column_file="trajectory", topology=None, skip_first=False):
"""
Generates a scatterplot of Adaptive's results given two or three columns (X, Y, and Z if set).
This plot allows the selection of desired points by drawing. Structures will be selected and
    stored into an output folder. Additionally, a report file of these selected structures
will be created.
:param adaptive_results_folder: Path to Adaptive results.
:type adaptive_results_folder: str
:param column_to_x: Column name of the report file that will be used in the X axis.
:type column_to_x: str
:param column_to_y: Column name of the report file that will be used in the Y axis.
:type column_to_y: str
:param column_to_z: If set, column name of the report file that will be used in the Z axis (colorbar).
:type column_to_z: str
:param output_selection_folder: If set, path to the output's folder. By default it will be created in the
Adaptive's results path. WARNING: Take into account that if the folder already exists it will be overwritten!!!
:type output_selection_folder: str
    :param summary_done: If set, instead of reading all the reports and creating a new summary, the script will reuse
    the summary csv from previous runs, saving computational time.
:type summary_done: bool
:param processors: Number of processors that you want to use in order to save time.
:type processors: int
:param report_pref: PELE's report prefix.
:type report_pref: str
:param trajectory_pref: Adaptive's trajectory prefix.
:type trajectory_pref: str
:param separator: Separator string that will be used in the CSV files.
:type separator: str
:param column_file: Column name of the dataframe that contains the path to the trajectory file.
:type column_file: str
:param topology: Path to the topology for the simulation
:type topology: str
:param skip_first: Whether to skip the first row of the report
:type skip_first: bool
:return:
"""
summary_csv_filename = os.path.join(adaptive_results_folder, "summary.csv")
if not summary_done:
concat_reports_in_csv(adaptive_results_path=adaptive_results_folder, output_file_path=summary_csv_filename,
report_prefix=report_pref, trajectory_prefix=trajectory_pref,
separator_out=separator, skip_first=skip_first)
dataframe = pd.read_csv(summary_csv_filename, sep=separator, engine='python', header=0)
fig, ax = plt.subplots()
if column_to_z:
pts = ax.scatter(dataframe[column_to_x], dataframe[column_to_y], c=dataframe[column_to_z], s=20)
plt.colorbar(pts)
else:
pts = ax.scatter(dataframe[column_to_x], dataframe[column_to_y], s=20)
selector = SelectFromCollection(ax, pts)
if topology is not None:
topology_contents = adapt_tools.getTopologyObject(topology)
else:
topology_contents = None
def accept(event, output_selection_folder=output_selection_folder):
if event.key == "enter":
print("Selected points:")
df_select = dataframe.loc[selector.ind]
print(df_select)
counter = 0
if not output_selection_folder:
output_selection_folder = os.path.join(adaptive_results_folder, "selected_from_plot")
while True:
try:
os.mkdir(output_selection_folder+"_"+str(counter))
break
except FileExistsError:
counter += 1
output_selection_folder = output_selection_folder+"_"+str(counter)
df_select.to_csv(os.path.join(output_selection_folder, "selection_report.csv"), sep=separator, index=False)
get_pdbs_from_df_in_xtc(df_select, output_selection_folder, processors=processors, column_file=column_file, topology=topology_contents)
selector.disconnect()
ax.set_title("")
fig.canvas.draw()
fig.canvas.mpl_connect("key_press_event", accept)
ax.set_title("Press enter to accept selected points.")
ax.set_xlabel(column_to_x)
ax.set_ylabel(column_to_y)
plt.show()
if __name__ == '__main__':
res_path, xcol, ycol, zcol, outfol, done, cpus, report_name, traj_name, sep, top, skip_first_row = parseArguments()
main(adaptive_results_folder=res_path, column_to_x=xcol, column_to_y=ycol, column_to_z=zcol,
output_selection_folder=outfol, summary_done=done, processors=cpus, report_pref=report_name,
trajectory_pref=traj_name, separator=sep, topology=top, skip_first=skip_first_row)
| mit |
AntonioBoccia/circumnavigation-planner | nodes/plotter.py | 1 | 2783 | #! /usr/bin/python
import rospy as rp
import geomtwo.msg as gms
import threading as thd
import copy as cp
import matplotlib.pyplot as plt
rp.init_node('plotter')
#XMIN = rp.get_param('xmin')
#XMAX = rp.get_param('xmax')
#YMIN = rp.get_param('ymin')
#YMAX = rp.get_param('ymax')
AGENT_COLOR = rp.get_param('agent_color')
ESTIMATE_COLOR = rp.get_param('estimate_color')
TARGET_COLOR = rp.get_param('target_color')
AGENT_NAMES = rp.get_param('agent_names').split()
TARGET_POSITION = rp.get_param('target_position')
RATE = rp.Rate(3.0e1)
LOCK = thd.Lock()
plt.ion()
plt.figure()
plt.scatter(*TARGET_POSITION, color=TARGET_COLOR)
#plt.xlim((XMIN, XMAX))
#plt.ylim((YMIN, YMAX))
plt.axis('equal')
plt.grid(True)
plt.draw()
agent_positions = {name: None for name in AGENT_NAMES}
agent_artists = {name: None for name in AGENT_NAMES}
estimates = {name: None for name in AGENT_NAMES}
estimate_artists = {name: None for name in AGENT_NAMES}
def agent_callback(msg, name):
global agent_positions
LOCK.acquire()
agent_positions[name] = [msg.x, msg.y]
LOCK.release()
for name in AGENT_NAMES:
rp.Subscriber(
name=name+'/position',
data_class=gms.Point,
callback=agent_callback,
callback_args=name,
queue_size=1)
def estimate_callback(msg, name):
global estimates
LOCK.acquire()
estimates[name] = [msg.x, msg.y]
LOCK.release()
for name in AGENT_NAMES:
rp.Subscriber(
name=name+'/estimate',
data_class=gms.Point,
callback=estimate_callback,
callback_args=name,
queue_size=1)
# def estimate_callback(msg):
# global estimate
# LOCK.acquire()
# estimate = [msg.x, msg.y]
# LOCK.release()
# rp.Subscriber(name='estimate',
# data_class=gms.Point,
# callback=estimate_callback)
while not rp.is_shutdown():
ag_pos = {name: None for name in AGENT_NAMES}
est = {name: None for name in AGENT_NAMES}
LOCK.acquire()
for name in AGENT_NAMES:
if not agent_positions[name] is None:
ag_pos[name] = cp.copy(agent_positions[name])
agent_positions[name] = None
if not estimates[name] is None:
est[name] = cp.copy(estimates[name])
estimates[name] = None
LOCK.release()
for name in AGENT_NAMES:
if not ag_pos[name] is None:
if not agent_artists[name] is None:
agent_artists[name].remove()
agent_artists[name] = plt.scatter(*ag_pos[name], color=AGENT_COLOR)
if not est[name] is None:
if not estimate_artists[name] is None:
estimate_artists[name].remove()
estimate_artists[name] = plt.scatter(*est[name], color=ESTIMATE_COLOR)
plt.draw()
RATE.sleep()
| gpl-3.0 |
mfherbst/spack | var/spack/repos/builtin/packages/py-wcsaxes/package.py | 5 | 1820 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyWcsaxes(PythonPackage):
"""WCSAxes is a framework for making plots of Astronomical data
in Matplotlib."""
homepage = "http://wcsaxes.readthedocs.io/en/latest/index.html"
url = "https://github.com/astrofrog/wcsaxes/archive/v0.8.tar.gz"
version('0.8', 'de1c60fdae4c330bf5ddb9f1ab5ab920')
extends('python', ignore=r'bin/')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
| lgpl-2.1 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/backends/backend_svg.py | 8 | 46393 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
from matplotlib.externals.six import unichr
import os, base64, tempfile, gzip, io, sys, codecs, re
import numpy as np
from hashlib import md5
import uuid
from matplotlib import verbose, __version__, rcParams
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import is_string_like, is_writable_file_like, maxdict
from matplotlib.colors import rgb2hex
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
from matplotlib import _png
from xml.sax.saxutils import escape as escape_xml_text
backend_version = __version__
# ----------------------------------------------------------------------
# SimpleXMLWriter class
#
# Based on an original by Fredrik Lundh, but modified here to:
# 1. Support modern Python idioms
# 2. Remove encoding support (it's handled by the file writer instead)
# 3. Support proper indentation
# 4. Minify things a little bit
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
def escape_cdata(s):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
_escape_xml_comment = re.compile(r'-(?=-)')
def escape_comment(s):
s = escape_cdata(s)
return _escape_xml_comment.sub('- ', s)
def escape_attrib(s):
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace("\"", """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
##
# XML writer class.
#
# @param file A file or file-like object. This object must implement
# a <b>write</b> method that takes an 8-bit string.
class XMLWriter(object):
def __init__(self, file):
self.__write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self.__open = 0 # true if start tag is open
self.__tags = []
self.__data = []
self.__indentation = " " * 64
def __flush(self, indent=True):
# flush internal buffers
if self.__open:
if indent:
self.__write(">\n")
else:
self.__write(">")
self.__open = 0
if self.__data:
data = ''.join(self.__data)
self.__write(escape_cdata(data))
self.__data = []
## Opens a new element. Attributes can be given as keyword
# arguments, or as a string/string dictionary. The method returns
# an opaque identifier that can be passed to the <b>close</b>
# method, to close all open elements up to and including this one.
#
# @param tag Element tag.
# @param attrib Attribute dictionary. Alternatively, attributes
# can be given as keyword arguments.
# @return An element identifier.
def start(self, tag, attrib={}, **extra):
self.__flush()
tag = escape_cdata(tag)
self.__data = []
self.__tags.append(tag)
self.__write(self.__indentation[:len(self.__tags) - 1])
self.__write("<%s" % tag)
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = list(six.iteritems(attrib))
attrib.sort()
for k, v in attrib:
if not v == '':
k = escape_cdata(k)
v = escape_attrib(v)
self.__write(" %s=\"%s\"" % (k, v))
self.__open = 1
return len(self.__tags)-1
##
# Adds a comment to the output stream.
#
# @param comment Comment text, as a Unicode string.
def comment(self, comment):
self.__flush()
self.__write(self.__indentation[:len(self.__tags)])
self.__write("<!-- %s -->\n" % escape_comment(comment))
##
# Adds character data to the output stream.
#
# @param text Character data, as a Unicode string.
def data(self, text):
self.__data.append(text)
##
# Closes the current element (opened by the most recent call to
# <b>start</b>).
#
# @param tag Element tag. If given, the tag must match the start
# tag. If omitted, the current element is closed.
def end(self, tag=None, indent=True):
if tag:
assert self.__tags, "unbalanced end(%s)" % tag
assert escape_cdata(tag) == self.__tags[-1],\
"expected end(%s), got %s" % (self.__tags[-1], tag)
else:
assert self.__tags, "unbalanced end()"
tag = self.__tags.pop()
if self.__data:
self.__flush(indent)
elif self.__open:
self.__open = 0
self.__write("/>\n")
return
if indent:
self.__write(self.__indentation[:len(self.__tags)])
self.__write("</%s>\n" % tag)
##
# Closes open elements, up to (and including) the element identified
# by the given identifier.
#
# @param id Element identifier, as returned by the <b>start</b> method.
def close(self, id):
while len(self.__tags) > id:
self.end()
##
# Adds an entire element. This is the same as calling <b>start</b>,
# <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
# can be omitted.
def element(self, tag, text=None, attrib={}, **extra):
self.start(*(tag, attrib), **extra)
if text:
self.data(text)
self.end(indent=False)
##
# Flushes the output stream.
def flush(self):
pass # replaced by the constructor
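# A minimal usage sketch for XMLWriter (illustrative only -- in this module
# the writer is driven by RendererSVG below; the element names here are
# made up):
#
#     >>> buf = io.StringIO()
#     >>> w = XMLWriter(buf)
#     >>> root = w.start('svg', width='100pt')  # returns an identifier
#     >>> w.element('rect', x='0', y='0')       # start + data + end in one call
#     >>> w.comment('a comment node')
#     >>> w.close(root)                         # closes all elements up to root
#     >>> print(buf.getvalue())                 # doctest: +SKIP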
# ----------------------------------------------------------------------
def generate_transform(transform_list=[]):
if len(transform_list):
output = io.StringIO()
for type, value in transform_list:
if type == 'scale' and (value == (1.0,) or value == (1.0, 1.0)):
continue
if type == 'translate' and value == (0.0, 0.0):
continue
if type == 'rotate' and value == (0.0,):
continue
if type == 'matrix' and isinstance(value, Affine2DBase):
value = value.to_values()
output.write('%s(%s)' % (type, ' '.join(str(x) for x in value)))
return output.getvalue()
return ''
def generate_css(attrib={}):
if attrib:
output = io.StringIO()
attrib = list(six.iteritems(attrib))
attrib.sort()
for k, v in attrib:
k = escape_attrib(k)
v = escape_attrib(v)
output.write("%s:%s;" % (k, v))
return output.getvalue()
return ''
_capstyle_d = {'projecting' : 'square', 'butt' : 'butt', 'round': 'round',}
class RendererSVG(RendererBase):
FONT_SCALE = 100.0
fontd = maxdict(50)
def __init__(self, width, height, svgwriter, basename=None, image_dpi=72):
self.width = width
self.height = height
self.writer = XMLWriter(svgwriter)
self.image_dpi = image_dpi # the actual dpi we want to rasterize stuff with
self._groupd = {}
if not rcParams['svg.image_inline']:
assert basename is not None
self.basename = basename
self._imaged = {}
self._clipd = {}
self._char_defs = {}
self._markers = {}
self._path_collection_id = 0
self._imaged = {}
self._hatchd = {}
self._has_gouraud = False
self._n_gradients = 0
self._fonts = {}
self.mathtext_parser = MathTextParser('SVG')
RendererBase.__init__(self)
self._glyph_map = dict()
svgwriter.write(svgProlog)
self._start_id = self.writer.start(
'svg',
width='%ipt' % width, height='%ipt' % height,
viewBox='0 0 %i %i' % (width, height),
xmlns="http://www.w3.org/2000/svg",
version="1.1",
attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"})
self._write_default_style()
def finalize(self):
self._write_clips()
self._write_hatches()
self._write_svgfonts()
self.writer.close(self._start_id)
self.writer.flush()
def _write_default_style(self):
writer = self.writer
default_style = generate_css({
'stroke-linejoin': 'round',
'stroke-linecap': 'butt',
# Disable the miter limit. 100000 seems to be close to
# the maximum that renderers support before breaking.
'stroke-miterlimit': '100000'})
writer.start('defs')
writer.start('style', type='text/css')
writer.data('*{%s}\n' % default_style)
writer.end('style')
writer.end('defs')
def _make_id(self, type, content):
content = str(content)
salt = str(uuid.uuid4())
if six.PY3:
content = content.encode('utf8')
salt = salt.encode('utf8')
m = md5()
m.update(salt)
m.update(content)
return '%s%s' % (type, m.hexdigest()[:10])
def _make_flip_transform(self, transform):
return (transform +
Affine2D()
.scale(1.0, -1.0)
.translate(0.0, self.height))
def _get_font(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(fname)
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _get_hatch(self, gc, rgbFace):
"""
Create a new hatch pattern
"""
if rgbFace is not None:
rgbFace = tuple(rgbFace)
edge = gc.get_rgb()
if edge is not None:
edge = tuple(edge)
dictkey = (gc.get_hatch(), rgbFace, edge)
oid = self._hatchd.get(dictkey)
if oid is None:
oid = self._make_id('h', dictkey)
self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge), oid)
else:
_, oid = oid
return oid
def _write_hatches(self):
if not len(self._hatchd):
return
HATCH_SIZE = 72
writer = self.writer
writer.start('defs')
for ((path, face, stroke), oid) in six.itervalues(self._hatchd):
writer.start(
'pattern',
id=oid,
patternUnits="userSpaceOnUse",
x="0", y="0", width=six.text_type(HATCH_SIZE),
height=six.text_type(HATCH_SIZE))
path_data = self._convert_path(
path,
Affine2D().scale(HATCH_SIZE).scale(1.0, -1.0).translate(0, HATCH_SIZE),
simplify=False)
if face is None:
fill = 'none'
else:
fill = rgb2hex(face)
writer.element(
'rect',
x="0", y="0", width=six.text_type(HATCH_SIZE+1),
height=six.text_type(HATCH_SIZE+1),
fill=fill)
writer.element(
'path',
d=path_data,
style=generate_css({
'fill': rgb2hex(stroke),
'stroke': rgb2hex(stroke),
'stroke-width': '1.0',
'stroke-linecap': 'butt',
'stroke-linejoin': 'miter'
})
)
writer.end('pattern')
writer.end('defs')
def _get_style_dict(self, gc, rgbFace):
"""
return the style string. style is generated from the
GraphicsContext and rgbFace
"""
attrib = {}
forced_alpha = gc.get_forced_alpha()
if gc.get_hatch() is not None:
attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
if rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
attrib['fill-opacity'] = str(rgbFace[3])
else:
if rgbFace is None:
attrib['fill'] = 'none'
else:
if tuple(rgbFace[:3]) != (0, 0, 0):
attrib['fill'] = rgb2hex(rgbFace)
if len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
attrib['fill-opacity'] = str(rgbFace[3])
if forced_alpha and gc.get_alpha() != 1.0:
attrib['opacity'] = str(gc.get_alpha())
offset, seq = gc.get_dashes()
if seq is not None:
attrib['stroke-dasharray'] = ','.join(['%f' % val for val in seq])
attrib['stroke-dashoffset'] = six.text_type(float(offset))
linewidth = gc.get_linewidth()
if linewidth:
rgb = gc.get_rgb()
attrib['stroke'] = rgb2hex(rgb)
if not forced_alpha and rgb[3] != 1.0:
attrib['stroke-opacity'] = str(rgb[3])
if linewidth != 1.0:
attrib['stroke-width'] = str(linewidth)
if gc.get_joinstyle() != 'round':
attrib['stroke-linejoin'] = gc.get_joinstyle()
if gc.get_capstyle() != 'butt':
attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]
return attrib
def _get_style(self, gc, rgbFace):
return generate_css(self._get_style_dict(gc, rgbFace))
def _get_clip(self, gc):
cliprect = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
clippath_trans = self._make_flip_transform(clippath_trans)
dictkey = (id(clippath), str(clippath_trans))
elif cliprect is not None:
x, y, w, h = cliprect.bounds
y = self.height-(y+h)
dictkey = (x, y, w, h)
else:
return None
clip = self._clipd.get(dictkey)
if clip is None:
oid = self._make_id('p', dictkey)
if clippath is not None:
self._clipd[dictkey] = ((clippath, clippath_trans), oid)
else:
self._clipd[dictkey] = (dictkey, oid)
else:
clip, oid = clip
return oid
def _write_clips(self):
if not len(self._clipd):
return
writer = self.writer
writer.start('defs')
for clip, oid in six.itervalues(self._clipd):
writer.start('clipPath', id=oid)
if len(clip) == 2:
clippath, clippath_trans = clip
path_data = self._convert_path(clippath, clippath_trans, simplify=False)
writer.element('path', d=path_data)
else:
x, y, w, h = clip
writer.element('rect', x=six.text_type(x), y=six.text_type(y),
width=six.text_type(w), height=six.text_type(h))
writer.end('clipPath')
writer.end('defs')
def _write_svgfonts(self):
if not rcParams['svg.fonttype'] == 'svgfont':
return
writer = self.writer
writer.start('defs')
for font_fname, chars in six.iteritems(self._fonts):
font = FT2Font(font_fname)
font.set_size(72, 72)
sfnt = font.get_sfnt()
writer.start('font', id=sfnt[(1, 0, 0, 4)])
writer.element(
'font-face',
attrib={
'font-family': font.family_name,
'font-style': font.style_name.lower(),
'units-per-em': '72',
'bbox': ' '.join(six.text_type(x / 64.0) for x in font.bbox)})
for char in chars:
glyph = font.load_char(char, flags=LOAD_NO_HINTING)
verts, codes = font.get_path()
path = Path(verts, codes)
path_data = self._convert_path(path)
# name = font.get_glyph_name(char)
writer.element(
'glyph',
d=path_data,
attrib={
# 'glyph-name': name,
'unicode': unichr(char),
'horiz-adv-x': six.text_type(glyph.linearHoriAdvance / 65536.0)})
writer.end('font')
writer.end('defs')
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group.
"""
if gid:
self.writer.start('g', id=gid)
else:
self._groupd[s] = self._groupd.get(s, 0) + 1
self.writer.start('g', id="%s_%d" % (s, self._groupd[s]))
def close_group(self, s):
self.writer.end('g')
def option_image_nocomposite(self):
"""
return whether to generate a composite image from multiple images on
a set of axes
"""
if rcParams['svg.image_noscale']:
return True
else:
return not rcParams['image.composite_image']
def _convert_path(self, path, transform=None, clip=None, simplify=None,
sketch=None):
if clip:
clip = (0.0, 0.0, self.width, self.height)
else:
clip = None
return _path.convert_to_string(
path, transform, clip, simplify, sketch, 6,
[b'M', b'L', b'Q', b'C', b'z'], False).decode('ascii')
def draw_path(self, gc, path, transform, rgbFace=None):
trans_and_flip = self._make_flip_transform(transform)
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
path_data = self._convert_path(
path, trans_and_flip, clip=clip, simplify=simplify,
sketch=gc.get_sketch_params())
attrib = {}
attrib['style'] = self._get_style(gc, rgbFace)
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
self.writer.element('path', d=path_data, attrib=attrib)
if gc.get_url() is not None:
self.writer.end('a')
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if not len(path.vertices):
return
writer = self.writer
path_data = self._convert_path(
marker_path,
marker_trans + Affine2D().scale(1.0, -1.0),
simplify=False)
style = self._get_style_dict(gc, rgbFace)
dictkey = (path_data, generate_css(style))
oid = self._markers.get(dictkey)
for key in list(six.iterkeys(style)):
if not key.startswith('stroke'):
del style[key]
style = generate_css(style)
if oid is None:
oid = self._make_id('m', dictkey)
writer.start('defs')
writer.element('path', id=oid, d=path_data, style=style)
writer.end('defs')
self._markers[dictkey] = oid
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
writer.start('g', attrib=attrib)
trans_and_flip = self._make_flip_transform(trans)
attrib = {'xlink:href': '#%s' % oid}
clip = (0, 0, self.width*72, self.height*72)
for vertices, code in path.iter_segments(
trans_and_flip, clip=clip, simplify=False):
if len(vertices):
x, y = vertices[-2:]
attrib['x'] = six.text_type(x)
attrib['y'] = six.text_type(y)
attrib['style'] = self._get_style(gc, rgbFace)
writer.element('use', attrib=attrib)
writer.end('g')
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is
# (len_path + 5) * uses_per_path
# cost of definition+use is
# (len_path + 3) + 9 * uses_per_path
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + 9 * uses_per_path + 3 < (len_path + 5) * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
writer = self.writer
path_codes = []
writer.start('defs')
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
d = self._convert_path(path, transform, simplify=False)
oid = 'C%x_%x_%s' % (self._path_collection_id, i,
self._make_id('', d))
writer.element('path', id=oid, d=d)
path_codes.append(oid)
writer.end('defs')
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
clipid = self._get_clip(gc0)
url = gc0.get_url()
if url is not None:
writer.start('a', attrib={'xlink:href': url})
if clipid is not None:
writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
attrib = {
'xlink:href': '#%s' % path_id,
'x': six.text_type(xo),
'y': six.text_type(self.height - yo),
'style': self._get_style(gc0, rgbFace)
}
writer.element('use', attrib=attrib)
if clipid is not None:
writer.end('g')
if url is not None:
writer.end('a')
self._path_collection_id += 1
def draw_gouraud_triangle(self, gc, points, colors, trans):
# This uses a method described here:
#
# http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html
#
# that uses three overlapping linear gradients to simulate a
# Gouraud triangle. Each gradient goes from fully opaque in
# one corner to fully transparent along the opposite edge.
# The line between the stop points is perpendicular to the
# opposite edge. Underlying these three gradients is a solid
# triangle whose color is the average of all three points.
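        # Concretely: for corner i the gradient below runs from point i to
        # (xb, yb), the foot of the perpendicular dropped from point i onto
        # the line through the other two points, so each corner's color
        # fades linearly to zero exactly at the opposite edge.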
writer = self.writer
if not self._has_gouraud:
self._has_gouraud = True
writer.start(
'filter',
id='colorAdd')
writer.element(
'feComposite',
attrib={'in': 'SourceGraphic'},
in2='BackgroundImage',
operator='arithmetic',
k2="1", k3="1")
writer.end('filter')
avg_color = np.sum(colors[:, :], axis=0) / 3.0
# Just skip fully-transparent triangles
if avg_color[-1] == 0.0:
return
trans_and_flip = self._make_flip_transform(trans)
tpoints = trans_and_flip.transform(points)
writer.start('defs')
for i in range(3):
x1, y1 = tpoints[i]
x2, y2 = tpoints[(i + 1) % 3]
x3, y3 = tpoints[(i + 2) % 3]
c = colors[i][:]
if x2 == x3:
xb = x2
yb = y1
elif y2 == y3:
xb = x1
yb = y2
else:
m1 = (y2 - y3) / (x2 - x3)
b1 = y2 - (m1 * x2)
m2 = -(1.0 / m1)
b2 = y1 - (m2 * x1)
xb = (-b1 + b2) / (m1 - m2)
yb = m2 * xb + b2
writer.start(
'linearGradient',
id="GR%x_%d" % (self._n_gradients, i),
x1=six.text_type(x1), y1=six.text_type(y1),
x2=six.text_type(xb), y2=six.text_type(yb))
writer.element(
'stop',
offset='0',
style=generate_css({'stop-color': rgb2hex(c),
'stop-opacity': six.text_type(c[-1])}))
writer.element(
'stop',
offset='1',
style=generate_css({'stop-color': rgb2hex(c),
'stop-opacity': "0"}))
writer.end('linearGradient')
writer.element(
'polygon',
id='GT%x' % self._n_gradients,
points=" ".join([six.text_type(x)
for x in (x1, y1, x2, y2, x3, y3)]))
writer.end('defs')
avg_color = np.sum(colors[:, :], axis=0) / 3.0
href = '#GT%x' % self._n_gradients
writer.element(
'use',
attrib={'xlink:href': href,
'fill': rgb2hex(avg_color),
'fill-opacity': str(avg_color[-1])})
for i in range(3):
writer.element(
'use',
attrib={'xlink:href': href,
'fill': 'url(#GR%x_%d)' % (self._n_gradients, i),
'fill-opacity': '1',
'filter': 'url(#colorAdd)'})
self._n_gradients += 1
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
self.writer.start('g', attrib=attrib)
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
self.writer.end('g')
def option_scale_image(self):
return True
def get_image_magnification(self):
return self.image_dpi / 72.0
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
# Can't apply clip-path directly to the image because the
# image has a transformation, which would also be applied
# to the clip-path
self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
trans = [1,0,0,1,0,0]
if rcParams['svg.image_noscale']:
trans = list(im.get_matrix())
trans[5] = -trans[5]
attrib['transform'] = generate_transform([('matrix', tuple(trans))])
assert trans[1] == 0
assert trans[2] == 0
numrows, numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
h,w = im.get_size_out()
if dx is None:
w = 72.0*w/self.image_dpi
else:
w = dx
if dy is None:
h = 72.0*h/self.image_dpi
else:
h = dy
oid = getattr(im, '_gid', None)
url = getattr(im, '_url', None)
if url is not None:
self.writer.start('a', attrib={'xlink:href': url})
if rcParams['svg.image_inline']:
bytesio = io.BytesIO()
_png.write_png(np.array(im)[::-1], bytesio)
oid = oid or self._make_id('image', bytesio)
attrib['xlink:href'] = (
"data:image/png;base64,\n" +
base64.b64encode(bytesio.getvalue()).decode('ascii'))
else:
self._imaged[self.basename] = self._imaged.get(self.basename,0) + 1
filename = '%s.image%d.png'%(self.basename, self._imaged[self.basename])
verbose.report( 'Writing image file for inclusion: %s' % filename)
_png.write_png(np.array(im)[::-1], filename)
oid = oid or 'Im_' + self._make_id('image', filename)
attrib['xlink:href'] = filename
alpha = gc.get_alpha()
if alpha != 1.0:
attrib['opacity'] = str(alpha)
attrib['id'] = oid
if transform is None:
self.writer.element(
'image',
x=six.text_type(x/trans[0]),
y=six.text_type((self.height-y)/trans[3]-h),
width=six.text_type(w), height=six.text_type(h),
attrib=attrib)
else:
flipped = self._make_flip_transform(transform)
flipped = np.array(flipped.to_values())
y = y+dy
if dy > 0.0:
flipped[3] *= -1.0
y *= -1.0
attrib['transform'] = generate_transform(
[('matrix', flipped)])
self.writer.element(
'image',
x=six.text_type(x), y=six.text_type(y),
width=six.text_type(dx), height=six.text_type(abs(dy)),
attrib=attrib)
if url is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def _adjust_char_id(self, char_id):
return char_id.replace("%20", "_")
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
writer = self.writer
writer.comment(s)
glyph_map=self._glyph_map
text2path = self._text2path
color = rgb2hex(gc.get_rgb())
fontsize = prop.get_size_in_points()
style = {}
if color != '#000000':
style['fill'] = color
if gc.get_alpha() != 1.0:
style['opacity'] = six.text_type(gc.get_alpha())
if not ismath:
font = text2path._get_font(prop)
_glyphs = text2path.get_glyphs_with_font(
font, s, glyph_map=glyph_map, return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in six.iteritems(glyph_map_new):
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
attrib['style'] = generate_css(style)
font_scale = fontsize / text2path.FONT_SCALE
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, -font_scale))])
writer.start('g', attrib=attrib)
for glyph_id, xposition, yposition, scale in glyph_info:
attrib={'xlink:href': '#%s' % glyph_id}
if xposition != 0.0:
attrib['x'] = six.text_type(xposition)
if yposition != 0.0:
attrib['y'] = six.text_type(yposition)
writer.element(
'use',
attrib=attrib)
writer.end('g')
else:
if ismath == "TeX":
_glyphs = text2path.get_glyphs_tex(prop, s, glyph_map=glyph_map,
return_new_glyphs_only=True)
else:
_glyphs = text2path.get_glyphs_mathtext(prop, s, glyph_map=glyph_map,
return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
            # we store the character glyphs w/o flipping. Instead, the
            # coordinates will be flipped when these characters are
            # used.
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in six.iteritems(glyph_map_new):
char_id = self._adjust_char_id(char_id)
# Some characters are blank
if not len(glyph_path[0]):
path_data = ""
else:
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
font_scale = fontsize / text2path.FONT_SCALE
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, -font_scale))])
writer.start('g', attrib=attrib)
for char_id, xposition, yposition, scale in glyph_info:
char_id = self._adjust_char_id(char_id)
writer.element(
'use',
transform=generate_transform([
('translate', (xposition, yposition)),
('scale', (scale,)),
]),
attrib={'xlink:href': '#%s' % char_id})
for verts, codes in rects:
path = Path(verts, codes)
path_data = self._convert_path(path, simplify=False)
writer.element('path', d=path_data)
writer.end('g')
def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath, mtext=None):
writer = self.writer
color = rgb2hex(gc.get_rgb())
style = {}
if color != '#000000':
style['fill'] = color
if gc.get_alpha() != 1.0:
style['opacity'] = six.text_type(gc.get_alpha())
if not ismath:
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fontsize = prop.get_size_in_points()
fontfamily = font.family_name
fontstyle = prop.get_style()
attrib = {}
            # Must add "px" to work around a Firefox bug
style['font-size'] = six.text_type(fontsize) + 'px'
style['font-family'] = six.text_type(fontfamily)
style['font-style'] = prop.get_style().lower()
style['font-weight'] = six.text_type(prop.get_weight()).lower()
attrib['style'] = generate_css(style)
if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
# If text anchoring can be supported, get the original
# coordinates and add alignment information.
# Get anchor coordinates.
transform = mtext.get_transform()
ax, ay = transform.transform_point(mtext.get_position())
ay = self.height - ay
# Don't do vertical anchor alignment. Most applications do not
# support 'alignment-baseline' yet. Apply the vertical layout
# to the anchor point manually for now.
angle_rad = angle * np.pi / 180.
dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
ax = ax + v_offset * dir_vert[0]
ay = ay + v_offset * dir_vert[1]
ha_mpl_to_svg = {'left': 'start', 'right': 'end',
'center': 'middle'}
style['text-anchor'] = ha_mpl_to_svg[mtext.get_ha()]
attrib['x'] = str(ax)
attrib['y'] = str(ay)
attrib['style'] = generate_css(style)
attrib['transform'] = "rotate(%f, %f, %f)" % (-angle, ax, ay)
writer.element('text', s, attrib=attrib)
else:
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
writer.element('text', s, attrib=attrib)
if rcParams['svg.fonttype'] == 'svgfont':
fontset = self._fonts.setdefault(font.fname, set())
for c in s:
fontset.add(ord(c))
else:
writer.comment(s)
width, height, descent, svg_elements, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
svg_glyphs = svg_elements.svg_glyphs
svg_rects = svg_elements.svg_rects
attrib = {}
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
# Apply attributes to 'g', not 'text', because we likely
# have some rectangles as well with the same style and
# transformation
writer.start('g', attrib=attrib)
writer.start('text')
# Sort the characters by font, and output one tspan for
# each
spans = {}
for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
style = generate_css({
'font-size': six.text_type(fontsize) + 'px',
'font-family': font.family_name,
'font-style': font.style_name.lower(),
'font-weight': font.style_name.lower()})
if thetext == 32:
thetext = 0xa0 # non-breaking space
spans.setdefault(style, []).append((new_x, -new_y, thetext))
if rcParams['svg.fonttype'] == 'svgfont':
for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
fontset = self._fonts.setdefault(font.fname, set())
fontset.add(thetext)
for style, chars in list(six.iteritems(spans)):
chars.sort()
same_y = True
if len(chars) > 1:
last_y = chars[0][1]
for i in xrange(1, len(chars)):
if chars[i][1] != last_y:
same_y = False
break
if same_y:
ys = six.text_type(chars[0][1])
else:
ys = ' '.join(six.text_type(c[1]) for c in chars)
attrib = {
'style': style,
'x': ' '.join(six.text_type(c[0]) for c in chars),
'y': ys
}
writer.element(
'tspan',
''.join(unichr(c[2]) for c in chars),
attrib=attrib)
writer.end('text')
if len(svg_rects):
for x, y, width, height in svg_rects:
writer.element(
'rect',
x=six.text_type(x), y=six.text_type(-y + height),
width=six.text_type(width), height=six.text_type(height)
)
writer.end('g')
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
clipid = self._get_clip(gc)
if clipid is not None:
            # Cannot apply clip-path directly to the text, because
            # it has a transformation
self.writer.start(
'g', attrib={'clip-path': 'url(#%s)' % clipid})
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
if rcParams['svg.fonttype'] == 'path':
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath, mtext)
else:
self._draw_text_as_text(gc, x, y, s, prop, angle, ismath, mtext)
if gc.get_url() is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
return self._text2path.get_text_width_height_descent(s, prop, ismath)
class FigureCanvasSVG(FigureCanvasBase):
filetypes = {'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
fixed_dpi = 72
def print_svg(self, filename, *args, **kwargs):
if is_string_like(filename):
fh_to_close = svgwriter = io.open(filename, 'w', encoding='utf-8')
elif is_writable_file_like(filename):
if not isinstance(filename, io.TextIOBase):
if six.PY3:
svgwriter = io.TextIOWrapper(filename, 'utf-8')
else:
svgwriter = codecs.getwriter('utf-8')(filename)
else:
svgwriter = filename
fh_to_close = None
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close, **kwargs)
def print_svgz(self, filename, *args, **kwargs):
if is_string_like(filename):
fh_to_close = gzipwriter = gzip.GzipFile(filename, 'w')
svgwriter = io.TextIOWrapper(gzipwriter, 'utf-8')
elif is_writable_file_like(filename):
fh_to_close = gzipwriter = gzip.GzipFile(fileobj=filename, mode='w')
svgwriter = io.TextIOWrapper(gzipwriter, 'utf-8')
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close)
def _print_svg(self, filename, svgwriter, fh_to_close=None, **kwargs):
try:
image_dpi = kwargs.pop("dpi", 72)
self.figure.set_dpi(72.0)
width, height = self.figure.get_size_inches()
w, h = width*72, height*72
if rcParams['svg.image_noscale']:
renderer = RendererSVG(w, h, svgwriter, filename, image_dpi)
else:
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(self.figure,
width, height, image_dpi, RendererSVG(w, h, svgwriter, filename, image_dpi),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if fh_to_close is not None:
svgwriter.close()
def get_default_filetype(self):
return 'svg'
class FigureManagerSVG(FigureManagerBase):
pass
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasSVG(figure)
manager = FigureManagerSVG(canvas, num)
return manager
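# A minimal sketch of driving this backend directly (illustrative; normally
# one selects the SVG backend and calls ``fig.savefig('out.svg')``):
#
#     >>> from matplotlib.figure import Figure
#     >>> fig = Figure()
#     >>> ax = fig.add_subplot(111)
#     >>> ax.plot([0, 1], [0, 1])                    # doctest: +SKIP
#     >>> FigureCanvasSVG(fig).print_svg('out.svg')  # doctest: +SKIP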
svgProlog = """\
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Created with matplotlib (http://matplotlib.org/) -->
"""
FigureCanvas = FigureCanvasSVG
FigureManager = FigureManagerSVG
| apache-2.0 |
Eric89GXL/mne-python | mne/utils/numerics.py | 4 | 36095 | # -*- coding: utf-8 -*-
"""Some utility functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from contextlib import contextmanager
import hashlib
from io import BytesIO, StringIO
from math import sqrt
import numbers
import operator
import os
import os.path as op
from math import ceil
import shutil
import sys
from datetime import datetime, timedelta, timezone
import numpy as np
from scipy import sparse
from ._logging import logger, warn, verbose
from .check import check_random_state, _ensure_int, _validate_type
from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd
from .docs import fill_doc
def split_list(v, n, idx=False):
"""Split list in n (approx) equal pieces, possibly giving indices."""
n = int(n)
tot = len(v)
sz = tot // n
start = stop = 0
for i in range(n - 1):
stop += sz
yield (np.arange(start, stop), v[start:stop]) if idx else v[start:stop]
start += sz
    yield (np.arange(start, tot), v[start:]) if idx else v[start:]
def array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1):
"""Do what numpy.array_split does, but add indices."""
# this only works for indices_or_sections as int
indices_or_sections = _ensure_int(indices_or_sections)
ary_split = np.array_split(ary, indices_or_sections, axis=axis)
idx_split = np.array_split(np.arange(ary.shape[axis]), indices_or_sections)
idx_split = (np.arange(sp[0] * n_per_split, (sp[-1] + 1) * n_per_split)
for sp in idx_split)
return zip(idx_split, ary_split)
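# Example (illustrative) of splitting with indices:
#
#     >>> list(split_list([0, 10, 20, 30], 2, idx=True))  # doctest: +SKIP
#     [(array([0, 1]), [0, 10]), (array([2, 3]), [20, 30])]
#     >>> [(i.tolist(), a.tolist())
#     ...  for i, a in array_split_idx(np.arange(6), 2)]  # doctest: +SKIP
#     [([0, 1, 2], [0, 1, 2]), ([3, 4, 5], [3, 4, 5])]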
def create_chunks(sequence, size):
"""Generate chunks from a sequence.
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
def sum_squared(X):
"""Compute norm of an array.
Parameters
----------
X : array
Data whose norm must be found.
Returns
-------
value : float
Sum of squares of the input array X.
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
def _compute_row_norms(data):
"""Compute scaling based on estimated norm."""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
def _reg_pinv(x, reg=0, rank='full', rcond=1e-15):
"""Compute a regularized pseudoinverse of Hermitian matrices.
Regularization is performed by adding a constant value to each diagonal
element of the matrix before inversion. This is known as "diagonal
loading". The loading factor is computed as ``reg * np.trace(x) / len(x)``.
The pseudo-inverse is computed through SVD decomposition and inverting the
singular values. When the matrix is rank deficient, some singular values
will be close to zero and will not be used during the inversion. The number
of singular values to use can either be manually specified or automatically
estimated.
Parameters
----------
x : ndarray, shape (..., n, n)
Square, Hermitian matrices to invert.
reg : float
Regularization parameter. Defaults to 0.
rank : int | None | 'full'
This controls the effective rank of the covariance matrix when
computing the inverse. The rank can be set explicitly by specifying an
integer value. If ``None``, the rank will be automatically estimated.
Since applying regularization will always make the covariance matrix
full rank, the rank is estimated before regularization in this case. If
'full', the rank will be estimated after regularization and hence
will mean using the full rank, unless ``reg=0`` is used.
Defaults to 'full'.
rcond : float | 'auto'
Cutoff for detecting small singular values when attempting to estimate
the rank of the matrix (``rank='auto'``). Singular values smaller than
the cutoff are set to zero. When set to 'auto', a cutoff based on
floating point precision will be used. Defaults to 1e-15.
Returns
-------
x_inv : ndarray, shape (..., n, n)
The inverted matrix.
loading_factor : float
Value added to the diagonal of the matrix during regularization.
rank : int
If ``rank`` was set to an integer value, this value is returned,
else the estimated rank of the matrix, before regularization, is
returned.
"""
from ..rank import _estimate_rank_from_s
if rank is not None and rank != 'full':
rank = int(operator.index(rank))
if x.ndim < 2 or x.shape[-2] != x.shape[-1]:
raise ValueError('Input matrix must be square.')
if not np.allclose(x, x.conj().swapaxes(-2, -1)):
raise ValueError('Input matrix must be Hermitian (symmetric)')
assert x.ndim >= 2 and x.shape[-2] == x.shape[-1]
n = x.shape[-1]
# Decompose the matrix, not necessarily positive semidefinite
from mne.fixes import svd
U, s, Vh = svd(x, hermitian=True)
# Estimate the rank before regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_before = _estimate_rank_from_s(s, tol)
# Decompose the matrix again after regularization
loading_factor = reg * np.mean(s, axis=-1)
if reg:
U, s, Vh = svd(
x + loading_factor[..., np.newaxis, np.newaxis] * np.eye(n),
hermitian=True)
# Estimate the rank after regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_after = _estimate_rank_from_s(s, tol)
# Warn the user if both all parameters were kept at their defaults and the
# matrix is rank deficient.
if (rank_after < n).any() and reg == 0 and \
rank == 'full' and rcond == 1e-15:
warn('Covariance matrix is rank-deficient and no regularization is '
'done.')
elif isinstance(rank, int) and rank > n:
raise ValueError('Invalid value for the rank parameter (%d) given '
'the shape of the input matrix (%d x %d).' %
(rank, x.shape[0], x.shape[1]))
# Pick the requested number of singular values
mask = np.arange(s.shape[-1]).reshape((1,) * (x.ndim - 2) + (-1,))
if rank is None:
cmp = ret = rank_before
elif rank == 'full':
cmp = rank_after
ret = rank_before
else:
cmp = ret = rank
mask = mask < np.asarray(cmp)[..., np.newaxis]
mask &= s > 0
# Invert only non-zero singular values
s_inv = np.zeros(s.shape)
s_inv[mask] = 1. / s[mask]
# Compute the pseudo inverse
x_inv = np.matmul(U * s_inv[..., np.newaxis, :], Vh)
return x_inv, loading_factor, ret
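# Example (illustrative) of regularized inversion of a covariance matrix;
# the data below are made up:
#
#     >>> rng = np.random.RandomState(0)
#     >>> a = rng.randn(5, 100)
#     >>> cov = np.dot(a, a.T)  # Hermitian, 5 x 5, full rank here
#     >>> cov_inv, loading, rank = _reg_pinv(cov, reg=0.1)
#     >>> rank  # estimated before regularization  # doctest: +SKIP
#     5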
def _gen_events(n_epochs):
"""Generate event structure from number of epochs."""
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
return events
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude."""
from ..epochs import _is_good
from ..io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
def _get_inst_data(inst):
"""Get data view from MNE object instance like Raw, Epochs or Evoked."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from .. import Evoked
from ..time_frequency.tfr import _BaseTFR
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), "Instance")
if not inst.preload:
inst.load_data()
return inst._data
def compute_corr(x, y):
"""Compute pearson correlations between a vector and a matrix."""
if len(x) == 0 or len(y) == 0:
raise ValueError('x or y has zero length')
X = np.array(x, float)
Y = np.array(y, float)
X -= X.mean(0)
Y -= Y.mean(0)
x_sd = X.std(0, ddof=1)
# if covariance matrix is fully expanded, Y needs a
# transpose / broadcasting else Y is correct
y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
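# Example (illustrative): correlate one vector with each column of a matrix.
#
#     >>> x = np.array([1., 2., 3., 4.])
#     >>> Y = np.c_[x, -x]
#     >>> compute_corr(x, Y)  # doctest: +SKIP
#     array([ 1., -1.])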
@fill_doc
def random_permutation(n_samples, random_state=None):
"""Emulate the randperm matlab function.
It returns a vector containing a random permutation of the
integers between 0 and n_samples-1. It returns the same random numbers
than randperm matlab function whenever the random_state is the same
as the matlab's random seed.
This function is useful for comparing against matlab scripts
which use the randperm function.
Note: the randperm(n_samples) matlab function generates a random
sequence between 1 and n_samples, whereas
random_permutation(n_samples, random_state) function generates
a random sequence between 0 and n_samples-1, that is:
    randperm(n_samples) = random_permutation(n_samples, random_state) + 1
Parameters
----------
n_samples : int
End point of the sequence to be permuted (excluded, i.e., the end point
is equal to n_samples-1)
%(random_state)s
Returns
-------
randperm : ndarray, int
Randomly permuted sequence between 0 and n-1.
"""
rng = check_random_state(random_state)
# This can't just be rng.permutation(n_samples) because it's not identical
# to what MATLAB produces
idx = rng.uniform(size=n_samples)
randperm = np.argsort(idx)
return randperm
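# Example (illustrative): the permutation is reproducible for a fixed seed
# and always covers 0..n_samples-1 exactly once.
#
#     >>> p = random_permutation(5, random_state=0)
#     >>> sorted(p.tolist())
#     [0, 1, 2, 3, 4]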
@verbose
def _apply_scaling_array(data, picks_list, scalings, verbose=None):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
logger.debug(' Scaling using mapping %s.' % (scalings,))
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
logger.debug(' Scaling using computed norms.')
data *= scalings[:, np.newaxis] # F - order
def _invert_scalings(scalings):
if isinstance(scalings, dict):
scalings = {k: 1. / v for k, v in scalings.items()}
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return scalings
def _undo_scaling_array(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_array(data, picks_list, scalings, verbose=False)
@contextmanager
def _scaled_array(data, picks_list, scalings):
"""Scale, use, unscale array."""
_apply_scaling_array(data, picks_list=picks_list, scalings=scalings)
try:
yield
finally:
_undo_scaling_array(data, picks_list=picks_list, scalings=scalings)
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
        raise RuntimeError('Invalid scalings type: %s' % type(scalings))
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_cov(data, picks_list, scalings)
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, str) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
raise NotImplementedError("No way! That's not a rescaling "
'option: %s' % scalings)
return scalings_
def hashfunc(fname, block_size=1048576, hash_type="md5"): # 2 ** 20
"""Calculate the hash for a file.
Parameters
----------
fname : str
Filename.
    block_size : int
        Block size to use when reading.
    hash_type : str
        Hash algorithm to use, "md5" (default) or "sha1".
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
if hash_type == "md5":
hasher = hashlib.md5()
elif hash_type == "sha1":
hasher = hashlib.sha1()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
hasher.update(data)
return hasher.hexdigest()
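# Example (illustrative): hash a file with SHA-1 instead of the default MD5
# (for an empty file this yields the well-known empty-input SHA-1 digest):
#
#     >>> hashfunc('/path/to/empty_file', hash_type='sha1')  # doctest: +SKIP
#     'da39a3ee5e6b4b0d3255bfef95601890afd80709'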
def _replace_md5(fname):
"""Replace a file based on MD5sum."""
# adapted from sphinx-gallery
assert fname.endswith('.new')
fname_old = fname[:-4]
if op.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old):
os.remove(fname)
else:
shutil.move(fname, fname_old)
def create_slices(start, stop, step=None, length=1):
"""Generate slices of time indexes.
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
    step : int | None
        Number of time samples separating two slices.
        If step is None, step is set to length.
    length : int
        Number of time samples included in a given slice.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
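# Example (illustrative): 4-sample windows with 50 % overlap.
#
#     >>> create_slices(0, 8, step=2, length=4)
#     [slice(0, 4, 1), slice(2, 6, 1), slice(4, 8, 1)]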
def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True,
include_tmax=True):
"""Safely find sample boundaries."""
orig_tmin = tmin
orig_tmax = tmax
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
if not np.isfinite(tmin):
tmin = times[0]
if not np.isfinite(tmax):
tmax = times[-1]
include_tmax = True # ignore this param when tmax is infinite
if sfreq is not None:
# Push to a bit past the nearest sample boundary first
sfreq = float(sfreq)
tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq
tmax = int(round(tmax * sfreq)) / sfreq
tmax += (0.5 if include_tmax else -0.5) / sfreq
else:
assert include_tmax # can only be used when sfreq is known
if raise_error and tmin > tmax:
raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'
% (orig_tmin, orig_tmax))
mask = (times >= tmin)
mask &= (times <= tmax)
if raise_error and not mask.any():
extra = '' if include_tmax else 'when include_tmax=False '
raise ValueError('No samples remain when using tmin=%s and tmax=%s %s'
'(original time bounds are [%s, %s])'
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
return mask
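# Example (illustrative): select the samples between 0.1 and 0.2 s of a
# signal sampled at 10 Hz.
#
#     >>> times = np.arange(5) / 10.  # [0.0, 0.1, 0.2, 0.3, 0.4]
#     >>> _time_mask(times, tmin=0.1, tmax=0.2, sfreq=10.)
#     array([False,  True,  True, False, False])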
def _freq_mask(freqs, sfreq, fmin=None, fmax=None, raise_error=True):
"""Safely find frequency boundaries."""
orig_fmin = fmin
orig_fmax = fmax
fmin = -np.inf if fmin is None else fmin
fmax = np.inf if fmax is None else fmax
if not np.isfinite(fmin):
fmin = freqs[0]
if not np.isfinite(fmax):
fmax = freqs[-1]
if sfreq is None:
raise ValueError('sfreq can not be None')
# Push 0.5/sfreq past the nearest frequency boundary first
sfreq = float(sfreq)
fmin = int(round(fmin * sfreq)) / sfreq - 0.5 / sfreq
fmax = int(round(fmax * sfreq)) / sfreq + 0.5 / sfreq
if raise_error and fmin > fmax:
raise ValueError('fmin (%s) must be less than or equal to fmax (%s)'
% (orig_fmin, orig_fmax))
mask = (freqs >= fmin)
mask &= (freqs <= fmax)
if raise_error and not mask.any():
raise ValueError('No frequencies remain when using fmin=%s and '
'fmax=%s (original frequency bounds are [%s, %s])'
% (orig_fmin, orig_fmax, freqs[0], freqs[-1]))
return mask
def grand_average(all_inst, interpolate_bads=True, drop_bads=True):
"""Make grand average of a list of Evoked or AverageTFR data.
For :class:`mne.Evoked` data, the function interpolates bad channels based
on the ``interpolate_bads`` parameter. If ``interpolate_bads`` is True,
the grand average file will contain good channels and the bad channels
interpolated from the good MEG/EEG channels.
For :class:`mne.time_frequency.AverageTFR` data, the function takes the
subset of channels not marked as bad in any of the instances.
The ``grand_average.nave`` attribute will be equal to the number
of evoked datasets used to calculate the grand average.
.. note:: A grand average evoked should not be used for source
localization.
Parameters
----------
all_inst : list of Evoked or AverageTFR
The evoked datasets.
interpolate_bads : bool
If True, bad MEG and EEG channels are interpolated. Ignored for
AverageTFR.
drop_bads : bool
If True, drop all bad channels marked as bad in any data set.
If neither interpolate_bads nor drop_bads is True, in the output file,
every channel marked as bad in at least one of the input files will be
marked as bad, but no interpolation or dropping will be performed.
Returns
-------
grand_average : Evoked | AverageTFR
The grand average data. Same type as input.
Notes
-----
.. versionadded:: 0.11.0
"""
# check if all elements in the given list are evoked data
from ..evoked import Evoked
from ..time_frequency import AverageTFR
from ..channels.channels import equalize_channels
if not all_inst:
raise ValueError('Please pass a list of Evoked or AverageTFR objects.')
elif len(all_inst) == 1:
warn('Only a single dataset was passed to mne.grand_average().')
inst_type = type(all_inst[0])
_validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements')
for inst in all_inst:
_validate_type(inst, inst_type, 'All elements', 'of the same type')
# Copy channels to leave the original evoked datasets intact.
all_inst = [inst.copy() for inst in all_inst]
# Interpolates if necessary
if isinstance(all_inst[0], Evoked):
if interpolate_bads:
all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0
else inst for inst in all_inst]
from ..evoked import combine_evoked as combine
else: # isinstance(all_inst[0], AverageTFR):
from ..time_frequency.tfr import combine_tfr as combine
if drop_bads:
bads = list({b for inst in all_inst for b in inst.info['bads']})
if bads:
for inst in all_inst:
inst.drop_channels(bads)
equalize_channels(all_inst, copy=False)
# make grand_average object using combine_[evoked/tfr]
grand_average = combine(all_inst, weights='equal')
# change the grand_average.nave to the number of Evokeds
grand_average.nave = len(all_inst)
# change comment field
grand_average.comment = "Grand average (n = %d)" % grand_average.nave
return grand_average
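# Example (illustrative) of averaging evoked responses across subjects; the
# file list and condition name are hypothetical:
#
#     >>> evokeds = [mne.read_evokeds(f, condition='aud')
#     ...            for f in subject_files]  # doctest: +SKIP
#     >>> ga = grand_average(evokeds)         # doctest: +SKIP
#     >>> ga.nave == len(evokeds)             # doctest: +SKIP
#     True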
def object_hash(x, h=None):
"""Hash a reasonable python object.
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if hasattr(x, 'keys'):
# dict-like types
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (str, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, (np.ndarray, np.number, np.bool_)):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tobytes())
elif isinstance(x, datetime):
        object_hash(_dt_to_stamp(x), h)
elif hasattr(x, '__len__'):
# all other list-like types
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
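# Example (illustrative): equal nested structures hash equally, regardless
# of dict insertion order.
#
#     >>> d1 = dict(a=[1, 2.0, 'x'], b=np.arange(3))
#     >>> d2 = dict(b=np.arange(3), a=[1, 2.0, 'x'])
#     >>> object_hash(d1) == object_hash(d2)
#     True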
def object_size(x, memo=None):
"""Estimate the size of a reasonable python object.
Parameters
----------
x : object
Object to approximate the size of.
Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
memo : dict | None
The memodict.
Returns
-------
size : int
The estimated size in bytes of the object.
"""
    # Note: this will not process object arrays properly (since those only
    # hold references)
if memo is None:
memo = dict()
id_ = id(x)
if id_ in memo:
return 0 # do not add already existing ones
if isinstance(x, (bytes, str, int, float, type(None))):
size = sys.getsizeof(x)
elif isinstance(x, np.ndarray):
# On newer versions of NumPy, just doing sys.getsizeof(x) works,
# but on older ones you always get something small :(
size = sys.getsizeof(np.array([]))
if x.base is None or id(x.base) not in memo:
size += x.nbytes
elif isinstance(x, np.generic):
size = x.nbytes
elif isinstance(x, dict):
size = sys.getsizeof(x)
for key, value in x.items():
size += object_size(key, memo)
size += object_size(value, memo)
elif isinstance(x, (list, tuple)):
size = sys.getsizeof(x) + sum(object_size(xx, memo) for xx in x)
elif isinstance(x, datetime):
size = object_size(_dt_to_stamp(x), memo)
elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):
size = sum(sys.getsizeof(xx)
for xx in [x, x.data, x.indices, x.indptr])
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
memo[id_] = size
return size
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def _array_equal_nan(a, b):
try:
np.testing.assert_array_equal(a, b)
except AssertionError:
return False
return True
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables.
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
# Deal with NamedInt and NamedFloat
for sub in (int, float):
if isinstance(a, sub) and isinstance(b, sub):
break
else:
return pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
if isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' left missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' right missing key %s\n' % key
else:
out += object_diff(a[key], b[key],
pre=(pre + '[%s]' % repr(key)))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for ii, (xx1, xx2) in enumerate(zip(a, b)):
out += object_diff(xx1, xx2, pre + '[%s]' % ii)
elif isinstance(a, float):
if not _array_equal_nan(a, b):
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif isinstance(a, (str, int, bytes, np.generic)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' left is None, right is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not _array_equal_nan(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif isinstance(a, datetime):
if (a - b).total_seconds() != 0:
out += pre + ' datetime mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
            out += pre + (' sparse matrix a and b shape mismatch '
                          '(%s vs %s)\n' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
                out += pre + (' sparse matrix a and b differ on %s '
                              'elements\n' % c.nnz)
elif hasattr(a, '__getstate__'):
out += object_diff(a.__getstate__(), b.__getstate__(), pre)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
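# Illustrative usage (added sketch, not part of the original module). An
# empty string means "no differences"; otherwise each line names one
# mismatch:
#
#     >>> object_diff({'a': 1}, {'a': 1})
#     ''
#     >>> object_diff({'a': 1}, {'a': 2})  # doctest: +SKIP
#     "['a'] value mismatch (1, 2)\n"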
class _PCA(object):
"""Principal component analysis (PCA)."""
# Adapted from sklearn and stripped down to just use linalg.svd
# and make it easier to later provide a "center" option if we want
def __init__(self, n_components=None, whiten=False):
self.n_components = n_components
self.whiten = whiten
def fit_transform(self, X, y=None):
X = X.copy()
U, S, _ = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
            # X_new = X * V / S * sqrt(n_samples - 1) = U * sqrt(n_samples - 1)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
if self.n_components is None:
n_components = min(X.shape)
else:
n_components = self.n_components
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, (numbers.Integral, np.integer)):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = _safe_svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, V
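# Illustrative usage (added sketch, not part of the original module). _PCA
# mirrors a stripped-down sklearn PCA; fit_transform returns the component
# scores:
#
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(100, 5)
#     >>> _PCA(n_components=2).fit_transform(X).shape
#     (100, 2)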
def _mask_to_onsets_offsets(mask):
"""Group boolean mask into contiguous onset:offset pairs."""
assert mask.dtype == bool and mask.ndim == 1
mask = mask.astype(int)
diff = np.diff(mask)
onsets = np.where(diff > 0)[0] + 1
if mask[0]:
onsets = np.concatenate([[0], onsets])
offsets = np.where(diff < 0)[0] + 1
if mask[-1]:
offsets = np.concatenate([offsets, [len(mask)]])
assert len(onsets) == len(offsets)
return onsets, offsets
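# Illustrative usage (added sketch, not part of the original module). Each
# onset:offset pair is a half-open slice covering one run of True values:
#
#     >>> mask = np.array([True, True, False, True, False])
#     >>> _mask_to_onsets_offsets(mask)
#     (array([0, 3]), array([2, 4]))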
def _julian_to_dt(jd):
"""Convert Julian integer to a datetime object.
Parameters
----------
    jd : int
        Julian date, i.e. the number of days since Julian day 0, which
        began at noon on January 1, 4713 BC (proleptic Julian calendar),
        or November 24, 4714 BC (proleptic Gregorian calendar).
Returns
-------
jd_date : datetime
Datetime representation of jd
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = timedelta(days=(jd - jd_t0))
return datetime_t0 + dt
def _dt_to_julian(jd_date):
"""Convert datetime object to a Julian integer.
Parameters
----------
jd_date : datetime
Returns
-------
    jd : float
        Julian date corresponding to jd_date, i.e. the number of days
        since Julian day 0, which began at noon on January 1, 4713 BC
        (proleptic Julian calendar), or November 24, 4714 BC (proleptic
        Gregorian calendar).
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = jd_date - datetime_t0
return jd_t0 + dt.days
def _cal_to_julian(year, month, day):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
    jd : int
Julian date.
"""
return int(_dt_to_julian(datetime(year, month, day, 12, 0, 0,
tzinfo=timezone.utc)))
def _julian_to_cal(jd):
    """Convert a Julian integer to a calendar date (year, month, day).
Parameters
----------
    jd : int | float
Julian date.
Returns
-------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
"""
tmp_date = _julian_to_dt(jd)
return tmp_date.year, tmp_date.month, tmp_date.day
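# Illustrative round trip (added sketch, not part of the original module);
# the Unix epoch (1970-01-01) corresponds to Julian day 2440588:
#
#     >>> _cal_to_julian(1970, 1, 1)
#     2440588
#     >>> _julian_to_cal(2440588)
#     (1970, 1, 1)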
def _check_dt(dt):
if not isinstance(dt, datetime) or dt.tzinfo is None or \
dt.tzinfo is not timezone.utc:
raise ValueError('Date must be datetime object in UTC: %r' % (dt,))
def _dt_to_stamp(inp_date):
    """Convert a datetime object to a (seconds, microseconds) pair."""
_check_dt(inp_date)
return int(inp_date.timestamp() // 1), inp_date.microsecond
def _stamp_to_dt(utc_stamp):
"""Convert timestamp to datetime object in Windows-friendly way."""
    # datetime.fromtimestamp can fail for small stamps on Windows (the
    # minimum there is 86400), so build from the epoch with a timedelta
stamp = [int(s) for s in utc_stamp]
if len(stamp) == 1: # In case there is no microseconds information
stamp.append(0)
return (datetime.fromtimestamp(0, tz=timezone.utc) +
timedelta(0, stamp[0], stamp[1])) # day, sec, µs
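# Illustrative round trip (added sketch, not part of the original module;
# assumes the datetime/timezone imports used elsewhere in this file):
#
#     >>> dt = datetime(2020, 1, 1, tzinfo=timezone.utc)
#     >>> _stamp_to_dt(_dt_to_stamp(dt)) == dt
#     True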
class _ReuseCycle(object):
"""Cycle over a variable, preferring to reuse earlier indices.
Requires the values in ``x`` to be hashable and unique. This holds
nicely for matplotlib's color cycle, which gives HTML hex color strings.
"""
def __init__(self, x):
self.indices = list()
self.popped = dict()
assert len(x) > 0
self.x = x
def __iter__(self):
while True:
yield self.__next__()
def __next__(self):
if not len(self.indices):
self.indices = list(range(len(self.x)))
self.popped = dict()
idx = self.indices.pop(0)
val = self.x[idx]
assert val not in self.popped
self.popped[val] = idx
return val
def restore(self, val):
try:
idx = self.popped.pop(val)
except KeyError:
warn('Could not find value: %s' % (val,))
else:
loc = np.searchsorted(self.indices, idx)
self.indices.insert(loc, idx)
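# Illustrative usage (added sketch, not part of the original module).
# Restored values are handed out again before the cycle wraps around:
#
#     >>> cyc = _ReuseCycle(['a', 'b', 'c'])
#     >>> next(cyc), next(cyc)
#     ('a', 'b')
#     >>> cyc.restore('a')
#     >>> next(cyc)
#     'a'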
| bsd-3-clause |
alekz112/statsmodels | statsmodels/examples/ex_kernel_regression2.py | 34 | 1511 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
np.random.seed(500)
nobs = [250, 1000][0]
sig_fac = 1
x = np.random.uniform(-2, 2, size=nobs)
x.sort()
y_true = np.sin(x*5)/x + 2*x
y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
model = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls',
defaults=nparam.EstimatorSettings(efficient=True))
sm_bw = model.bw
sm_mean, sm_mfx = model.fit()
model1 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls')
mean1, mfx1 = model1.fit()
model2 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='ll',
var_type='c', bw='cv_ls')
mean2, mfx2 = model2.fit()
print(model.bw)
print(model1.bw)
print(model2.bw)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, y, 'o', alpha=0.5)
ax.plot(x, y_true, lw=2, label='DGP mean')
ax.plot(x, sm_mean, lw=2, label='kernel mean')
    ax.plot(x, mean2, lw=2, label='kernel mean (ll)')
ax.legend()
plt.show()
| bsd-3-clause |
ChristopherGS/sensor_readings | ML_Sandbox/feature_engineering.py | 1 | 5687 | import numpy as np
import pandas as pd
import os
import numpy.fft as fft
import scipy.fftpack
import config
def rolling_average(df):
return pd.rolling_mean(df, window=config.TIME_SEQUENCE_LENGTH-2, center=True).mean()
def rolling_median(df):
return pd.rolling_median(df, window=config.TIME_SEQUENCE_LENGTH-2, center=True).mean()
def rolling_max(df):
return pd.rolling_max(df, window=config.TIME_SEQUENCE_LENGTH-2, center=True).mean()
def rolling_min(df):
return pd.rolling_min(df, window=config.TIME_SEQUENCE_LENGTH-2, center=True).mean()
def standard_deviation(df):
return df.std()
def max_min_dif(df):
diff = (pd.rolling_min(df, window=config.TIME_SEQUENCE_LENGTH-2, center=True).mean()) - (pd.rolling_max(df, window=config.TIME_SEQUENCE_LENGTH-2, center=True).mean())
    return diff
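# Illustrative usage (added sketch, not part of the original module; the
# pd.rolling_* helpers above target an old pandas API). Each helper
# collapses a window of samples to a single summary value, e.g.:
#
#     >>> s = pd.Series([1., 2., 3., 4.])
#     >>> standard_deviation(s)  # doctest: +SKIP
#     1.29...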
def create_rm_feature(df, sequence_length):
features = []
original_x = df['ACCEL_X'].astype(float)
original_y = df['ACCEL_Y'].astype(float)
original_z = df['ACCEL_Z'].astype(float)
original_gx = df['GYRO_X'].astype(float)
original_gy = df['GYRO_Y'].astype(float)
original_gz = df['GYRO_Z'].astype(float)
i = original_x.index.values
time_sequence = [i] * sequence_length
    # The actual number of 'i' values in the np.array is the sequence_length
idx = np.array(time_sequence).T.flatten()[:len(original_x)]
x = original_x.groupby(idx).mean()
x.name = 'ACCEL_X'
features.append(x)
y = original_y.groupby(idx).mean()
y.name = 'ACCEL_Y'
features.append(y)
z = original_z.groupby(idx).mean()
z.name = 'ACCEL_Z'
features.append(z)
gx = original_gx.groupby(idx).mean()
gx.name = 'GYRO_X'
features.append(gx)
gy = original_gy.groupby(idx).mean()
gy.name = 'GYRO_Y'
features.append(gy)
gz = original_gz.groupby(idx).mean()
gz.name = 'GYRO_Z'
features.append(gz)
#rolling median
x_ra = df['ACCEL_X'].groupby(idx).apply(rolling_median)
x_ra.name = 'rolling_median_x'
features.append(x_ra)
y_ra = df['ACCEL_Y'].groupby(idx).apply(rolling_median)
y_ra.name = 'rolling_median_y'
features.append(y_ra)
z_ra = df['ACCEL_Z'].groupby(idx).apply(rolling_median)
z_ra.name = 'rolling_median_z'
features.append(z_ra)
#gx_ra = df['GYRO_X'].groupby(idx).apply(rolling_median)
#gx_ra.name = 'rolling_median_gx'
#features.append(gx_ra)
#gy_ra = df['GYRO_Y'].groupby(idx).apply(rolling_median)
#gy_ra.name = 'rolling_median_gy'
#features.append(gy_ra)
#gz_ra = df['GYRO_Z'].groupby(idx).apply(rolling_median)
#gz_ra.name = 'rolling_median_gz'
#features.append(gz_ra)
#rolling max
x_rm = df['ACCEL_X'].groupby(idx).apply(rolling_max)
x_rm.name = 'rolling_max_x'
features.append(x_rm)
y_rm = df['ACCEL_Y'].groupby(idx).apply(rolling_max)
y_rm.name = 'rolling_max_y'
features.append(y_rm)
z_rm = df['ACCEL_Z'].groupby(idx).apply(rolling_max)
z_rm.name = 'rolling_max_z'
features.append(z_rm)
#gx_rm = df['GYRO_X'].groupby(idx).apply(rolling_max)
#gx_rm.name = 'rolling_max_gx'
#features.append(gx_rm)
#gy_rm = df['GYRO_Y'].groupby(idx).apply(rolling_max)
#gy_rm.name = 'rolling_max_gy'
#features.append(gy_rm)
#gz_rm = df['GYRO_Z'].groupby(idx).apply(rolling_max)
#gz_rm.name = 'rolling_max_gz'
#features.append(gz_rm)
#rolling min
x_rmin = df['ACCEL_X'].groupby(idx).apply(rolling_min)
x_rmin.name = 'rolling_min_x'
features.append(x_rmin)
y_rmin = df['ACCEL_Y'].groupby(idx).apply(rolling_min)
y_rmin.name = 'rolling_min_y'
features.append(y_rmin)
z_rmin = df['ACCEL_Z'].groupby(idx).apply(rolling_min)
z_rmin.name = 'rolling_min_z'
features.append(z_rmin)
#gx_rmin = df['GYRO_X'].groupby(idx).apply(rolling_min)
#gx_rmin.name = 'rolling_min_gx'
#features.append(gx_rmin)
#gy_rmin = df['GYRO_Y'].groupby(idx).apply(rolling_min)
#gy_rmin.name = 'rolling_min_gy'
#features.append(gy_rmin)
#gz_rmin = df['GYRO_Z'].groupby(idx).apply(rolling_min)
#gz_rmin.name = 'rolling_min_gz'
#features.append(gz_rmin)
#standard deviation
x_std = df['ACCEL_X'].groupby(idx).apply(standard_deviation)
x_std.name = 'std_x'
features.append(x_std)
y_std = df['ACCEL_Y'].groupby(idx).apply(standard_deviation)
y_std.name = 'std_y'
features.append(y_std)
z_std = df['ACCEL_Z'].groupby(idx).apply(standard_deviation)
z_std.name = 'std_z'
features.append(z_std)
gx_std = df['GYRO_X'].groupby(idx).apply(standard_deviation)
gx_std.name = 'std_gx'
features.append(gx_std)
gy_std = df['GYRO_Y'].groupby(idx).apply(standard_deviation)
gy_std.name = 'std_gy'
features.append(gy_std)
gz_std = df['GYRO_Z'].groupby(idx).apply(standard_deviation)
gz_std.name = 'std_gz'
features.append(gz_std)
# Max min diff
x_diff = df['ACCEL_X'].groupby(idx).apply(max_min_dif)
x_diff.name = 'diff_x'
features.append(x_diff)
y_diff = df['ACCEL_Y'].groupby(idx).apply(max_min_dif)
y_diff.name = 'diff_y'
features.append(y_diff)
z_diff = df['ACCEL_Z'].groupby(idx).apply(max_min_dif)
z_diff.name = 'diff_z'
features.append(z_diff)
gx_diff = df['GYRO_X'].groupby(idx).apply(max_min_dif)
gx_diff.name = 'diff_gx'
features.append(gx_diff)
gy_diff = df['GYRO_Y'].groupby(idx).apply(max_min_dif)
gy_diff.name = 'diff_gy'
features.append(gy_diff)
gz_diff = df['GYRO_Z'].groupby(idx).apply(max_min_dif)
gz_diff.name = 'diff_gz'
features.append(gz_diff)
data = pd.concat(features, axis=1)
return data | bsd-3-clause |
apevec/RMS | Utils/StackImgs.py | 2 | 2217 | """ Stacks all images in the given folder into one image. """
from __future__ import print_function, division, absolute_import
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from RMS.Routines.Image import loadImage, saveImage
from Utils.StackFFs import deinterlaceBlend, blendLighten
if __name__ == '__main__':
### COMMAND LINE ARGUMENTS
# Init the command line arguments parser
arg_parser = argparse.ArgumentParser(description="Stacks all images of the given type in the given folder.")
arg_parser.add_argument('dir_path', nargs=1, metavar='DIR_PATH', type=str, \
help='Path to directory with FF files.')
arg_parser.add_argument('input_file_format', nargs=1, metavar='INPUT_FILE_FORMAT', type=str, \
help='File format of input images, e.g. jpg or png.')
arg_parser.add_argument('output_file_format', nargs=1, metavar='OUTPUT_FILE_FORMAT', type=str, \
help='File format of the output stacked image, e.g. jpg or png.')
arg_parser.add_argument('-d', '--deinterlace', action="store_true", help="""Deinterlace the image before stacking. """)
# Parse the command line arguments
cml_args = arg_parser.parse_args()
#########################
dir_path = cml_args.dir_path[0]
first_img = True
# List all FF files in the current dir
for ff_name in os.listdir(dir_path):
if ff_name.endswith('.' + cml_args.input_file_format[0]):
print('Stacking: ', ff_name)
# Load the image
img = loadImage(os.path.join(dir_path, ff_name), -1)
# Deinterlace the image
if cml_args.deinterlace:
img = deinterlaceBlend(img)
if first_img:
merge_img = np.copy(img)
first_img = False
continue
# Blend images 'if lighter'
merge_img = blendLighten(merge_img, img)
stack_path = os.path.join(dir_path, 'stacked.' + cml_args.output_file_format[0])
print("Saving to:", stack_path)
# Save the blended image
saveImage(stack_path, merge_img)
# Plot the blended image
plt.imshow(merge_img, cmap='gray')
plt.show()
| gpl-3.0 |
wlamond/scikit-learn | examples/applications/svm_gui.py | 124 | 11251 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
try:
import tkinter as Tk
except ImportError:
# Backward compat for Python 2
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """The Model which holds the data. It implements the
    observable in the observer pattern and notifies the
    registered observers on change events.
    """
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
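    # Illustrative note (added, not in the original example): with delta=1
    # and the fixed [-50, 50] plot bounds, decision_surface evaluates Z on a
    # 101 x 101 grid.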
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
    """The View, which renders the data points and the decision surface."""
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
jimgoo/zipline-fork | tests/test_transforms_talib.py | 12 | 6304 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
import numpy as np
import pandas as pd
import talib
from datetime import timedelta, datetime
from unittest import TestCase, skip
from zipline.utils.test_utils import setup_logger, teardown_logger
import zipline.utils.factory as factory
from zipline.finance.trading import TradingEnvironment
from zipline.test_algorithms import TALIBAlgorithm
import zipline.transforms.ta as ta
class TestTALIB(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
@classmethod
def tearDownClass(cls):
del cls.env
def setUp(self):
setup_logger(self)
sim_params = factory.create_simulation_parameters(
start=datetime(1990, 1, 1, tzinfo=pytz.utc),
end=datetime(1990, 3, 30, tzinfo=pytz.utc))
self.source, self.panel = \
factory.create_test_panel_ohlc_source(sim_params, self.env)
def tearDown(self):
teardown_logger(self)
@skip
def test_talib_with_default_params(self):
BLACKLIST = ['make_transform', 'BatchTransform',
# TODO: Figure out why MAVP generates a KeyError
'MAVP']
names = [name for name in dir(ta)
if name[0].isupper() and name not in BLACKLIST]
for name in names:
print(name)
zipline_transform = getattr(ta, name)(sid=0)
talib_fn = getattr(talib.abstract, name)
start = datetime(1990, 1, 1, tzinfo=pytz.utc)
end = start + timedelta(days=zipline_transform.lookback + 10)
sim_params = factory.create_simulation_parameters(
start=start, end=end)
source, panel = \
factory.create_test_panel_ohlc_source(sim_params, self.env)
algo = TALIBAlgorithm(talib=zipline_transform)
algo.run(source)
zipline_result = np.array(
algo.talib_results[zipline_transform][-1])
talib_data = dict()
data = zipline_transform.window
# TODO: Figure out if we are clobbering the tests by this
# protection against empty windows
if not data:
continue
for key in ['open', 'high', 'low', 'volume']:
if key in data:
talib_data[key] = data[key][0].values
talib_data['close'] = data['price'][0].values
expected_result = talib_fn(talib_data)
if isinstance(expected_result, list):
expected_result = np.array([e[-1] for e in expected_result])
else:
expected_result = np.array(expected_result[-1])
if not (np.all(np.isnan(zipline_result)) and
np.all(np.isnan(expected_result))):
self.assertTrue(np.allclose(zipline_result, expected_result))
else:
print('--- NAN')
# reset generator so next iteration has data
# self.source, self.panel = \
# factory.create_test_panel_ohlc_source(self.sim_params)
def test_multiple_talib_with_args(self):
zipline_transforms = [ta.MA(timeperiod=10),
ta.MA(timeperiod=25)]
talib_fn = talib.abstract.MA
algo = TALIBAlgorithm(talib=zipline_transforms, identifiers=[0])
algo.run(self.source)
# Test if computed values match those computed by pandas rolling mean.
sid = 0
talib_values = np.array([x[sid] for x in
algo.talib_results[zipline_transforms[0]]])
np.testing.assert_array_equal(talib_values,
pd.rolling_mean(self.panel[0]['price'],
10).values)
talib_values = np.array([x[sid] for x in
algo.talib_results[zipline_transforms[1]]])
np.testing.assert_array_equal(talib_values,
pd.rolling_mean(self.panel[0]['price'],
25).values)
for t in zipline_transforms:
talib_result = np.array(algo.talib_results[t][-1])
talib_data = dict()
data = t.window
# TODO: Figure out if we are clobbering the tests by this
# protection against empty windows
if not data:
continue
for key in ['open', 'high', 'low', 'volume']:
if key in data:
talib_data[key] = data[key][0].values
talib_data['close'] = data['price'][0].values
expected_result = talib_fn(talib_data, **t.call_kwargs)[-1]
np.testing.assert_allclose(talib_result, expected_result)
def test_talib_with_minute_data(self):
ma_one_day_minutes = ta.MA(timeperiod=10, bars='minute')
# Assert that the BatchTransform window length is enough to cover
# the amount of minutes in the timeperiod.
# Here, 10 minutes only needs a window length of 1.
self.assertEquals(1, ma_one_day_minutes.window_length)
# With minutes greater than the 390, i.e. one trading day, we should
# have a window_length of two days.
ma_two_day_minutes = ta.MA(timeperiod=490, bars='minute')
self.assertEquals(2, ma_two_day_minutes.window_length)
# TODO: Ensure that the lookback into the datapanel is returning
# expected results.
# Requires supplying minute instead of day data to the unit test.
# When adding test data, should add more minute events than the
# timeperiod to ensure that lookback is behaving properly.
| apache-2.0 |
boland1992/seissuite_iran | build/lib.linux-x86_64-2.7/seissuite/spectrum/heat_pickle.py | 8 | 25534 | # -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import shapefile
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from scipy import interpolate
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from descartes.patch import PolygonPatch
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
import itertools
from scipy.interpolate import griddata
import random
from sklearn.cluster import DBSCAN
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. This class uses
the shapely module.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def shape_poly(self):
with fiona.open(self.boundary) as fiona_collection:
# In this case, we'll assume the shapefile only has one later
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
self.polygon = geometry.asShape( shapefile_record['geometry'] )
return self.polygon
def point_check(self, coord):
"""
Function that takes a single (2,1) shape input, converts the points
into a shapely.geometry.Point object and then checks if the coord
is contained within the shapefile.
"""
self.polygon = self.shape_poly()
point = geometry.Point(coord[0], coord[1])
if self.polygon.contains(point):
return coord
def shape_bounds(self):
"""
Function that returns the bounding box coordinates xmin,xmax,ymin,ymax
"""
self.polygon = self.shape_poly()
return self.polygon.bounds
def shape_buffer(self, shape=None, size=1., res=1):
"""
Function that returns a new polygon of the larger buffered points.
Can import polygon into function if desired. Default is
self.shape_poly()
"""
        if shape is None:
            shape = self.shape_poly()
        return asPolygon(shape.buffer(size, resolution=res).exterior)
def extract_poly_coords(self, poly):
if poly.type == 'Polygon':
exterior_coords = poly.exterior.coords[:]
elif poly.type == 'MultiPolygon':
exterior_coords = []
for part in poly:
epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
exterior_coords.append(epc)
else:
raise ValueError('Unhandled geometry type: ' + repr(poly.type))
return np.vstack(exterior_coords)
def external_coords(self, shape=None, buff=None, size=1., res=1):
"""
Function that returns the external coords of a buffered shapely
polygon. Note that shape variable input
MUST be a shapely Polygon object.
"""
if shape is not None and buff is not None:
poly = self.shape_buffer(shape=shape, size=size, res=res)
elif shape is not None:
poly = shape
else:
poly = self.shape_poly()
exterior_coords = self.extract_poly_coords(poly)
return exterior_coords
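# Illustrative usage (added sketch, not part of the original module; needs a
# real shapefile on disk):
#
#     >>> shp = InShape('aus.shp')          # doctest: +SKIP
#     >>> shp.point_check((133., -25.))     # returns the coord if inside
#     (133.0, -25.0)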
class InPoly:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. The class uses
the matplotlib Path class.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise boundary polygon nodes
self.nodes = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def poly_nodes(self):
"""
Function that returns the nodes of a shapefile as a (n,2) array.
"""
sf = shapefile.Reader(self.boundary)
poly = sf.shapes()[0]
#find polygon nodes lat lons
self.nodes = np.asarray(poly.points)
return self.nodes
def points_from_path(self, poly):
"""
Function that returns nodes from matplotlib Path object.
"""
return poly.vertices
def shapefile_poly(self):
"""
Function that imports a shapefile location path and returns
a matplotlib Path object representing this shape.
"""
self.nodes = self.poly_nodes()
#convert to a matplotlib path class!
self.polygon = Path(self.nodes)
return self.polygon
def node_poly(self, nodes):
"""
Function creates a matplotlib Path object from input nodes.
"""
#convert to a matplotlib path class!
polygon = Path(nodes)
return polygon
def points_in_shapefile_poly(self):
"""
Function that takes a single (2,1) coordinate input, and uses the
contains() function in class matplotlib Path to check if point is
in the polygon.
"""
self.polygon = self.shapefile_poly()
points_in = self.polygon.contains_points(self.dots)
self.output = self.dots[points_in == True]
return np.asarray(self.output)
def points_in(self, points, poly=None, IN=True, indices=False):
"""
Function that takes a many (2,N) points, and uses the
contains() function in class matplotlib Path to check if point is
in the polygon. If IN=True then the function will return points inside
the matplotlib Path object, else if IN=False then the function will
return the points outside the matplotlib Path object.
"""
if poly is None:
poly = self.shapefile_poly()
points_test = poly.contains_points(points)
if indices:
return points_test
else:
output = points[points_test == IN]
return np.asarray(output)
def bounds_poly(self, nodes=None):
"""
Function that returns boundaries of a shapefile polygon.
"""
if nodes is None:
nodes = self.poly_nodes()
xmin, xmax = np.min(nodes[:,0]), np.max(nodes[:,0])
ymin, ymax = np.min(nodes[:,1]), np.max(nodes[:,1])
return xmin, xmax, ymin, ymax
def poly_from_shape(self, shape=None, size=1., res=1):
"""
Function that returns a matplotlib Path object from
buffered shape points. if shape != None then the shape input
MUST be of type shapely polygon.
"""
SHAPE = InShape(self.boundary)
if shape is None:
# Generates shape object from shape_file input
shape = SHAPE
return self.node_poly(shape.external_coords(size=size, res=res))
else:
return self.node_poly(SHAPE.external_coords(shape=shape))
def rand_poly(self, poly=None, N=1e4, IN=True):
"""
Function that takes an input matplotlib Path object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Path object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else if
IN=False then the function will return the points outside the
matplotlib Path object.
"""
if poly is None:
#poly = self.shapefile_poly()
xmin, xmax, ymin, ymax = self.bounds_poly()
else:
nodes = self.points_from_path(poly)
xmin, xmax, ymin, ymax = self.bounds_poly(nodes=nodes)
        X = abs(xmax - xmin) * np.random.rand(int(N), 1) + xmin
        Y = abs(ymax - ymin) * np.random.rand(int(N), 1) + ymin
many_points = np.column_stack((X,Y))
many_points = self.points_in(many_points, poly=poly, IN=IN)
return many_points
def rand_shape(self, shape=None, N=1e4, IN=True):
"""
Function that takes an input shapely Polygon object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Polygon object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else
if IN=False then the function will return the points outside
the matplotlib Path object.
"""
        INSHAPE = InShape(self.boundary)
        if shape is None:
            # Generate the shapely polygon from the shape_file input
            shape = INSHAPE.shape_poly()
        poly = self.node_poly(INSHAPE.external_coords(shape=shape))
        points = self.rand_poly(poly=poly, N=N, IN=IN)
        return points
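# ``wgs84`` is used by Geodesic.fast_geodesic below but is never defined or
# imported in this file; it is presumably a pyproj geodesic object
# (assumption):
from pyproj import Geod
wgs84 = Geod(ellps='WGS84')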
class Geodesic:
"""
Class defined in order to create to process points, distances and
other related geodesic calculations and functions
"""
def __init__(self, period_range=[1, 40], km_point=20., max_dist=2e3):
# initialise period_range as [1,40] default for ambient noise
self.per_range = period_range
self.km = km_point
self.max_dist = max_dist
def remove_distance(self, period_range, max_dist=None):
"""
Function that returns a given possible resolvable ambient noise
structure distance range, given the maximum period range
availabe to the study. The distance returned is in km.
Maximum distance default can be reassigned based on the cut-off found
by your time-lag plots for your study!
"""
if max_dist is None:
max_dist = self.max_dist
if type(period_range) == list:
min_dist = min(period_range) * 9
return [min_dist, max_dist]
        elif isinstance(period_range, (int, float)):
            return [period_range * 9, max_dist]
def haversine(self, lon1, lat1, lon2, lat2, R=6371):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees). R is radius of
spherical earth. Default is 6371km.
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = R * c
return km
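    # Illustrative check (added sketch, not part of the original module):
    # one degree of longitude at the equator is roughly 111 km:
    #
    #     >>> int(Geodesic().haversine(0., 0., 1., 0.))  # doctest: +SKIP
    #     111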
def fast_geodesic(self, lon1, lat1, lon2, lat2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def paths_calc(self, path_info, km_points=None, per_lims=None):
"""
Function that returns an array of coordinates equidistant along
a great cricle path between two lat-lon coordinates if these points
lay within a certain distance range ... otherwise the points return
only a set of zeros the same size as the array. Default is 1.0km
distance per point.
"""
if per_lims is None:
# if no new default for period limits is defined, then set the
# limit to the default.
per_lims = self.per_range
if km_points is None:
km_points = self.km
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
# interpoint distance <= 1 km, and nb of points >= 100
dist = self.haversine(lon1, lat1, lon2, lat2)
npts = max(int((np.ceil(dist) + 1) / km_points), 100)
path = self.fast_geodesic(lon1, lat1, lon2, lat2, npts)
dist_range = self.remove_distance(per_lims)
if min(dist_range) < dist < max(dist_range):
#remove the closest points along this line that fall below the distance
#find the index of the first point that is above this distance away!
pts_km = npts / float((np.ceil(dist) + 1)) #this gives pts/km
#remove all points below this index in the paths list
            dist_index = int(pts_km * min(dist_range))
            path = path[dist_index:]
return path
else:
return np.zeros_like(path)
def fast_paths(self, coord_list):
"""
Function that takes many point coordinate combinations and quickly
passes them through the paths_calc function. coord_list MUST be
of the shape (4, N) whereby each coordinate combination is in a
(4,1) row [lon1,lat1,lon2,lat2].
"""
return map(self.paths_calc, coord_list)
def combine_paths(self, paths):
"""
Function that takes many paths (should be array of same length as
number of stations). This is automatically generated by parallelising
the fast_paths function above.
The output array should only contain unique, no repeating paths
and should be of the shape (2,N) where N is a large number of coords.
"""
#create a flattened numpy array of size 2xN from the paths created!
paths = list(itertools.chain(*paths))
paths = np.asarray(list(itertools.chain\
(*paths)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(paths).view(np.dtype\
((np.void, paths.dtype.itemsize * \
paths.shape[1])))
_, idx = np.unique(b, return_index=True)
paths = np.unique(b).view(paths.dtype)\
.reshape(-1, paths.shape[1])
return paths
def remove_zeros(self, paths):
"""
Function that processes the flattened path output from combine_paths
and removes the zero paths created by paths_calc. Remove zeroes
from paths to ensure all paths that were NOT in the distance threshold
are removed from the path density calculation!
"""
path_lons, path_lats = paths[:,0], paths[:,1]
FIND_ZERO1 = np.where(paths[:,0]==0)[0]
FIND_ZERO2 = np.where(paths[:,1]==0)[0]
if len(FIND_ZERO1) != 0 and len(FIND_ZERO2) != 0:
path_lons = np.delete(path_lons, FIND_ZERO1)
path_lats = np.delete(path_lats, FIND_ZERO2)
return np.column_stack((path_lons, path_lats))
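# Illustrative usage (added sketch, not part of the original module; requires
# the wgs84 geodesic defined above). paths_calc returns equidistant points
# along the great circle, or an all-zero array when the inter-station
# distance falls outside the resolvable range:
#
#     >>> geo = Geodesic(period_range=[1, 40], km_point=20.)
#     >>> path = geo.paths_calc([130., -25., 140., -30.])  # doctest: +SKIP
#     >>> path.shape[1]
#     2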
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', nperseg=1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
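# Illustrative usage of spectrum() (added sketch, not part of the original
# script; assumes any readable miniSEED file):
#
#     >>> tr = read('example.mseed')[0]  # doctest: +SKIP
#     >>> col = spectrum(tr)  # (255, 2) array of frequency vs. amplitude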
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
    except Exception:
        # Silently skip files whose names do not match the expected
        # STATION.YYYY-MM-DD scheme.
        pass
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
GEODESIC = Geodesic()
# import background shapefile location
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
INPOLY = InPoly(shape_path)
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
# set plotting limits for shapefile boundaries
lonmin, latmin, lonmax, latmax = SHAPE.shape_bounds()
print lonmin, latmin, lonmax, latmax
#lonmin, lonmax, latmin, latmax = SHAPE.plot_lims()
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
pickle_file0 = '/storage/ANT/spectral_density/station_pds_maxima/\
AUSTRALIA 2014/noiseinfo_comb.pickle'
pickle_file0 = '/storage/ANT/spectral_density/station_pds_maxima/AUSTRALIA 2014/first_peak_dict_australia_2014.pickle'
pickle_file0 = '/storage/ANT/spectral_density/noise_info0.pickle'
comb_noise = '/storage/ANT/spectral_density/station_pds_maxima/total_noise_combination.pickle'
f = open(name=comb_noise, mode='rb')
noise_info0 = pickle.load(f)
f.close()
# sort the noise
noise_info0 = np.asarray(noise_info0)
#noise_info0 = noise_info0[np.argsort(noise_info0[:, 1])]
# Combine AU with S info
print len(noise_info0)
# find outliers
def reject_outliers(data, m=0.5):
return data[abs(data - np.mean(data)) < m * np.std(data)]
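# Illustrative behaviour (added sketch, not part of the original script):
# only values within m standard deviations of the mean survive:
#
#     >>> reject_outliers(np.array([1., 2., 3., 100.]), m=1.)  # doctest: +SKIP
#     array([ 1.,  2.,  3.])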
outliers = reject_outliers(noise_info0[:,2])
# remove outliers
noise_info0 = np.asarray([info for info in noise_info0 \
if info[2] in outliers])
# filter coordinates that are too close together.
min_dist = 1. #degrees
coords = np.column_stack((noise_info0[:,0], noise_info0[:,1]))
# next remove points outside of the given poly if applicable
coord_indices = INPOLY.points_in(coords, indices=True)
noise_info0 = noise_info0[coord_indices == True]
print noise_info0
coords = np.column_stack((noise_info0[:,0], noise_info0[:,1]))
coord_combs = np.asarray(list(itertools.combinations(coords,2)))
print len(coord_combs)
def coord_combinations(coord_combs):
lon1, lat1 = coord_combs[0][0], coord_combs[0][1]
lon2, lat2 = coord_combs[1][0], coord_combs[1][1]
return [coord_combs, GEODESIC.haversine(lon1, lat1, lon2, lat2)]
t0 = datetime.datetime.now()
pool = mp.Pool()
comb_dists = pool.map(coord_combinations, coord_combs)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
comb_dists = np.asarray(comb_dists)
# sort by distance
comb_dists = comb_dists[np.argsort(comb_dists[:, 1])]
# find the pairs whose separation exceeds min_dist
find_min = np.where(comb_dists[:,1] > min_dist)[0]
# drop those pairs, keeping only station pairs closer together than min_dist
comb_dists = np.delete(comb_dists, find_min, axis=0)
remaining_coords = comb_dists[:,0]
# get unique coordinates from remaining coords
#paths = list(itertools.chain(*paths))
remaining_coords = np.asarray(list(itertools.chain\
(*remaining_coords)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(remaining_coords).view(np.dtype\
((np.void, remaining_coords.dtype.itemsize * \
remaining_coords.shape[1])))
_, idx = np.unique(b, return_index=True)
remaining_coords = np.unique(b).view(remaining_coords.dtype)\
.reshape(-1, remaining_coords.shape[1])
#scan for all points that are within a degree radius of one another!
db = DBSCAN(eps=min_dist).fit(coords)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# Black removed and is used for noise instead.
unique_labels = set(labels)
clusters = []
cluster_keep = []
for k in unique_labels:
if k != -1:
class_member_mask = (labels == k)
cluster = coords[class_member_mask & core_samples_mask]
#xy = coords[class_member_mask & ~core_samples_mask]
# Select only 1 random point from each cluster to keep. Remove all others!
clusters.append(cluster)
cluster_keep.append(cluster[random.randint(0,len(cluster)-1)])
cluster_keep = np.asarray(cluster_keep)
# flatten clusters array
clusters = np.asarray(list(itertools.chain(*clusters)))
# remove all points in clusters from the overall coords array
coords = np.asarray([coord for coord in coords if coord not in clusters])
# place single representative point from cluster back into overall coord list
coords = np.append(coords, cluster_keep, axis=0)
print len(noise_info0)
# remove cluster coordinates from noise_info0
noise_info0 = np.asarray([info for info in noise_info0 \
if info[0] in coords[:,0]])
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n Australian Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
print "number of station points: ", len(noise_info0)
patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
x, y = noise_info0[:,0], noise_info0[:,1]
points = np.column_stack((x,y))
xmin, xmax = np.min(x), np.max(x)
ymin, ymax = np.min(y), np.max(y)
values = noise_info0[:,2]
#now we create a grid of values, interpolated from our random sample above
y = np.linspace(ymin, ymax, 200)
x = np.linspace(xmin, xmax, 200)
gridx, gridy = np.meshgrid(x, y)
heat_field = griddata(points, values, (gridx, gridy),
method='cubic',fill_value=0)
#heat_field = np.where(heat_field < 0, 1, heat_field)
heat_field = np.ma.masked_where(heat_field==0,heat_field)
print gridx
plt.pcolor(gridx, gridy, heat_field,
cmap='rainbow',alpha=0.5, norm=LogNorm(vmin=100, vmax=3e4),
zorder=2)
plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap='rainbow', zorder=3)
#cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
#sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
# norm=LogNorm(vmin=100, vmax=3e4), s=50, cmap=cm, zorder=2)
col = plt.colorbar()
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
ax.set_xlim(lonmin-0.05*abs(lonmax-lonmin), \
lonmax+0.05*abs(lonmax-lonmin))
ax.set_ylim(latmin-0.05*abs(latmax-latmin), \
latmax+0.05*abs(latmax-latmin))
fig.savefig('station_pds_maxima/noise_map_all.svg',
format='SVG')
| gpl-3.0 |
Lyleo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
    # mappable caches of rendered results
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
                if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = r"""\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
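# For example, a pixel whose glyph coverage (alpha) is 0.75 blends to
# red = 0.75*0 + 0.25*1 = 0.25, and we recover alpha = 1 - 0.25 = 0.75.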
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
| gpl-3.0 |
plissonf/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
siutanwong/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model with and without sample weights; the unweighted fit
# serves as a reference
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
datapythonista/pandas | pandas/core/nanops.py | 1 | 51987 | from __future__ import annotations
import functools
import itertools
import operator
from typing import (
Any,
cast,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
NaT,
NaTType,
Timedelta,
iNaT,
lib,
)
from pandas._typing import (
ArrayLike,
Dtype,
DtypeObj,
F,
Scalar,
Shape,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import (
get_dtype,
is_any_int_dtype,
is_bool_dtype,
is_complex,
is_datetime64_any_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
notna,
)
from pandas.core.construction import extract_array
bn = import_optional_dependency("bottleneck", errors="warn")
_BOTTLENECK_INSTALLED = bn is not None
_USE_BOTTLENECK = False
def set_use_bottleneck(v: bool = True) -> None:
# set/unset to use bottleneck
global _USE_BOTTLENECK
if _BOTTLENECK_INSTALLED:
_USE_BOTTLENECK = v
set_use_bottleneck(get_option("compute.use_bottleneck"))
class disallow:
def __init__(self, *dtypes):
super().__init__()
self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes)
def check(self, obj) -> bool:
return hasattr(obj, "dtype") and issubclass(obj.dtype.type, self.dtypes)
def __call__(self, f: F) -> F:
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, kwargs.values())
if any(self.check(obj) for obj in obj_iter):
f_name = f.__name__.replace("nan", "")
raise TypeError(
f"reduction operation '{f_name}' not allowed for this dtype"
)
try:
with np.errstate(invalid="ignore"):
return f(*args, **kwargs)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(args[0]):
raise TypeError(e) from e
raise
return cast(F, _f)
class bottleneck_switch:
def __init__(self, name=None, **kwargs):
self.name = name
self.kwargs = kwargs
def __call__(self, alt: F) -> F:
bn_name = self.name or alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
**kwds,
):
if len(self.kwargs) > 0:
for k, v in self.kwargs.items():
if k not in kwds:
kwds[k] = v
if values.size == 0 and kwds.get("min_count") is None:
# We are empty, returning NA for our type
# Only applies for the default `min_count` of None
# since that affects how empty arrays are handled.
# TODO(GH-18976) update all the nanops methods to
# correctly handle empty inputs and remove this check.
# It *may* just be `var`
return _na_for_min_count(values, axis)
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
if kwds.get("mask", None) is None:
# `mask` is not recognised by bottleneck, would raise
# TypeError if called
kwds.pop("mask", None)
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
return cast(F, f)
def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:
# Bottleneck chokes on datetime64, PeriodDtype (or and EA)
if not is_object_dtype(dtype) and not needs_i8_conversion(dtype):
# GH 15507
# bottleneck does not properly upcast during the sum
# so can overflow
# GH 9422
# further we also want to preserve NaN when all elements
# are NaN, unlike bottleneck/numpy which consider this
# to be 0
return name not in ["nansum", "nanprod"]
return False
def _has_infs(result) -> bool:
if isinstance(result, np.ndarray):
if result.dtype == "f8":
return lib.has_infs_f8(result.ravel("K"))
elif result.dtype == "f4":
return lib.has_infs_f4(result.ravel("K"))
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(
dtype: DtypeObj, fill_value: Scalar | None = None, fill_value_typ=None
):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == "+inf":
return np.inf
else:
return -np.inf
else:
if fill_value_typ == "+inf":
# need the max int here
return np.iinfo(np.int64).max
else:
return iNaT
def _maybe_get_mask(
values: np.ndarray, skipna: bool, mask: np.ndarray | None
) -> np.ndarray | None:
"""
Compute a mask if and only if necessary.
This function will compute a mask iff it is necessary. Otherwise,
return the provided mask (potentially None) when a mask does not need to be
computed.
A mask is never necessary if the values array is of boolean or integer
dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
dtype that is interpretable as either boolean or integer data (eg,
timedelta64), a mask must be provided.
If the skipna parameter is False, a new mask will not be computed.
The mask is computed using isna().
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
mask : Optional[ndarray]
nan-mask if known
Returns
-------
Optional[np.ndarray]
"""
if mask is None:
if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype):
# Boolean data cannot contain nulls, so signal via mask being None
return None
if skipna or needs_i8_conversion(values.dtype):
mask = isna(values)
return mask
def _get_values(
values: np.ndarray,
skipna: bool,
fill_value: Any = None,
fill_value_typ: str | None = None,
mask: np.ndarray | None = None,
) -> tuple[np.ndarray, np.ndarray | None, np.dtype, np.dtype, Any]:
"""
Utility to get the values view, mask, dtype, dtype_max, and fill_value.
If both mask and fill_value/fill_value_typ are not None and skipna is True,
the values array will be copied.
For input arrays of boolean or integer dtypes, copies will only occur if a
precomputed mask, a fill_value/fill_value_typ, and skipna=True are
provided.
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
fill_value : Any
value to fill NaNs with
fill_value_typ : str
Set to '+inf' or '-inf' to handle dtype-specific infinities
mask : Optional[np.ndarray]
nan-mask if known
Returns
-------
values : ndarray
Potential copy of input value array
mask : Optional[ndarray[bool]]
Mask for values, if deemed necessary to compute
dtype : np.dtype
dtype for values
dtype_max : np.dtype
platform independent dtype
fill_value : Any
fill value used
"""
# In _get_values is only called from within nanops, and in all cases
# with scalar fill_value. This guarantee is important for the
# np.where call below
assert is_scalar(fill_value)
# error: Incompatible types in assignment (expression has type "Union[Any,
# Union[ExtensionArray, ndarray]]", variable has type "ndarray")
values = extract_array(values, extract_numpy=True) # type: ignore[assignment]
mask = _maybe_get_mask(values, skipna, mask)
dtype = values.dtype
datetimelike = False
if needs_i8_conversion(values.dtype):
# changing timedelta64/datetime64 to int64 needs to happen after
# finding `mask` above
values = np.asarray(values.view("i8"))
datetimelike = True
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(
dtype, fill_value=fill_value, fill_value_typ=fill_value_typ
)
if skipna and (mask is not None) and (fill_value is not None):
if mask.any():
if dtype_ok or datetimelike:
values = values.copy()
np.putmask(values, mask, fill_value)
else:
# np.where will promote if needed
values = np.where(~mask, values, fill_value)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.dtype(np.int64)
elif is_float_dtype(dtype):
dtype_max = np.dtype(np.float64)
return values, mask, dtype, dtype_max, fill_value
def _na_ok_dtype(dtype: DtypeObj) -> bool:
if needs_i8_conversion(dtype):
return False
return not issubclass(dtype.type, np.integer)
def _wrap_results(result, dtype: np.dtype, fill_value=None):
""" wrap our results if needed """
if result is NaT:
pass
elif is_datetime64_any_dtype(dtype):
if fill_value is None:
# GH#24293
fill_value = iNaT
if not isinstance(result, np.ndarray):
assert not isna(fill_value), "Expected non-null fill_value"
if result == fill_value:
result = np.nan
if isna(result):
result = np.datetime64("NaT", "ns")
else:
result = np.int64(result).view("datetime64[ns]")
else:
# If we have float dtype, taking a view will give the wrong result
result = result.astype(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
if result == fill_value:
result = np.nan
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > np.iinfo(np.int64).max:
raise ValueError("overflow in timedelta operation")
result = Timedelta(result, unit="ns")
else:
result = result.astype("m8[ns]").view(dtype)
return result
def _datetimelike_compat(func: F) -> F:
"""
If we have datetime64 or timedelta64 values, ensure we have a correct
mask before calling the wrapped function, then cast back afterwards.
"""
@functools.wraps(func)
def new_func(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
**kwargs,
):
orig_values = values
datetimelike = values.dtype.kind in ["m", "M"]
if datetimelike and mask is None:
mask = isna(values)
result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
if datetimelike:
result = _wrap_results(result, orig_values.dtype, fill_value=iNaT)
if not skipna:
assert mask is not None # checked above
result = _mask_datetimelike_result(result, axis, mask, orig_values)
return result
return cast(F, new_func)
def _na_for_min_count(values: np.ndarray, axis: int | None) -> Scalar | np.ndarray:
"""
Return the missing value for `values`.
Parameters
----------
values : ndarray
axis : int or None
axis for the reduction, required if values.ndim > 1.
Returns
-------
result : scalar or ndarray
For 1-D values, returns a scalar of the correct missing type.
For 2-D values, returns a 1-D array where each element is missing.
"""
# we either return np.nan or pd.NaT
if is_numeric_dtype(values):
values = values.astype("float64")
fill_value = na_value_for_dtype(values.dtype)
if values.ndim == 1:
return fill_value
elif axis is None:
return fill_value
else:
result_shape = values.shape[:axis] + values.shape[axis + 1 :]
return np.full(result_shape, fill_value, dtype=values.dtype)
def nanany(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> bool:
"""
Check if any elements along an axis evaluate to True.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2])
>>> nanops.nanany(s)
True
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([np.nan])
>>> nanops.nanany(s)
False
"""
values, _, _, _, _ = _get_values(values, skipna, fill_value=False, mask=mask)
# For object type, any won't necessarily return
# boolean values (numpy/numpy#4352)
if is_object_dtype(values):
values = values.astype(bool)
# error: Incompatible return value type (got "Union[bool_, ndarray]", expected
# "bool")
return values.any(axis) # type: ignore[return-value]
def nanall(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> bool:
"""
Check if all elements along an axis evaluate to True.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanall(s)
True
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 0])
>>> nanops.nanall(s)
False
"""
values, _, _, _, _ = _get_values(values, skipna, fill_value=True, mask=mask)
# For object type, all won't necessarily return
# boolean values (numpy/numpy#4352)
if is_object_dtype(values):
values = values.astype(bool)
# error: Incompatible return value type (got "Union[bool_, ndarray]", expected
# "bool")
return values.all(axis) # type: ignore[return-value]
@disallow("M8")
@_datetimelike_compat
def nansum(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
min_count: int = 0,
mask: np.ndarray | None = None,
) -> float:
"""
Sum the elements along an axis ignoring NaNs
Parameters
----------
values : ndarray[dtype]
axis : int, optional
skipna : bool, default True
min_count : int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : dtype
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nansum(s)
3.0
"""
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
# error: Incompatible types in assignment (expression has type
# "Type[float64]", variable has type "dtype")
dtype_sum = np.float64 # type: ignore[assignment]
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)
return the_sum
def _mask_datetimelike_result(
result: np.ndarray | np.datetime64 | np.timedelta64,
axis: int | None,
mask: np.ndarray,
orig_values: np.ndarray,
) -> np.ndarray | np.datetime64 | np.timedelta64 | NaTType:
if isinstance(result, np.ndarray):
# we need to apply the mask
result = result.astype("i8").view(orig_values.dtype)
axis_mask = mask.any(axis=axis)
result[axis_mask] = iNaT
else:
if mask.any():
return NaT
return result
@disallow(PeriodDtype)
@bottleneck_switch()
@_datetimelike_compat
def nanmean(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> float:
"""
Compute the mean of the element along an axis ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanmean(s)
1.5
"""
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
dtype_sum = dtype_max
dtype_count = np.dtype(np.float64)
# not using needs_i8_conversion because that includes period
if dtype.kind in ["m", "M"]:
dtype_sum = np.dtype(np.float64)
elif is_integer_dtype(dtype):
dtype_sum = np.dtype(np.float64)
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(values.shape, mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, "ndim", False):
count = cast(np.ndarray, count)
with np.errstate(all="ignore"):
# suppress division by zero warnings
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return the_mean
@bottleneck_switch()
def nanmedian(values, *, axis=None, skipna=True, mask=None):
"""
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 2])
>>> nanops.nanmedian(s)
2.0
"""
def get_median(x):
mask = notna(x)
if not skipna and not mask.all():
return np.nan
with warnings.catch_warnings():
# Suppress RuntimeWarning about All-NaN slice
warnings.filterwarnings("ignore", "All-NaN slice encountered")
res = np.nanmedian(x[mask])
return res
values, mask, dtype, _, _ = _get_values(values, skipna, mask=mask)
if not is_float_dtype(values.dtype):
try:
values = values.astype("f8")
except ValueError as err:
# e.g. "could not convert string to float: 'a'"
raise TypeError(str(err)) from err
if mask is not None:
values[mask] = np.nan
if axis is None:
values = values.ravel("K")
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
if not skipna:
res = np.apply_along_axis(get_median, axis, values)
else:
# fastpath for the skipna case
with warnings.catch_warnings():
# Suppress RuntimeWarning about All-NaN slice
warnings.filterwarnings("ignore", "All-NaN slice encountered")
res = np.nanmedian(values, axis)
else:
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
res = get_empty_reduction_result(values.shape, axis, np.float_, np.nan)
else:
# otherwise return a scalar value
res = get_median(values) if notempty else np.nan
return _wrap_results(res, dtype)
def get_empty_reduction_result(
shape: tuple[int, ...], axis: int, dtype: np.dtype, fill_value: Any
) -> np.ndarray:
"""
The result from a reduction on an empty ndarray.
Parameters
----------
shape : Tuple[int]
axis : int
dtype : np.dtype
fill_value : Any
Returns
-------
np.ndarray
"""
shp = np.array(shape)
dims = np.arange(len(shape))
ret = np.empty(shp[dims != axis], dtype=dtype)
ret.fill(fill_value)
return ret
def _get_counts_nanvar(
values_shape: Shape,
mask: np.ndarray | None,
axis: int | None,
ddof: int,
dtype: Dtype = float,
) -> tuple[int | np.ndarray, int | np.ndarray]:
"""
Get the count of non-null values along an axis, accounting
for degrees of freedom.
Parameters
----------
values_shape : Tuple[int, ...]
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
ddof : int
degrees of freedom
dtype : type, optional
type to use for count
Returns
-------
count : scalar or array
d : scalar or array
"""
dtype = get_dtype(dtype)
count = _get_counts(values_shape, mask, axis, dtype=dtype)
# error: Unsupported operand types for - ("int" and "generic")
# error: Unsupported operand types for - ("float" and "generic")
d = count - dtype.type(ddof) # type: ignore[operator]
# always return NaN, never inf
if is_scalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
# error: Incompatible types in assignment (expression has type
# "Union[bool, Any]", variable has type "ndarray")
mask2: np.ndarray = count <= ddof # type: ignore[assignment]
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
# error: Incompatible return value type (got "Tuple[Union[int, float,
# ndarray], Any]", expected "Tuple[Union[int, ndarray], Union[int,
# ndarray]]")
return count, d # type: ignore[return-value]
@bottleneck_switch(ddof=1)
def nanstd(values, *, axis=None, skipna=True, ddof=1, mask=None):
"""
Compute the standard deviation along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanstd(s)
1.0
"""
if values.dtype == "M8[ns]":
values = values.view("m8[ns]")
orig_dtype = values.dtype
values, mask, _, _, _ = _get_values(values, skipna, mask=mask)
result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask))
return _wrap_results(result, orig_dtype)
@disallow("M8", "m8")
@bottleneck_switch(ddof=1)
def nanvar(values, *, axis=None, skipna=True, ddof=1, mask=None):
"""
Compute the variance along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanvar(s)
1.0
"""
values = extract_array(values, extract_numpy=True)
dtype = values.dtype
mask = _maybe_get_mask(values, skipna, mask)
if is_any_int_dtype(dtype):
values = values.astype("f8")
if mask is not None:
values[mask] = np.nan
if is_float_dtype(values.dtype):
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
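#
# In symbols, with n the non-null count and d = n - ddof:
#   mean = sum(x) / n
#   var  = sum((x - mean)**2) / d
# the first pass computes the mean, the second the squared deviations.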
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
sqr = _ensure_numeric((avg - values) ** 2)
if mask is not None:
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
return result
@disallow("M8", "m8")
def nansem(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
ddof: int = 1,
mask: np.ndarray | None = None,
) -> float:
"""
Compute the standard error in the mean along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nansem(s)
0.5773502691896258
"""
# This checks if non-numeric-like data is passed with numeric_only=False
# and raises a TypeError otherwise
nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch(name="nan" + meth)
@_datetimelike_compat
def reduction(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> Dtype:
values, mask, dtype, dtype_max, fill_value = _get_values(
values, skipna, fill_value_typ=fill_value_typ, mask=mask
)
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except (AttributeError, TypeError, ValueError):
result = np.nan
else:
result = getattr(values, meth)(axis)
result = _maybe_null_out(result, axis, mask, values.shape)
return result
return reduction
nanmin = _nanminmax("min", fill_value_typ="+inf")
nanmax = _nanminmax("max", fill_value_typ="-inf")
@disallow("O")
def nanargmax(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> int | np.ndarray:
"""
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int or ndarray[int]
The index/indices of max value in specified axis or -1 in the NA case
Examples
--------
>>> import pandas.core.nanops as nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmax(arr)
4
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 2] = np.nan
>>> arr
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., nan],
[ 9., 10., nan]])
>>> nanops.nanargmax(arr, axis=1)
array([2, 2, 1, 1])
"""
values, mask, _, _, _ = _get_values(values, True, fill_value_typ="-inf", mask=mask)
# error: Need type annotation for 'result'
result = values.argmax(axis) # type: ignore[var-annotated]
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow("O")
def nanargmin(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> int | np.ndarray:
"""
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int or ndarray[int]
The index/indices of min value in specified axis or -1 in the NA case
Examples
--------
>>> import pandas.core.nanops as nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmin(arr)
0
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 0] = np.nan
>>> arr
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[nan, 7., 8.],
[nan, 10., 11.]])
>>> nanops.nanargmin(arr, axis=1)
array([0, 0, 1, 1])
"""
values, mask, _, _, _ = _get_values(values, True, fill_value_typ="+inf", mask=mask)
# error: Need type annotation for 'result'
result = values.argmin(axis) # type: ignore[var-annotated]
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow("M8", "m8")
def nanskew(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> float:
"""
Compute the sample skewness.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G1. The algorithm computes this coefficient directly
from the second and third central moment.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 1, 2])
>>> nanops.nanskew(s)
1.7320508075688787
"""
# error: Incompatible types in assignment (expression has type "Union[Any,
# Union[ExtensionArray, ndarray]]", variable has type "ndarray")
values = extract_array(values, extract_numpy=True) # type: ignore[assignment]
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count = _get_counts(values.shape, mask, axis)
else:
count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted3 = adjusted2 * adjusted
m2 = adjusted2.sum(axis, dtype=np.float64)
m3 = adjusted3.sum(axis, dtype=np.float64)
# floating point error
#
# #18044: calc_skew in _libs/windows.pyx follows this behavior
# to fix the fperr, treating m2 < 1e-14 as zero
m2 = _zero_out_fperr(m2)
m3 = _zero_out_fperr(m3)
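# With m2 and m3 as raw sums of squared/cubed deviations, the expression
# below is algebraically equal to the textbook adjusted Fisher-Pearson
# coefficient
#   G1 = sqrt(n*(n-1))/(n-2) * g1,  where g1 = (m3/n) / (m2/n)**1.5,
# since the powers of n collapse into n*sqrt(n-1)/(n-2) * m3/m2**1.5.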
with np.errstate(invalid="ignore", divide="ignore"):
result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(m2 == 0, 0, result)
result[count < 3] = np.nan
else:
result = 0 if m2 == 0 else result
if count < 3:
return np.nan
return result
@disallow("M8", "m8")
def nankurt(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
mask: np.ndarray | None = None,
) -> float:
"""
Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
"""
# error: Incompatible types in assignment (expression has type "Union[Any,
# Union[ExtensionArray, ndarray]]", variable has type "ndarray")
values = extract_array(values, extract_numpy=True) # type: ignore[assignment]
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count = _get_counts(values.shape, mask, axis)
else:
count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted4 = adjusted2 ** 2
m2 = adjusted2.sum(axis, dtype=np.float64)
m4 = adjusted4.sum(axis, dtype=np.float64)
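# With m2 and m4 as raw sums, the numerator/denominator - adj expression
# below is algebraically equal to the textbook estimator
#   G2 = (n-1)/((n-2)*(n-3)) * ((n+1)*g2 + 6),
# where g2 = (m4/n)/(m2/n)**2 - 3; splitting it this way lets the fperr
# handling zero out tiny m2 terms before dividing.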
with np.errstate(invalid="ignore", divide="ignore"):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numerator = count * (count + 1) * (count - 1) * m4
denominator = (count - 2) * (count - 3) * m2 ** 2
# floating point error
#
# #18044: calc_kurt in _libs/windows.pyx follows this behavior
# to fix the fperr, treating denom < 1e-14 as zero
numerator = _zero_out_fperr(numerator)
denominator = _zero_out_fperr(denominator)
if not isinstance(denominator, np.ndarray):
# if ``denom`` is a scalar, check these corner cases first before
# doing division
if count < 4:
return np.nan
if denominator == 0:
return 0
with np.errstate(invalid="ignore", divide="ignore"):
result = numerator / denominator - adj
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(denominator == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow("M8", "m8")
def nanprod(
values: np.ndarray,
*,
axis: int | None = None,
skipna: bool = True,
min_count: int = 0,
mask: np.ndarray | None = None,
) -> float:
"""
Parameters
----------
values : ndarray[dtype]
axis : int, optional
skipna : bool, default True
min_count : int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
Dtype
The product of all elements on a given axis (NaNs are treated as 1).
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, 3, np.nan])
>>> nanops.nanprod(s)
6.0
"""
mask = _maybe_get_mask(values, skipna, mask)
if skipna and mask is not None:
values = values.copy()
values[mask] = 1
result = values.prod(axis)
# error: Incompatible return value type (got "Union[ndarray, float]", expected
# "float")
return _maybe_null_out( # type: ignore[return-value]
result, axis, mask, values.shape, min_count=min_count
)
def _maybe_arg_null_out(
result: np.ndarray, axis: int | None, mask: np.ndarray | None, skipna: bool
) -> np.ndarray | int:
# helper function for nanargmin/nanargmax
if mask is None:
return result
if axis is None or not getattr(result, "ndim", False):
if skipna:
if mask.all():
# error: Incompatible types in assignment (expression has type
# "int", variable has type "ndarray")
result = -1 # type: ignore[assignment]
else:
if mask.any():
# error: Incompatible types in assignment (expression has type
# "int", variable has type "ndarray")
result = -1 # type: ignore[assignment]
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(
values_shape: tuple[int, ...],
mask: np.ndarray | None,
axis: int | None,
dtype: Dtype = float,
) -> int | float | np.ndarray:
"""
Get the count of non-null values along an axis
Parameters
----------
values_shape : tuple of int
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
dtype : type, optional
type to use for count
Returns
-------
count : scalar or array
"""
dtype = get_dtype(dtype)
if axis is None:
if mask is not None:
n = mask.size - mask.sum()
else:
n = np.prod(values_shape)
# error: Incompatible return value type (got "Union[Any, generic]",
# expected "Union[int, float, ndarray]")
return dtype.type(n) # type: ignore[return-value]
if mask is not None:
count = mask.shape[axis] - mask.sum(axis)
else:
count = values_shape[axis]
if is_scalar(count):
# error: Incompatible return value type (got "Union[Any, generic]",
# expected "Union[int, float, ndarray]")
return dtype.type(count) # type: ignore[return-value]
try:
return count.astype(dtype)
except AttributeError:
# error: Argument "dtype" to "array" has incompatible type
# "Union[ExtensionDtype, dtype]"; expected "Union[dtype, None, type,
# _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int,
# Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]"
return np.array(count, dtype=dtype) # type: ignore[arg-type]
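# Illustrative sketch (not part of the original pandas source): how the
# count helper above behaves with and without an axis; assumes the
# module-level ``np`` import.
def _demo_get_counts():
    mask = np.array([[True, False, False]])  # one missing value out of three
    # axis=None counts all non-null entries and returns a numpy scalar
    assert _get_counts((1, 3), mask, axis=None) == 2.0
    # with an axis, a per-slice count array is returned
    assert (_get_counts((1, 3), mask, axis=1) == np.array([2.0])).all()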
def _maybe_null_out(
result: np.ndarray | float | NaTType,
axis: int | None,
mask: np.ndarray | None,
shape: tuple[int, ...],
min_count: int = 1,
) -> np.ndarray | float | NaTType:
"""
Returns
-------
Dtype
        ``result`` with entries set to NA wherever the number of non-null
        values is below ``min_count``.
"""
if mask is not None and axis is not None and isinstance(result, np.ndarray):
null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
if np.any(null_mask):
if is_numeric_dtype(result):
if np.iscomplexobj(result):
result = result.astype("c16")
else:
result = result.astype("f8")
result[null_mask] = np.nan
else:
# GH12941, use None to auto cast null
result[null_mask] = None
elif result is not NaT:
if check_below_min_count(shape, mask, min_count):
result = np.nan
return result
def check_below_min_count(
shape: tuple[int, ...], mask: np.ndarray | None, min_count: int
) -> bool:
"""
Check for the `min_count` keyword. Returns True if below `min_count` (when
missing value should be returned from the reduction).
Parameters
----------
shape : tuple
The shape of the values (`values.shape`).
mask : ndarray or None
Boolean numpy array (typically of same shape as `shape`) or None.
min_count : int
Keyword passed through from sum/prod call.
Returns
-------
bool
"""
if min_count > 0:
if mask is None:
# no missing values, only check size
non_nulls = np.prod(shape)
else:
non_nulls = mask.size - mask.sum()
if non_nulls < min_count:
return True
return False
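# Illustrative sketch (not part of the original pandas source) of the
# ``min_count`` semantics documented above.
def _demo_check_below_min_count():
    # without a mask only the total size matters
    assert check_below_min_count((4,), None, min_count=5)
    assert not check_below_min_count((4,), None, min_count=4)
    # a mask lowers the non-null count
    mask = np.array([True, False, False, False])
    assert check_below_min_count((4,), mask, min_count=4)
    assert not check_below_min_count((4,), mask, min_count=3)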
def _zero_out_fperr(arg):
    # #18044: zero out tiny values to fix the rolling skew/kurt fperr issue
if isinstance(arg, np.ndarray):
with np.errstate(invalid="ignore"):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
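# Illustrative sketch (not part of the original pandas source): magnitudes
# below the 1e-14 threshold are snapped to exact zero, for arrays and for
# numpy scalars alike.
def _demo_zero_out_fperr():
    arr = np.array([1e-16, -1e-15, 0.5])
    assert (_zero_out_fperr(arr) == np.array([0.0, 0.0, 0.5])).all()
    # scalars keep their dtype via ``arg.dtype.type(0)``
    assert _zero_out_fperr(np.float64(1e-16)) == 0.0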
@disallow("M8", "m8")
def nancorr(
a: np.ndarray, b: np.ndarray, *, method="pearson", min_periods: int | None = None
):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError("Operands to nancorr must have same size")
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method == "kendall":
from scipy.stats import kendalltau
def func(a, b):
return kendalltau(a, b)[0]
return func
elif method == "spearman":
from scipy.stats import spearmanr
def func(a, b):
return spearmanr(a, b)[0]
return func
elif method == "pearson":
def func(a, b):
return np.corrcoef(a, b)[0, 1]
return func
elif callable(method):
return method
raise ValueError(
f"Unknown method '{method}', expected one of "
"'kendall', 'spearman', 'pearson', or callable"
)
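# Illustrative sketch (not part of the original pandas source): nancorr
# drops pairwise-missing entries before delegating to the function that
# get_corr_func resolves.
def _demo_nancorr():
    a = np.array([1.0, 2.0, np.nan, 4.0])
    b = np.array([1.0, 2.0, 3.0, 4.0])
    # perfectly linear once the NaN pair is dropped -> Pearson r == 1.0
    assert np.isclose(nancorr(a, b), 1.0)
    # a user-supplied callable is passed through unchanged
    f = get_corr_func(lambda x, y: 0.0)
    assert f(a, b) == 0.0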
@disallow("M8", "m8")
def nancov(
a: np.ndarray,
b: np.ndarray,
*,
min_periods: int | None = None,
ddof: int | None = 1,
):
if len(a) != len(b):
raise AssertionError("Operands to nancov must have same size")
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b, ddof=ddof)[0, 1]
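# Illustrative sketch (not part of the original pandas source): like
# nancorr, nancov masks pairwise-missing values and then defers to np.cov
# with the requested ``ddof``.
def _demo_nancov():
    a = np.array([1.0, 2.0, 3.0, np.nan])
    b = np.array([2.0, 4.0, 6.0, 8.0])
    expected = np.cov(a[:3], b[:3], ddof=1)[0, 1]
    assert np.isclose(nancov(a, b), expected)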
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except (TypeError, ValueError):
try:
x = x.astype(np.float64)
except ValueError as err:
# GH#29941 we get here with object arrays containing strs
raise TypeError(f"Could not convert {x} to numeric") from err
else:
if not np.any(np.imag(x)):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except (TypeError, ValueError):
# e.g. "1+1j" or "foo"
try:
x = complex(x)
except ValueError as err:
# e.g. "foo"
raise TypeError(f"Could not convert {x} to numeric") from err
return x
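# Illustrative examples (not part of the original pandas source) of the
# coercion rules implemented above.
def _demo_ensure_numeric():
    # bool/int ndarrays are upcast to float64
    assert _ensure_numeric(np.array([True, False])).dtype == np.float64
    # object arrays try complex first, then drop an all-zero imaginary part
    out = _ensure_numeric(np.array(["1.5", "2.5"], dtype=object))
    assert out.dtype == np.float64 and out[0] == 1.5
    # scalars fall back from float() to complex() parsing
    assert _ensure_numeric("1+1j") == (1 + 1j)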
# NA-friendly array comparisons
def make_nancomp(op):
def f(x, y):
xmask = isna(x)
ymask = isna(y)
mask = xmask | ymask
with np.errstate(all="ignore"):
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype("O")
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
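# Illustrative sketch (not part of the original pandas source): comparisons
# built by make_nancomp propagate NaN instead of silently returning False at
# missing positions.
def _demo_nancomp():
    x = np.array([1.0, np.nan, 3.0])
    y = np.array([0.0, 2.0, np.nan])
    out = nangt(x, y)
    assert out.dtype == object  # upcast from bool so NaN can be stored
    assert out[0] and np.isnan(out[1]) and np.isnan(out[2])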
def _nanpercentile_1d(
values: np.ndarray, mask: np.ndarray, q: np.ndarray, na_value: Scalar, interpolation
) -> Scalar | np.ndarray:
"""
Wrapper for np.percentile that skips missing values, specialized to
1-dimensional case.
Parameters
----------
values : array over which to find quantiles
mask : ndarray[bool]
locations in values that should be considered missing
q : np.ndarray[float64] of quantile indices to find
na_value : scalar
value to return for empty or all-null values
interpolation : str
Returns
-------
quantiles : scalar or array
"""
# mask is Union[ExtensionArray, ndarray]
values = values[~mask]
if len(values) == 0:
return np.array([na_value] * len(q), dtype=values.dtype)
return np.percentile(values, q, interpolation=interpolation)
def nanpercentile(
values: np.ndarray,
q: np.ndarray,
*,
na_value,
mask: np.ndarray,
interpolation,
):
"""
Wrapper for np.percentile that skips missing values.
Parameters
----------
values : np.ndarray[ndim=2] over which to find quantiles
q : np.ndarray[float64] of quantile indices to find
na_value : scalar
value to return for empty or all-null values
mask : ndarray[bool]
locations in values that should be considered missing
interpolation : str
Returns
-------
quantiles : scalar or array
"""
if values.dtype.kind in ["m", "M"]:
# need to cast to integer to avoid rounding errors in numpy
result = nanpercentile(
values.view("i8"),
q=q,
na_value=na_value.view("i8"),
mask=mask,
interpolation=interpolation,
)
# Note: we have to do `astype` and not view because in general we
# have float result at this point, not i8
return result.astype(values.dtype)
if not lib.is_scalar(mask) and mask.any():
        # Caller is responsible for ensuring mask and values shapes match
assert mask.shape == values.shape
result = [
_nanpercentile_1d(val, m, q, na_value, interpolation=interpolation)
for (val, m) in zip(list(values), list(mask))
]
result = np.array(result, dtype=values.dtype, copy=False).T
return result
else:
return np.percentile(values, q, axis=1, interpolation=interpolation)
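# Illustrative sketch (not part of the original pandas source): note that
# ``q`` is on the 0-100 percentile scale expected by np.percentile.
def _demo_nanpercentile():
    vals = np.array([[1.0, 2.0, np.nan, 4.0]])
    out = nanpercentile(vals, np.array([50.0]), na_value=np.nan,
                        mask=np.isnan(vals), interpolation="linear")
    # median of the non-null row values [1, 2, 4]
    assert np.isclose(out[0, 0], 2.0)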
def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
"""
Cumulative function with skipna support.
Parameters
----------
values : np.ndarray or ExtensionArray
accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate}
skipna : bool
Returns
-------
np.ndarray or ExtensionArray
"""
mask_a, mask_b = {
np.cumprod: (1.0, np.nan),
np.maximum.accumulate: (-np.inf, np.nan),
np.cumsum: (0.0, np.nan),
np.minimum.accumulate: (np.inf, np.nan),
}[accum_func]
# We will be applying this function to block values
if values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
# numpy 1.18 started sorting NaTs at the end instead of beginning,
# so we need to work around to maintain backwards-consistency.
orig_dtype = values.dtype
# We need to define mask before masking NaTs
mask = isna(values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = values
changed = False
result = accum_func(y.view("i8"), axis=0)
if skipna:
result[mask] = iNaT
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(values.dtype, np.dtype):
result = result.view(orig_dtype)
else:
# DatetimeArray/TimedeltaArray
# TODO: have this case go through a DTA method?
# For DatetimeTZDtype, view result as M8[ns]
npdtype = orig_dtype if isinstance(orig_dtype, np.dtype) else "M8[ns]"
# error: "Type[ExtensionArray]" has no attribute "_simple_new"
result = type(values)._simple_new( # type: ignore[attr-defined]
result.view(npdtype), dtype=orig_dtype
)
elif skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)):
vals = values.copy()
mask = isna(vals)
vals[mask] = mask_a
result = accum_func(vals, axis=0)
result[mask] = mask_b
else:
result = accum_func(values, axis=0)
return result
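# Illustrative sketch (not part of the original pandas source): with
# skipna=True the identity element from the table above is substituted at
# missing slots, then NaN is restored in the output.
def _demo_na_accum_func():
    vals = np.array([1.0, np.nan, 3.0])
    out = na_accum_func(vals, np.cumsum, skipna=True)
    assert out[0] == 1.0 and np.isnan(out[1]) and out[2] == 4.0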
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/core/strings.py | 9 | 50351 | import numpy as np
from pandas.compat import zip
from pandas.core.common import (isnull, _values_from_object, is_bool_dtype, is_list_like,
is_categorical_dtype, is_object_dtype, take_1d)
import pandas.compat as compat
from pandas.core.base import AccessorProperty, NoNewAttributesMixin
from pandas.util.decorators import Appender, deprecate_kwarg
import re
import pandas.lib as lib
import warnings
import textwrap
_shared_docs = dict()
def _get_array_list(arr, others):
from pandas.core.series import Series
if len(others) and isinstance(_values_from_object(others)[0],
(list, np.ndarray, Series)):
arrays = [arr] + list(others)
else:
arrays = [arr, others]
return [np.asarray(x, dtype=object) for x in arrays]
def str_cat(arr, others=None, sep=None, na_rep=None):
"""
Concatenate strings in the Series/Index with given separator.
Parameters
----------
others : list-like, or list of list-likes
If None, returns str concatenating strings of the Series
sep : string or None, default None
na_rep : string or None, default None
If None, an NA in any array will propagate
Returns
-------
concat : Series/Index of objects or str
Examples
--------
If ``others`` is specified, corresponding values are
concatenated with the separator. Result will be a Series of strings.
>>> Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',')
0 a,A
1 b,B
2 c,C
dtype: object
Otherwise, strings in the Series are concatenated. Result will be a string.
>>> Series(['a', 'b', 'c']).str.cat(sep=',')
'a,b,c'
Also, you can pass a list of list-likes.
>>> Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',')
0 a,x,1
1 b,y,2
dtype: object
"""
if sep is None:
sep = ''
if others is not None:
arrays = _get_array_list(arr, others)
n = _length_check(arrays)
masks = np.array([isnull(x) for x in arrays])
cats = None
if na_rep is None:
na_mask = np.logical_or.reduce(masks, axis=0)
result = np.empty(n, dtype=object)
np.putmask(result, na_mask, np.nan)
notmask = ~na_mask
tuples = zip(*[x[notmask] for x in arrays])
cats = [sep.join(tup) for tup in tuples]
result[notmask] = cats
else:
for i, x in enumerate(arrays):
x = np.where(masks[i], na_rep, x)
if cats is None:
cats = x
else:
cats = cats + sep + x
result = cats
return result
else:
arr = np.asarray(arr, dtype=object)
mask = isnull(arr)
if na_rep is None and mask.any():
return np.nan
return sep.join(np.where(mask, na_rep, arr))
def _length_check(others):
n = None
for x in others:
if n is None:
n = len(x)
elif len(x) != n:
raise ValueError('All arrays must be same length')
return n
def _na_map(f, arr, na_result=np.nan, dtype=object):
# should really _check_ for NA
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
from pandas.core.series import Series
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, Series):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isnull(arr)
try:
result = lib.map_infer_mask(arr, f, mask.view(np.uint8))
except (TypeError, AttributeError):
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
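# Illustrative sketch (not part of the original pandas source): _na_map only
# applies ``f`` at non-null positions and leaves NA markers untouched.
def _demo_na_map():
    arr = np.array(['ab', np.nan, 'c'], dtype=object)
    out = _na_map(len, arr)
    assert out[0] == 2 and np.isnan(out[1]) and out[2] == 1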
def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
Parameters
----------
pat : string, valid regular expression
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
counts : Series/Index of integer values
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return _na_map(f, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
    Return boolean Series/``array`` indicating whether the given
    pattern/regex is contained in each string of the Series/Index.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
na : default NaN, fill value for missing values.
regex : bool, default True
If True use re.search, otherwise use Python in operator
Returns
-------
contained : Series/array of boolean values
See Also
--------
match : analogous, but stricter, relying on re.match instead of re.search
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
" groups, use str.extract.", UserWarning, stacklevel=3)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
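# Illustrative sketch (not part of the original pandas source):
# case-insensitive regex containment with NA propagation.
def _demo_str_contains():
    arr = np.array(['Mouse', 'dog', np.nan], dtype=object)
    out = str_contains(arr, 'ouse', case=False)
    assert out[0] and not out[1] and isnull(out[2])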
def str_startswith(arr, pat, na=np.nan):
"""
Return boolean Series/``array`` indicating whether each string in the
Series/Index starts with passed pattern. Equivalent to
:meth:`str.startswith`.
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
startswith : Series/array of boolean values
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
"""
Return boolean Series indicating whether each string in the
Series/Index ends with passed pattern. Equivalent to
:meth:`str.endswith`.
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
endswith : Series/array of boolean values
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=True, flags=0):
"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : string
Character sequence or regular expression
repl : string
Replacement sequence
n : int, default -1 (all)
Number of replacements to make from start
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
replaced : Series/Index of objects
"""
use_re = not case or len(pat) > 1 or flags
if use_re:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
n = n if n >= 0 else 0
def f(x):
return regex.sub(repl, x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series/Index by indicated number
of times.
Parameters
----------
repeats : int or array
Same value for all (int) or different value per (array)
Returns
-------
repeated : Series/Index of objects
"""
if np.isscalar(repeats):
def rep(x):
try:
return compat.binary_type.__mul__(x, repeats)
except TypeError:
return compat.text_type.__mul__(x, repeats)
return _na_map(rep, arr)
else:
def rep(x, r):
try:
return compat.binary_type.__mul__(x, r)
except TypeError:
return compat.text_type.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = lib.vec_binop(_values_from_object(arr), repeats, rep)
return result
def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
"""
Deprecated: Find groups in each string in the Series/Index
using passed regular expression.
If as_indexer=True, determine if each string matches a regular expression.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
na : default NaN, fill value for missing values.
as_indexer : False, by default, gives deprecated behavior better achieved
using str_extract. True return boolean indexer.
Returns
-------
Series/array of boolean values
if as_indexer=True
Series/Index of tuples
if as_indexer=False, default but deprecated
See Also
--------
    contains : analogous, but less strict, relying on re.search instead of
re.match
extract : now preferred to the deprecated usage of match (as_indexer=False)
Notes
-----
To extract matched groups, which is the deprecated behavior of match, use
str.extract.
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if (not as_indexer) and regex.groups > 0:
# Do this first, to make sure it happens even if the re.compile
# raises below.
warnings.warn("In future versions of pandas, match will change to"
" always return a bool indexer.", FutureWarning,
stacklevel=3)
if as_indexer and regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
" groups, use str.extract.", UserWarning, stacklevel=3)
# If not as_indexer and regex.groups == 0, this returns empty lists
# and is basically useless, so we will not warn.
if (not as_indexer) and regex.groups > 0:
dtype = object
def f(x):
m = regex.match(x)
if m:
return m.groups()
else:
return []
else:
# This is the new behavior of str_match.
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def str_extract(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
expression.
Parameters
----------
pat : string
Pattern or regular expression
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
extracted groups : Series (one group) or DataFrame (multiple groups)
Note that dtype of the result is always object, even when no match is
found and the result is a Series or DataFrame containing only NaN
values.
Examples
--------
A pattern with one group will return a Series. Non-matches will be NaN.
>>> Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
0 1
1 2
2 NaN
dtype: object
A pattern with more than one group will return a DataFrame.
>>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
"""
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.index import Index
regex = re.compile(pat, flags=flags)
# just to be safe, check this
if regex.groups == 0:
raise ValueError("This pattern contains no groups to capture.")
empty_row = [np.nan]*regex.groups
def f(x):
if not isinstance(x, compat.string_types):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
if regex.groups == 1:
result = np.array([f(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
if isinstance(arr, Index):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
result = DataFrame([f(val) for val in arr],
columns=columns,
index=arr.index,
dtype=object)
return result, name
def str_get_dummies(arr, sep='|'):
"""
Split each string in the Series by sep and return a frame of
dummy/indicator variables.
Parameters
----------
sep : string, default "|"
String to split on.
Returns
-------
dummies : DataFrame
Examples
--------
>>> Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
See Also
--------
pandas.get_dummies
"""
from pandas.core.frame import DataFrame
from pandas.core.index import Index
# GH9980, Index.str does not support get_dummies() as it returns a frame
if isinstance(arr, Index):
raise TypeError("get_dummies is not supported for string methods on Index")
# TODO remove this hack?
arr = arr.fillna('')
try:
arr = sep + arr + sep
except TypeError:
arr = sep + arr.astype(str) + sep
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
tags = sorted(tags - set([""]))
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
for i, t in enumerate(tags):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
return DataFrame(dummies, arr.index, tags)
def str_join(arr, sep):
"""
Join lists contained as elements in the Series/Index with
passed delimiter. Equivalent to :meth:`str.join`.
Parameters
----------
sep : string
Delimiter
Returns
-------
joined : Series/Index of objects
"""
return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the
Series/Index. Equivalent to :func:`re.findall`.
Parameters
----------
pat : string
Pattern or regular expression
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
matches : Series/Index of lists
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
"""
    Return indexes in each string in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``
Returns
-------
found : Series/Index of integer values
"""
if not isinstance(sub, compat.string_types):
msg = 'expected a string object, not {0}'
raise TypeError(msg.format(type(sub).__name__))
if side == 'left':
method = 'find'
elif side == 'right':
method = 'rfind'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side='left'):
if not isinstance(sub, compat.string_types):
msg = 'expected a string object, not {0}'
raise TypeError(msg.format(type(sub).__name__))
if side == 'left':
method = 'index'
elif side == 'right':
method = 'rindex'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
"""
Pad strings in the Series/Index with an additional character to
specified side.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with spaces
side : {'left', 'right', 'both'}, default 'left'
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
padded : Series/Index of objects
"""
if not isinstance(fillchar, compat.string_types):
msg = 'fillchar must be a character, not {0}'
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if side == 'left':
f = lambda x: x.rjust(width, fillchar)
elif side == 'right':
f = lambda x: x.ljust(width, fillchar)
elif side == 'both':
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr)
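# Illustrative sketch (not part of the original pandas source): 'both' pads
# via str.center, putting the extra fill character on the right.
def _demo_str_pad():
    out = str_pad(np.array(['ab'], dtype=object), 5, side='both', fillchar='*')
    assert out[0] == '*ab**'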
def str_split(arr, pat=None, n=None):
"""
Split each string (a la re.split) in the Series/Index by given
pattern, propagating NA values. Equivalent to :meth:`str.split`.
Parameters
----------
pat : string, default None
String or regular expression to split on. If None, splits on whitespace
n : int, default -1 (all)
        None, 0 and -1 will be interpreted as returning all splits
expand : bool, default False
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
.. versionadded:: 0.16.1
return_type : deprecated, use `expand`
Returns
-------
split : Series/Index or DataFrame/MultiIndex of objects
"""
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if len(pat) == 1:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if n is None or n == -1:
n = 0
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
res = _na_map(f, arr)
return res
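# Illustrative sketch (not part of the original pandas source): single-char
# patterns use str.split, longer patterns go through re.split.
def _demo_str_split():
    assert str_split(np.array(['a_b_c'], dtype=object), '_')[0] == ['a', 'b', 'c']
    assert str_split(np.array(['a__b'], dtype=object), '__')[0] == ['a', 'b']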
def str_rsplit(arr, pat=None, n=None):
"""
Split each string in the Series/Index by the given delimiter
string, starting at the end of the string and working to the front.
Equivalent to :meth:`str.rsplit`.
.. versionadded:: 0.16.2
Parameters
----------
pat : string, default None
Separator to split on. If None, splits on whitespace
n : int, default -1 (all)
        None, 0 and -1 will be interpreted as returning all splits
expand : bool, default False
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
Returns
-------
split : Series/Index or DataFrame/MultiIndex of objects
"""
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
res = _na_map(f, arr)
return res
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series/Index
Parameters
----------
start : int or None
stop : int or None
step : int or None
Returns
-------
sliced : Series/Index of objects
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a slice of each string in the Series/Index with another
string.
Parameters
----------
start : int or None
stop : int or None
repl : str or None
String for replacement
Returns
-------
replaced : Series/Index of objects
"""
if repl is None:
repl = ''
def f(x):
if x[start:stop] == '':
local_stop = start
else:
local_stop = stop
y = ''
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
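# Illustrative sketch (not part of the original pandas source): the slice
# [start:stop] is excised and ``repl`` spliced in.
def _demo_str_slice_replace():
    out = str_slice_replace(np.array(['abcdef'], dtype=object), 1, 3, 'X')
    assert out[0] == 'aXdef'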
def str_strip(arr, to_strip=None, side='both'):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
stripped : Series/Index of objects
"""
if side == 'both':
f = lambda x: x.strip(to_strip)
elif side == 'left':
f = lambda x: x.lstrip(to_strip)
elif side == 'right':
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr)
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line-width
expand_tabs : bool, optional
If true, tab characters will be expanded to spaces (default: True)
replace_whitespace : bool, optional
If true, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True)
drop_whitespace : bool, optional
If true, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True)
break_long_words : bool, optional
If true, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width. (default: True)
break_on_hyphens : bool, optional
If true, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words. (default: True)
Returns
-------
wrapped : Series/Index of objects
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
"""
kwargs['width'] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr)
def str_translate(arr, table, deletechars=None):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`. Note that the optional
argument deletechars is only valid if you are using python 2. For python 3,
character deletion should be specified via the table argument.
Parameters
----------
table : dict (python 3), str or None (python 2)
In python 3, table is a mapping of Unicode ordinals to Unicode ordinals,
strings, or None. Unmapped characters are left untouched. Characters
mapped to None are deleted. :meth:`str.maketrans` is a helper function
for making translation tables.
In python 2, table is either a string of length 256 or None. If the
table argument is None, no translation is applied and the operation
simply removes the characters in deletechars. :func:`string.maketrans`
is a helper function for making translation tables.
deletechars : str, optional (python 2)
A string of characters to delete. This argument is only valid
in python 2.
Returns
-------
translated : Series/Index of objects
"""
if deletechars is None:
f = lambda x: x.translate(table)
else:
from pandas import compat
if compat.PY3:
raise ValueError("deletechars is not a valid argument for "
"str.translate in python 3. You should simply "
"specify character deletions in the table argument")
f = lambda x: x.translate(table, deletechars)
return _na_map(f, arr)
def str_get(arr, i):
"""
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Integer index (location)
Returns
-------
items : Series/Index of objects
"""
f = lambda x: x[i] if len(x) > i else np.nan
return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
"""
Decode character string in the Series/Index to unicode
using indicated encoding. Equivalent to :meth:`str.decode`.
Parameters
----------
encoding : string
errors : string
Returns
-------
decoded : Series/Index of objects
"""
f = lambda x: x.decode(encoding, errors)
return _na_map(f, arr)
def str_encode(arr, encoding, errors="strict"):
"""
Encode character string in the Series/Index to some other encoding
using indicated encoding. Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : string
errors : string
Returns
-------
encoded : Series/Index of objects
"""
f = lambda x: x.encode(encoding, errors)
return _na_map(f, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
def wrapper(self):
result = _na_map(f, self._data, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError('Provide docstring')
return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
def wrapper1(self, pat):
result = f(self._data, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._data, pat, flags=flags, **kwargs)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self._data, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
def __init__(self, data):
self._is_categorical = is_categorical_dtype(data)
self._data = data.cat.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop,
step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notnull().any():
yield g
i += 1
g = self.get(i)
def _wrap_result(self, result, use_codes=True, name=None):
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this before
# the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
        # leave as it is to keep extract and get_dummies results;
        # can be merged into _wrap_result_expand in v0.17
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.index import Index
if not hasattr(result, 'ndim'):
return result
name = name or getattr(result, 'name', None) or self._orig.name
if result.ndim == 1:
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
return Index(result, name=name)
return Series(result, index=self._orig.index, name=name)
else:
assert result.ndim < 3
return DataFrame(result, index=self._orig.index)
def _wrap_result_expand(self, result, expand=False):
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
# for category, we do the stuff on the categories, so blow it up
# to the full series again
if self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
from pandas.core.index import Index, MultiIndex
if not hasattr(result, 'ndim'):
return result
if isinstance(self._orig, Index):
name = getattr(result, 'name', None)
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if hasattr(result, 'dtype') and is_bool_dtype(result):
return result
if expand:
result = list(result)
return MultiIndex.from_tuples(result, names=name)
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
def cons_row(x):
if is_list_like(x):
return x
else:
return [ x ]
cons = self._orig._constructor_expanddim
data = [cons_row(x) for x in result]
return cons(data, index=index)
else:
name = getattr(result, 'name', None)
cons = self._orig._constructor
return cons(result, name=name, index=index)
@copy(str_cat)
def cat(self, others=None, sep=None, na_rep=None):
data = self._orig if self._is_categorical else self._data
result = str_cat(data, others=others, sep=sep, na_rep=na_rep)
return self._wrap_result(result, use_codes=(not self._is_categorical))
@deprecate_kwarg('return_type', 'expand',
mapping={'series': False, 'frame': True})
@copy(str_split)
def split(self, pat=None, n=-1, expand=False):
result = str_split(self._data, pat, n=n)
return self._wrap_result_expand(result, expand=expand)
@copy(str_rsplit)
def rsplit(self, pat=None, n=-1, expand=False):
result = str_rsplit(self._data, pat, n=n)
return self._wrap_result_expand(result, expand=expand)
_shared_docs['str_partition'] = ("""
Split the string at the %(side)s occurrence of `sep`, and return 3 elements
containing the part before the separator, the separator itself,
and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
pat : string, default whitespace
String to split on.
expand : bool, default True
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
Returns
-------
split : DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Examples
--------
    >>> s = Series(['A_B_C', 'D_E_F', 'X'])
    >>> s
0 A_B_C
1 D_E_F
2 X
dtype: object
>>> s.str.partition('_')
0 1 2
0 A _ B_C
1 D _ E_F
2 X
>>> s.str.rpartition('_')
0 1 2
0 A_B _ C
1 D_E _ F
2 X
""")
@Appender(_shared_docs['str_partition'] % {'side': 'first',
'return': '3 elements containing the string itself, followed by two empty strings',
'also': 'rpartition : Split the string at the last occurrence of `sep`'})
def partition(self, pat=' ', expand=True):
f = lambda x: x.partition(pat)
result = _na_map(f, self._data)
return self._wrap_result_expand(result, expand=expand)
@Appender(_shared_docs['str_partition'] % {'side': 'last',
'return': '3 elements containing two empty strings, followed by the string itself',
'also': 'partition : Split the string at the first occurrence of `sep`'})
def rpartition(self, pat=' ', expand=True):
f = lambda x: x.rpartition(pat)
result = _na_map(f, self._data)
return self._wrap_result_expand(result, expand=expand)
@copy(str_get)
def get(self, i):
result = str_get(self._data, i)
return self._wrap_result(result)
@copy(str_join)
def join(self, sep):
result = str_join(self._data, sep)
return self._wrap_result(result)
@copy(str_contains)
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(self._data, pat, case=case, flags=flags,
na=na, regex=regex)
return self._wrap_result(result)
@copy(str_match)
def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=False):
result = str_match(self._data, pat, case=case, flags=flags,
na=na, as_indexer=as_indexer)
return self._wrap_result(result)
@copy(str_replace)
def replace(self, pat, repl, n=-1, case=True, flags=0):
result = str_replace(self._data, pat, repl, n=n, case=case,
flags=flags)
return self._wrap_result(result)
@copy(str_repeat)
def repeat(self, repeats):
result = str_repeat(self._data, repeats)
return self._wrap_result(result)
@copy(str_pad)
def pad(self, width, side='left', fillchar=' '):
result = str_pad(self._data, width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs['str_pad'] = ("""
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
""")
@Appender(_shared_docs['str_pad'] % dict(side='left and right',
method='center'))
def center(self, width, fillchar=' '):
return self.pad(width, side='both', fillchar=fillchar)
@Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
def ljust(self, width, fillchar=' '):
return self.pad(width, side='right', fillchar=fillchar)
@Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
def rjust(self, width, fillchar=' '):
return self.pad(width, side='left', fillchar=fillchar)
def zfill(self, width):
""""
Filling left side of strings in the Series/Index with 0.
Equivalent to :meth:`str.zfill`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with 0
Returns
-------
filled : Series/Index of objects
"""
result = str_pad(self._data, width, side='left', fillchar='0')
return self._wrap_result(result)
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
result = str_slice(self._data, start, stop, step)
return self._wrap_result(result)
@copy(str_slice_replace)
def slice_replace(self, start=None, stop=None, repl=None):
result = str_slice_replace(self._data, start, stop, repl)
return self._wrap_result(result)
@copy(str_decode)
def decode(self, encoding, errors="strict"):
result = str_decode(self._data, encoding, errors)
return self._wrap_result(result)
@copy(str_encode)
def encode(self, encoding, errors="strict"):
result = str_encode(self._data, encoding, errors)
return self._wrap_result(result)
_shared_docs['str_strip'] = ("""
Strip whitespace (including newlines) from each string in the
Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`.
Returns
-------
stripped : Series/Index of objects
""")
@Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
method='strip'))
def strip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='both')
return self._wrap_result(result)
@Appender(_shared_docs['str_strip'] % dict(side='left side',
method='lstrip'))
def lstrip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='left')
return self._wrap_result(result)
@Appender(_shared_docs['str_strip'] % dict(side='right side',
method='rstrip'))
def rstrip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='right')
return self._wrap_result(result)
@copy(str_wrap)
def wrap(self, width, **kwargs):
result = str_wrap(self._data, width, **kwargs)
return self._wrap_result(result)
@copy(str_get_dummies)
def get_dummies(self, sep='|'):
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
data = self._orig.astype(str) if self._is_categorical else self._data
result = str_get_dummies(data, sep)
return self._wrap_result(result, use_codes=(not self._is_categorical))
@copy(str_translate)
def translate(self, table, deletechars=None):
result = str_translate(self._data, table, deletechars)
return self._wrap_result(result)
count = _pat_wrapper(str_count, flags=True)
startswith = _pat_wrapper(str_startswith, na=True)
endswith = _pat_wrapper(str_endswith, na=True)
findall = _pat_wrapper(str_findall, flags=True)
@copy(str_extract)
def extract(self, pat, flags=0):
result, name = str_extract(self._data, pat, flags=flags)
return self._wrap_result(result, name=name)
_shared_docs['find'] = ("""
    Return %(side)s indexes in each string in the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
""")
    @Appender(_shared_docs['find'] % dict(side='lowest', method='find',
                                          also='rfind : Return highest indexes in each string'))
def find(self, sub, start=0, end=None):
result = str_find(self._data, sub, start=start, end=end, side='left')
return self._wrap_result(result)
    @Appender(_shared_docs['find'] % dict(side='highest', method='rfind',
                                          also='find : Return lowest indexes in each string'))
def rfind(self, sub, start=0, end=None):
result = str_find(self._data, sub, start=start, end=end, side='right')
return self._wrap_result(result)
def normalize(self, form):
"""Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form
Returns
-------
normalized : Series/Index of objects
"""
import unicodedata
f = lambda x: unicodedata.normalize(form, compat.u_safe(x))
result = _na_map(f, self._data)
return self._wrap_result(result)
_shared_docs['index'] = ("""
    Return %(side)s indexes in each string where the substring is
fully contained between [start:end]. This is the same as ``str.%(similar)s``
except instead of returning -1, it raises a ValueError when the substring
is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
""")
    @Appender(_shared_docs['index'] % dict(side='lowest', similar='find', method='index',
                                           also='rindex : Return highest indexes in each string'))
def index(self, sub, start=0, end=None):
result = str_index(self._data, sub, start=start, end=end, side='left')
return self._wrap_result(result)
    @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex',
                                           also='index : Return lowest indexes in each string'))
def rindex(self, sub, start=0, end=None):
result = str_index(self._data, sub, start=start, end=end, side='right')
return self._wrap_result(result)
_shared_docs['len'] = ("""
Compute length of each string in the Series/Index.
Returns
-------
lengths : Series/Index of integer values
""")
len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
_shared_docs['casemethods'] = ("""
Convert strings in the Series/Index to %(type)s.
Equivalent to :meth:`str.%(method)s`.
Returns
-------
converted : Series/Index of objects
""")
_shared_docs['lower'] = dict(type='lowercase', method='lower')
_shared_docs['upper'] = dict(type='uppercase', method='upper')
_shared_docs['title'] = dict(type='titlecase', method='title')
_shared_docs['capitalize'] = dict(type='be capitalized',
method='capitalize')
_shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
lower = _noarg_wrapper(lambda x: x.lower(),
docstring=_shared_docs['casemethods'] %
_shared_docs['lower'])
upper = _noarg_wrapper(lambda x: x.upper(),
docstring=_shared_docs['casemethods'] %
_shared_docs['upper'])
title = _noarg_wrapper(lambda x: x.title(),
docstring=_shared_docs['casemethods'] %
_shared_docs['title'])
capitalize = _noarg_wrapper(lambda x: x.capitalize(),
docstring=_shared_docs['casemethods'] %
_shared_docs['capitalize'])
swapcase = _noarg_wrapper(lambda x: x.swapcase(),
docstring=_shared_docs['casemethods'] %
_shared_docs['swapcase'])
_shared_docs['ismethods'] = ("""
Check whether all characters in each string in the Series/Index
are %(type)s. Equivalent to :meth:`str.%(method)s`.
Returns
-------
is : Series/array of boolean values
""")
_shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
_shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
_shared_docs['isdigit'] = dict(type='digits', method='isdigit')
_shared_docs['isspace'] = dict(type='whitespace', method='isspace')
_shared_docs['islower'] = dict(type='lowercase', method='islower')
_shared_docs['isupper'] = dict(type='uppercase', method='isupper')
_shared_docs['istitle'] = dict(type='titlecase', method='istitle')
_shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
_shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
isalnum = _noarg_wrapper(lambda x: x.isalnum(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isalnum'])
isalpha = _noarg_wrapper(lambda x: x.isalpha(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isalpha'])
isdigit = _noarg_wrapper(lambda x: x.isdigit(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isdigit'])
isspace = _noarg_wrapper(lambda x: x.isspace(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isspace'])
islower = _noarg_wrapper(lambda x: x.islower(),
docstring=_shared_docs['ismethods'] %
_shared_docs['islower'])
isupper = _noarg_wrapper(lambda x: x.isupper(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isupper'])
istitle = _noarg_wrapper(lambda x: x.istitle(),
docstring=_shared_docs['ismethods'] %
_shared_docs['istitle'])
isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isnumeric'])
isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isdecimal'])
class StringAccessorMixin(object):
""" Mixin to add a `.str` acessor to the class."""
# string methods
def _make_str_accessor(self):
from pandas.core.series import Series
from pandas.core.index import Index
if isinstance(self, Series) and not(
(is_categorical_dtype(self.dtype) and
is_object_dtype(self.values.categories)) or
(is_object_dtype(self.dtype))):
            # it's neither a string series nor a categorical series with strings
# inside the categories.
# this really should exclude all series with any non-string values (instead of test
# for object dtype), but that isn't practical for performance reasons until we have a
# str dtype (GH 9343)
raise AttributeError("Can only use .str accessor with string "
"values, which use np.object_ dtype in "
"pandas")
elif isinstance(self, Index):
            # see src/inference.pyx which can contain string values
allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
if self.inferred_type not in allowed_types:
message = ("Can only use .str accessor with string values "
"(i.e. inferred_type is 'string', 'unicode' or 'mixed')")
raise AttributeError(message)
if self.nlevels > 1:
message = "Can only use .str accessor with Index, not MultiIndex"
raise AttributeError(message)
return StringMethods(self)
str = AccessorProperty(StringMethods, _make_str_accessor)
def _dir_additions(self):
return set()
def _dir_deletions(self):
try:
getattr(self, 'str')
except AttributeError:
return set(['str'])
return set()
| mit |
suiyuan2009/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 31 | 60315 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
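    # (Each partition of the embedding variable should get a matching Adagrad
    # accumulator slice, so counting variable names containing both
    # 'language_embedding' and 'Adagrad' below yields the partition count.)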
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
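    # (Under Python 3 the estimator yields the predicted label keys as bytes,
    # so the assertIn check on predicted_classes below only matches byte keys.)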
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
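      # The strided_slice below keeps predictions[:, 1:2] (end_mask=1 lets the
      # row dimension run to its end), i.e. the positive-class probability.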
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
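    # (With dimension=1 float32 weights, 2e7 buckets is roughly 80MB, assumed
    # here to exceed the partitioner's default minimum slice size.)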
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
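    # (With dimension=1 float32 weights, 2e7 buckets is roughly 80MB, assumed
    # here to exceed the partitioner's default minimum slice size.)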
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
rsmailach/SRPT | SRPTE_Catastrophic.py | 1 | 34877 | #----------------------------------------------------------------------#
# SRPTE_Catastrophic.py
#
# This application simulates a single server with Poisson arrivals
# and processing times drawn from a general distribution. Each job's
# processing-time estimate carries an error within a configurable range.
# Jobs are serviced in order of shortest remaining processing time.
#
# Rachel Mailach
#----------------------------------------------------------------------#
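#
# Illustrative sketch (an assumption for illustration, not the simulation
# logic itself): one way an estimated remaining processing time (ERPT) can
# be derived from the true processing time and a percent error drawn from
# the configured range:
#
#   percentError = random.uniform(percErrorMin, percErrorMax)  # e.g. in [-50, 0]
#   estimatedProcTime = procTime * (1.0 + percentError / 100.0)
#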
from tkinter import *
#from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
from datetime import datetime
from math import log
import plotly.plotly as py
from plotly.graph_objs import Scatter
import plotly.graph_objs as go
#from scipy import integrate as integrate
import copy
import random
import csv
import operator
import sqlite3
import pandas
conn = sqlite3.connect('SingleServerDatabase_SRPTE.db')
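# Global per-run statistics, populated by the simulation classes and
# summarized in GUI.displayAverageData (e.g. TimeSys collects each completed
# job's time in system).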
NumJobs = []
AvgNumJobs = []
NumJobsTime = []
TimeSys = []
ProcTime = []
PercError = []
#----------------------------------------------------------------------#
# Class: GUI
#
# This class is used as a graphical user interface for the application.
#
#----------------------------------------------------------------------#
class GUI(Tk):
def __init__(self, master):
Tk.__init__(self, master)
self.master = master # reference to parent
self.statusText = StringVar()
global SEED
#SEED = random.randint(0, 1000000000)
SEED = 994863731
random.seed(SEED)
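        # A fixed seed keeps runs reproducible; re-enable the randint line
        # above to draw a fresh seed per run.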
# Create the input frame
self.frameIn = Input(self)
self.frameIn.pack(side=TOP, fill=BOTH, padx = 5, pady =5, ipadx = 5, ipady = 5)
# Create the output frame
self.frameOut = Output(self)
self.frameOut.pack(side=TOP, fill=BOTH, padx = 5, pady =5, ipadx = 5, ipady = 5)
# Bind simulate button
self.bind("<<input_simulate>>", self.submit)
# Bind save button
self.bind("<<output_save>>", self.saveData)
# Bind clear button
self.bind("<<output_clear>>", self.clearConsole)
# Bind stop button
self.bind("<<stop_sim>>", self.stopSimulation)
# Status Bar
status = Label(self.master, textvariable=self.statusText, bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, anchor=W, fill=X)
# Initialize console
self.consoleFrame = Frame(self.frameOut)
self.console = Text(self.consoleFrame, wrap = WORD)
self.makeConsole()
self.printIntro()
self.updateStatusBar("Waiting for submit...")
def makeConsole(self):
#self.consoleFrame = Frame(self.frameOut)
self.consoleFrame.pack(side=TOP, padx=5, pady=5)
#self.console = Text(self.consoleFrame, wrap = WORD)
self.console.config(state=DISABLED) # start with console as disabled (non-editable)
self.scrollbar = Scrollbar(self.consoleFrame)
self.scrollbar.config(command = self.console.yview)
self.console.config(yscrollcommand=self.scrollbar.set)
self.console.grid(column=0, row=0)
self.scrollbar.grid(column=1, row=0, sticky='NS')
def writeToConsole(self, text = ' '):
self.console.config(state=NORMAL) # make console editable
self.console.insert(END, '%s\n'%text)
self.update()
self.console.yview(END) # auto-scroll
self.console.config(state=DISABLED) # disable (non-editable) console
    def saveData(self, event):
        # Get filename
        filename = filedialog.asksaveasfilename(title="Save as...", defaultextension='.txt')
        if filename:
            # Write the console contents directly; wrapping the encoded bytes
            # in str() would save the repr (b'...') instead of the text.
            with open(filename, mode='w') as outFile:
                outFile.write(self.console.get(1.0, END))
    # Empty arrivals file at the beginning of each simulation
def clearSavedArrivals(self):
with open("Arrivals.txt", "w") as myFile:
myFile.write('Job Name, Arrival Time, RPT, ERPT' + '\n')
myFile.close()
def clearConsole(self, event):
self.console.config(state=NORMAL) # make console editable
self.console.delete('1.0', END)
self.console.config(state=DISABLED) # disable (non-editable) console
def updateStatusBar(self, text=' '):
self.statusText.set(text)
def printIntro(self):
self.writeToConsole("SRPTE \n\n This application simulates a single server with Poisson arrivals and processing times of a general distribution. Each arrival has an estimation error within a percent error taken as input. Jobs are serviced in order of shortest remaining processing time.")
def saveParams(self, load, arrRate, arrDist, procRate, procDist, percErrorMin, percErrorMax, simLength, alpha, lower, upper):
##params = pandas.DataFrame(columns=('seed', 'numServers', 'load', 'arrRate', 'arrDist', 'procRate', 'procDist', 'alpha', 'lower', 'upper', 'percErrorMin', 'percErrorMax', 'simLength'))
print (SEED)
params = pandas.DataFrame({ 'seed' : [SEED],
'load' : [load],
'arrRate' : [arrRate],
'arrDist' : [arrDist],
'procRate' : [procRate],
'procDist' : [procDist],
'alpha' : [alpha],
'lower' : [lower],
'upper' : [upper],
'percErrorMin' : [percErrorMin],
'percErrorMax' : [percErrorMax],
'simLength' : [simLength],
'avgNumJobs' : [MachineClass.AvgNumJobs]
})
params.to_sql(name='parameters', con=conn, if_exists='append')
print (params)
def printParams(self, load, arrDist, procRate, procDist, percErrorMin, percErrorMax, simLength):
self.writeToConsole("--------------------------------------------------------------------------------")
self.writeToConsole("PARAMETERS:")
self.writeToConsole("Load = %.4f"%load)
#self.writeToConsole("Arrival Rate = %.4f"%arrRate)
self.writeToConsole("Arrival Distribution = %s"%arrDist)
self.writeToConsole("Processing Rate = %.4f, Processing Distribution = %s"%(procRate, str(procDist)))
self.writeToConsole("% Error = " + u"\u00B1" + " %.4f, %.4f"%(percErrorMin, percErrorMax))
self.writeToConsole("Simulation Length = %.4f\n\n"%simLength)
def plotNumJobsInSys(self):
py.sign_in('mailacrs','wowbsbc0qo')
trace0 = Scatter(x=NumJobsTime, y=NumJobs)
data = [trace0]
layout = go.Layout(
title='Number of Jobs Over Time',
xaxis=dict(
title='Time',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='Number of Jobs',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
unique_url = py.plot(fig, filename = 'SRPT_NumJobs')
def plotAvgNumJobsInSys(self):
py.sign_in('mailacrs','wowbsbc0qo')
trace0 = Scatter(x=NumJobsTime, y=AvgNumJobs)
data = [trace0]
layout = go.Layout(
title='Average Number of Jobs Over Time',
xaxis=dict(
title='Time',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='Number of Jobs',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
unique_url = py.plot(fig, filename = 'SRPT_AvgNumJobs')
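    # Population variance of List about the precomputed mean avg.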
def calcVariance(self, List, avg):
var = 0
for i in List:
var += (avg - i)**2
return var/len(List)
def displayAverageData(self):
##AvgNumJobs = int(float(sum(NumJobs))/len(NumJobs))
AvgNumJobs = MachineClass.AvgNumJobs
AvgTimeSys = float(sum(TimeSys))/len(TimeSys)
AvgProcTime = float(sum(ProcTime))/len(ProcTime)
VarProcTime = self.calcVariance(ProcTime, AvgProcTime)
AvgPercError = float(sum(PercError))/len(PercError)
        self.writeToConsole('\n\nAverage number of jobs in the system is %s' %AvgNumJobs)
self.writeToConsole('Average time in system, from start to completion is %s' %AvgTimeSys)
self.writeToConsole('Average processing time, based on generated service times is %s' %AvgProcTime)
self.writeToConsole('Variance of processing time %s' %VarProcTime)
self.writeToConsole('Average percent error %.4f\n' %AvgPercError)
#self.writeToConsole('Request order: %s' % ArrivalClass.JobOrderIn)
#self.writeToConsole('Service order: %s\n\n' % MachineClass.JobOrderOut)
def stopSimulation(self, event):
MachineClass.StopSim = True
def submit(self, event):
self.updateStatusBar("Simulating...")
self.clearSavedArrivals()
I = Input(self)
self.printParams(I.valuesList[0], #load
'Exponential', #arrival dist
#I.valuesList[1], # arrival rate
I.valuesList[2], I.distList[1], #processing rate
I.valuesList[3], #error min
I.valuesList[4], #error max
I.valuesList[5]) #sim time
main.timesClicked = 0
# Start process
MC = MachineClass(self)
MC.run( I.valuesList[0], # load
'Exponential', # arrival
#I.valuesList[1], # arrival rate
I.valuesList[2], I.distList[1], # processing
I.valuesList[3], # error min
I.valuesList[4], # error max
I.valuesList[5]) # sim time
self.saveParams(I.valuesList[0], #load
'?', # arrival rate
'Exponential', # arrival dist
'?', I.distList[1], # processing
I.valuesList[3], # error min
I.valuesList[4], # error max
I.valuesList[5], # sim time
JobClass.BPArray[0], # alpha
JobClass.BPArray[1], # lower
JobClass.BPArray[2]) # upper
self.displayAverageData()
self.plotNumJobsInSys()
self.plotAvgNumJobsInSys()
#self.saveData()
self.updateStatusBar("Simulation complete.")
#----------------------------------------------------------------------#
# Class: Input
#
# This class is used as a graphical user interface for a larger
# application.
#
#----------------------------------------------------------------------#
class Input(LabelFrame):
def __init__(self, master):
LabelFrame.__init__(self, master, text = "Input")
self.master = master
self.loadInput = DoubleVar()
self.arrivalRateInput = DoubleVar()
self.processingRateInput = DoubleVar()
self.percentErrorMinInput = DoubleVar()
self.percentErrorMaxInput = DoubleVar()
self.simLengthInput = DoubleVar()
self.errorMessage = StringVar()
self.comboboxVal = StringVar()
self.loadInput.set(0.95) ##################################CHANGE LATER
#self.arrivalRateInput.set(1.0) ##################################CHANGE LATER
self.processingRateInput.set(0.5) ##################################CHANGE LATER
self.percentErrorMinInput.set(-50) ##################################CHANGE LATER
self.percentErrorMaxInput.set(0) ##################################CHANGE LATER
self.simLengthInput.set(5000000.0)
self.grid_columnconfigure(0, weight=2)
self.grid_columnconfigure(1, weight=2)
self.grid_columnconfigure(2, weight=1)
self.grid_columnconfigure(3, weight=1)
self.grid_columnconfigure(4, weight=1)
self.grid_columnconfigure(5, weight=2)
self.grid_rowconfigure(0, weight=1)
# Labels
labels = ['System Load', 'Interarrival Rate (' + u'\u03bb' + ')', 'Processing Rate (' + u'\u03bc' + ')', '% Error' , 'Simulation Length']
r=0
c=0
for elem in labels:
Label(self, text=elem).grid(row=r, column=c)
r=r+1
Label(self, textvariable=self.errorMessage, fg="red", font=14).grid(row=6, columnspan=4) #error message, invalid input
#Label(self, text=u"\u00B1").grid(row=3, column=1) # +/-
Label(self, text="Min").grid(row=3, column=1, sticky = E)
Label(self, text="Max").grid(row=3, column=3, sticky = W)
# Entry Boxes
self.entry_0 = Entry(self, textvariable = self.loadInput)
self.entry_1 = Entry(self, textvariable = self.arrivalRateInput)
self.entry_2 = Entry(self, textvariable = self.processingRateInput)
self.entry_3a = Entry(self, textvariable = self.percentErrorMinInput, width = 5)
self.entry_3b = Entry(self, textvariable = self.percentErrorMaxInput, width = 5)
self.entry_4 = Entry(self, textvariable = self.simLengthInput)
self.entry_0.grid(row = 0, column = 1)
self.entry_1.grid(row = 1, column = 1)
self.entry_2.grid(row = 2, column = 1)
self.entry_3a.grid(row = 3, column = 2, sticky = E)
self.entry_3b.grid(row = 3, column = 4, sticky = W)
self.entry_4.grid(row = 4, column = 1)
# Disable interarrival time
self.entry_1.delete(0, 'end')
self.entry_1.configure(state = 'disabled')
# Distribution Dropdowns
self.distributions = ('Select Distribution', 'Poisson', 'Exponential', 'Uniform', 'Bounded Pareto', 'Custom')
self.comboBox_1 = ttk.Combobox(self, values = self.distributions, state = 'disabled')
self.comboBox_1.current(2) # set selection
self.comboBox_1.grid(row = 1, column = 5)
self.comboBox_2 = ttk.Combobox(self, textvariable = self.comboboxVal, values = self.distributions, state = 'readonly')
self.comboBox_2.current(4) # set default selection #####################CHANGE LATER
self.comboBox_2.grid(row = 2, column = 5)
self.comboboxVal.trace("w", self.selectionChange) # refresh on change
self.refreshComboboxes()
# Simulate Button
self.simulateButton = Button(self, text = "SIMULATE", command = self.onButtonClick)
self.simulateButton.grid(row = 7, columnspan = 6)
def entryBoxChange(self, name, index, mode):
self.refreshLoad()
def refreshLoad(self):
if len(self.entry_0.get()) > 0:
self.entry_1.delete(0, 'end')
self.entry_1.configure(state = 'disabled')
else:
self.entry_1.configure(state = 'normal')
self.arrivalRateInput.set(self.arrRateDefault)
if len(self.entry_1.get()) > 0:
self.entry_0.delete(0, 'end')
self.entry_0.configure(state = 'disabled')
else:
self.entry_0.configure(state = 'normal')
self.loadInput.set(self.loadDefault)
def selectionChange(self, name, index, mode):
self.refreshComboboxes()
def refreshComboboxes(self):
selection = self.comboBox_2.get()
if selection == 'Bounded Pareto':
#self.entry_2.delete(0, 'end')
self.entry_2.configure(state = 'disabled')
else:
self.entry_2.configure(state = 'normal')
#self.processingRateInput.set(self.procRateDefault)
def onButtonClick(self):
if (self.getNumericValues() == 0) and (self.getDropDownValues() == 0):
# Send to submit button in main
self.simulateButton.event_generate("<<input_simulate>>")
def getNumericValues(self):
try:
load = self.loadInput.get()
#arrivalRate = self.arrivalRateInput.get()
processingRate = self.processingRateInput.get()
percentErrorMin = self.percentErrorMinInput.get()
percentErrorMax = self.percentErrorMaxInput.get()
maxSimLength = self.simLengthInput.get()
except ValueError:
self.errorMessage.set("One of your inputs is an incorrect type, try again.")
return 1
#try:
# arrRate = float(self.arrivalRateInput.get())
#except ValueError:
# arrRate = 0.0
#try:
# procRate = float(self.processingRateInput.get())
#except ValueError:
# procRate = 0.0
if load <= 0.0:
self.errorMessage.set("System load must be a non-zero value!")
return 1
#if arrivalRate <= 0.0:
# self.errorMessage.set("Arrival rate must be a non-zero value!")
# return 1
#if processingRate <= 0.0:
# self.errorMessage.set("Processing rate must be a non-zero value!")
# return 1
if maxSimLength <= 0.0:
self.errorMessage.set("Simulation length must be a non-zero value!")
return 1
else:
self.errorMessage.set("")
Input.valuesList = [load, 0.0, processingRate, percentErrorMin, percentErrorMax, maxSimLength]
return 0
def getDropDownValues(self):
comboBox1Value = self.comboBox_1.get()
comboBox2Value = self.comboBox_2.get()
if comboBox2Value == 'Select Distribution':
self.errorMessage.set("You must select a distribution for the processing rate")
return 1
else:
self.errorMessage.set("")
Input.distList = [comboBox1Value, comboBox2Value]
return 0
#----------------------------------------------------------------------#
# Class: Output
#
# This class is used as a graphical user interface for a larger
# application.
#
#----------------------------------------------------------------------#
class Output(LabelFrame):
def __init__(self, master):
LabelFrame.__init__(self, master, text = "Output")
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
buttonFrame = Frame(self)
buttonFrame.pack(side=BOTTOM, padx=5, pady=5)
# Clear Button
self.clearButton = Button(buttonFrame, text = "CLEAR DATA", command = self.onClearButtonClick)
self.clearButton.grid(row = 2, column = 0)
# Save Button
self.saveButton = Button(buttonFrame, text = "SAVE DATA", command = self.onSaveButtonClick)
self.saveButton.grid(row=2, column=1)
# Stop Button
self.stopButton = Button(buttonFrame, text = "STOP SIMULATION", command = self.onStopButtonClick)
self.stopButton.grid(row = 2, column = 2)
def onClearButtonClick(self):
# Clear console
self.clearButton.event_generate("<<output_clear>>")
def onSaveButtonClick(self):
# Save data
self.saveButton.event_generate("<<output_save>>")
def onStopButtonClick(self):
# Stop simulation
self.stopButton.event_generate("<<stop_sim>>")
#----------------------------------------------------------------------#
# Class: CustomDist
#
# This class is used to allow users to enter a custom distribution.
#
#----------------------------------------------------------------------#
class CustomDist(object):
def __init__(self, master):
top = self.top = Toplevel(master)
top.geometry("500x200") # set window size
top.resizable(0,0)
self.function = StringVar()
# Label frame
frame1 = Frame(top)
frame1.pack(side=TOP, padx=5, pady=5)
self.l=Label(frame1, text="Please enter the functional inverse of the distribution of your choice. \nExponential distribution is provided as an example. \nNote: x " + u"\u2265" + " 0", font=("Helvetica", 12), justify=LEFT)
self.l.pack()
# Button frame
frame2 = Frame(top)
frame2.pack(side=TOP, padx=5, pady=5)
self.mu=Button(frame2, text=u'\u03bc', command=self.insertMu)
self.mu.pack(side=LEFT)
self.x=Button(frame2, text="x", command=self.insertX)
self.x.pack(side=LEFT)
self.ln=Button(frame2, text="ln", command=self.insertLn)
self.ln.pack(side=LEFT)
# Input frame
frame3 = Frame(top)
frame3.pack(side=TOP, padx=5, pady=5)
self.e = Entry(frame3, textvariable = self.function)
self.e.insert(0, "-ln(1 - x)/" + u'\u03bc')
self.e.pack(fill="both", expand=True)
frame4 = Frame(top)
frame4.pack(side=TOP, pady=10)
self.b=Button(frame4,text='Ok',command=self.cleanup)
self.b.pack()
def cleanup(self):
self.stringEquation=self.convertFunction()
self.top.destroy()
def insertMu(self):
self.e.insert(END, u'\u03bc')
def insertX(self):
self.e.insert(END, "x")
def insertLn(self):
self.e.insert(END, "ln")
def convertFunction(self):
self.stringList = list(self.e.get())
for i in range(len(self.stringList)):
if self.stringList[i] == u'\u03bc':
self.stringList[i] = "procRate"
elif self.stringList[i] == "x":
self.stringList[i] = "random.uniform(0.0, 1.0)"
elif self.stringList[i] == "l" and self.stringList[i+1] == "n":
self.stringList[i] = "log"
self.stringList[i+1] = ""
print ("".join(self.stringList))
return "".join(self.stringList)
#----------------------------------------------------------------------#
# Class: BoundedParetoDist
#
# This class is used to allow users to enter parameters to
# Bounded Pareto distribution.
#
#----------------------------------------------------------------------#
class BoundedParetoDist(object):
Array = []
def __init__(self, master):
top = self.top = Toplevel(master)
top.geometry("500x200") # set window size
top.resizable(0,0)
self.errorMessage = StringVar()
self.alpha = DoubleVar()
self.L = DoubleVar()
self.U = DoubleVar()
# Set default parameters
self.alpha.set(1.1)
self.L.set(1)
self.U.set(10**(6))
# Label frame
frame1 = Frame(top)
frame1.pack(side=TOP, padx=5, pady=5)
self.l=Label(frame1, text="Please enter the parameters you would like.", font=("Helvetica", 12), justify=LEFT)
self.l.pack()
self.error = Label(frame1, textvariable=self.errorMessage, fg="red", font=14)
self.error.pack()
# Input frame
frame2 = Frame(top)
frame2.pack(side=TOP, padx=5, pady=5)
frame2.grid_columnconfigure(0, weight=1)
frame2.grid_rowconfigure(0, weight=1)
self.l1 = Label(frame2, text = "alpha (shape)")
self.l2 = Label(frame2, text = "L (smallest job size)")
self.l3 = Label(frame2, text = "U (largest job size)")
self.l1.grid(row = 0, column = 0)
self.l2.grid(row = 1, column = 0)
self.l3.grid(row = 2, column = 0)
self.e1 = Entry(frame2, textvariable = self.alpha)
self.e2 = Entry(frame2, textvariable = self.L)
self.e3 = Entry(frame2, textvariable = self.U)
self.e1.grid(row = 0, column = 1)
self.e2.grid(row = 1, column = 1)
self.e3.grid(row = 2, column = 1)
frame3 = Frame(top)
frame3.pack(side=TOP, pady=10)
self.b=Button(frame3,text='Ok',command=self.cleanup)
self.b.pack()
def cleanup(self):
if(self.checkParams() == 0):
self.paramArray=BoundedParetoDist.Array
self.top.destroy()
def checkParams(self):
self.a = float(self.e1.get())
self.l = float(self.e2.get())
self.u = float(self.e3.get())
if (self.a <= 0) or (self.u < self.l) or (self.l <= 0):
print ("ERROR: Bounded pareto paramater error")
self.errorMessage.set("Bounded pareto paramater error")
return 1
else:
self.errorMessage.set("")
BoundedParetoDist.Array = [self.a, self.l, self.u]
return 0
#----------------------------------------------------------------------#
# Class: Node
#
# This class is used to define the linked list nodes.
#
#----------------------------------------------------------------------#
class Node():
def __init__(self, job, nextNode = None):
self.job = job
self.nextNode = nextNode
#----------------------------------------------------------------------#
# Class: LinkedList
#
# This class is used to make the linked list data structure used to
# store jobs.
#
#----------------------------------------------------------------------#
class LinkedList(object):
Size = 0
def __init__(self, head = None):
self.head = head
# Insert job into queue (sorted by ERPT)
def insert(self, job):
current = self.head # node iterator, starts at head
previous = None
if (current == None): # if queue is empty, set current job as head
self.head = Node(job, None)
else:
while (current != None) and (job.ERPT > current.job.ERPT):
previous = current # prev = node[i]
current = current.nextNode # current = node[i+1]
# Insert new node after previous before current
if (previous == None):
self.head = Node(job, current)
else:
previous.nextNode = Node(job, current)
LinkedList.Size += 1
# Remove first item in queue
def removeHead(self):
if (LinkedList.Size > 0):
self.head = self.head.nextNode # move head forward one node
LinkedList.Size -= 1
else:
GUI.writeToConsole(self.master, "ERROR: The linked list is already empty!")
def clear(self):
self.head = None
def printList(self):
current = self.head
while (current != None):
print (current.job.name, current.job.ERPT)
current = current.nextNode
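# Illustrative usage: the list keeps jobs sorted by ascending ERPT, so the head
# is always the job the SRPT policy should serve next, e.g.
#   queue = LinkedList()
#   queue.insert(jobA) # jobA, jobB: JobClass instances with .ERPT already set
#   queue.insert(jobB)
#   nextJob = queue.head.job # job with the smallest ERPT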
#----------------------------------------------------------------------#
# Class: JobClass
#
# This class is used to define jobs.
#
# Attributes: arrival time, processing time, remaining processing
# time, estimated remaining processing time, percent error
#----------------------------------------------------------------------#
class JobClass(object):
BPArray = []
def __init__(self, master):
self.master = master
self.arrivalTime = 0
self.procTime = 0
self.RPT = 0 # Real Remaining Processing Time
self.ERPT = 0 # Estimated Remaining Processing Time
self.percentError = 0
self.processRate = 0
self.arrivalRate = 0
#JobClass.BPArray = []
def setArrProcRates(self, load, procRate, procDist):
if procDist == 'Bounded Pareto':
alpha = JobClass.BPArray[0]
L = JobClass.BPArray[1]
U = JobClass.BPArray[2]
if alpha > 1 and L > 0:
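                # Mean of a bounded Pareto(alpha, L, U) variate, valid for alpha > 1:
                # E[X] = (L^a / (1 - (L/U)^a)) * (a / (a - 1)) * (1/L^(a-1) - 1/U^(a-1));
                # the processing rate is then 1/E[X]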
procMean = (L**alpha/(1 - (L/U)**alpha))*(alpha/(alpha - 1))*((1/(L**(alpha - 1)))-(1/(U**(alpha - 1))))
self.processRate = 1/float(procMean)
else:
self.processRate = procRate
self.arrivalRate = float(load) * self.processRate
# Dictionary of service distributions
def setServiceDist(self, procRate, procDist):
ServiceDistributions = {
'Poisson': random.expovariate(1.0/procRate),
'Exponential': random.expovariate(procRate),
'Uniform': random.uniform(0.0, procRate),
'Bounded Pareto': self.setBoundedPareto,
'Custom': self.setCustomDist
}
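        # Note: the 'Poisson', 'Exponential' and 'Uniform' entries are evaluated
        # eagerly when this dict is built (one fresh random draw per call of this
        # method), while 'Bounded Pareto' and 'Custom' store bound methods that
        # are invoked below.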
if(procDist == 'Custom'):
return ServiceDistributions[procDist](procRate)
elif(procDist == 'Bounded Pareto'):
return ServiceDistributions[procDist]()
else:
return ServiceDistributions[procDist]
def setCustomDist(self, procRate):
if main.timesClicked == 0:
main.timesClicked += 1
self.popup = CustomDist(self.master)
self.master.wait_window(self.popup.top)
main.customEquation = self.popup.stringEquation
return eval(main.customEquation)
def setBoundedPareto(self):
# Get and set parameters (in job class array)
if main.timesClicked == 0:
main.timesClicked += 1
self.popup = BoundedParetoDist(self.master)
self.master.wait_window(self.popup.top)
self.alpha = float(self.popup.paramArray[0]) # Shape, power of tail, alpha = 2 is approx Expon., alpha = 1 gives higher variance
self.L = float(self.popup.paramArray[1]) # Smallest job size
self.U = float(self.popup.paramArray[2]) # Largest job size
JobClass.BPArray = [self.alpha, self.L, self.U]
x = random.uniform(0.0, 1.0)
# reassigning
alpha = JobClass.BPArray[0]
L = JobClass.BPArray[1]
U = JobClass.BPArray[2]
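        # Inverse-transform sampling: inverting the bounded Pareto CDF at the
        # uniform draw x gives y = ((U^a + x*L^a - x*U^a) / (U^a * L^a))^(-1/a),
        # which is exactly the expression assembled below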
paretoNumerator = float(-(x*(U**alpha) - x*(L**alpha) - (U**alpha)))
paretoDenominator = float((U**alpha) * (L**alpha))
main.customEquation = (paretoNumerator/paretoDenominator)**(-1/alpha)
return main.customEquation
# Generates a percent error for processing time
def generateError(self, percErrorMin, percErrorMax):
self.percentError = random.uniform(percErrorMin, percErrorMax)
return self.percentError
# Sets all processing times for job
def setJobAttributes(self, load, procRate, procDist, percErrorMin, percErrorMax, jobArrival):
if(procDist == 'Bounded Pareto'):
self.procTime = self.setServiceDist(procRate, procDist) #use updated proc rate
self.setArrProcRates(load, procRate, procDist)
else:
self.setArrProcRates(load, procRate, procDist)
self.procTime = self.setServiceDist(procRate, procDist) #use updated proc rate
self.estimatedProcTime = (1 + (self.generateError(percErrorMin, percErrorMax)/100.0))*self.procTime
self.RPT = self.procTime
self.ERPT = self.estimatedProcTime
self.arrivalTime = jobArrival
#----------------------------------------------------------------------#
# Class: MachineClass
#
# This class is used to generate Jobs at random and process them.
#
# Entities: jobs, server
# Events: job arrives, job completes
# Activities: processing job, waiting for new job
#
#----------------------------------------------------------------------#
class MachineClass(object):
Queue = LinkedList()
JobOrderOut = []
CurrentTime = 0.0
TimeUntilArrival = 0.0
ServiceStartTime = 0
AvgNumJobs = 0
PrevTime = 0
PrevNumJobs = 0
ServerBusy = False
StopSim = False
def __init__(self, master):
self.master = master
MachineClass.ServerBusy = False
MachineClass.StopSim = False
MachineClass.Queue.clear()
LinkedList.Size = 0
MachineClass.CurrentTime = 0.0
MachineClass.TimeUntilArrival = 0.0
MachineClass.ServiceStartTime = 0
MachineClass.AvgNumJobs = 0
MachineClass.PrevTime = 0
MachineClass.PrevNumJobs = 0
NumJobs[:] = []
AvgNumJobs[:] = []
NumJobsTime[:] = []
TimeSys[:] = []
ProcTime[:] = []
PercError[:] = []
MachineClass.JobOrderOut[:] = []
self.ctr = 0
# Dictionary of arrival distributions
def setArrivalDist(self, arrRate, arrDist):
ArrivalDistributions = {
'Poisson': random.expovariate(1.0/arrRate),
'Exponential': random.expovariate(arrRate)
#'Normal': Rnd.normalvariate(self.inputInstance.valuesList[0])
#'Custom':
}
return ArrivalDistributions[arrDist]
def getProcessingJob(self):
currentJob = MachineClass.Queue.head.job
return currentJob
#update data
def updateJob(self):
currentJob = self.getProcessingJob()
serviceTime = MachineClass.CurrentTime - MachineClass.ServiceStartTime
currentJob.RPT -= serviceTime
currentJob.ERPT -= serviceTime
def calcNumJobs(self, jobID, load):
self.currentNumJobs = MachineClass.Queue.Size #NOTE: This includes job in service
self.t = MachineClass.CurrentTime
self.delta_t = self.t - MachineClass.PrevTime
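        # Time-weighted running average of jobs in system, A(t) = (1/t) * integral_0^t N(s) ds,
        # updated incrementally as A(t) = A(t_prev)*(t_prev/t) + N_prev*(delta_t/t)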
# If one job in system
if(jobID == 0):
MachineClass.AvgNumJobs = 1 # First event is always create new job
# UPDATE
else:
MachineClass.AvgNumJobs = (MachineClass.PrevTime/(self.t))*float(MachineClass.AvgNumJobs) + float(MachineClass.PrevNumJobs)*(float(self.delta_t)/self.t)
# PrevTime becomes "old" t
MachineClass.PrevTime = self.t
# PrevNum jobs becomes current num jobs
MachineClass.PrevNumJobs = self.currentNumJobs
NumJobs.append(self.currentNumJobs) # y axis of plot
AvgNumJobs.append(MachineClass.AvgNumJobs) # y axis of plot
NumJobsTime.append(MachineClass.CurrentTime) # x axis of plot
self.saveNumJobs(load, MachineClass.CurrentTime, self.currentNumJobs)
self.saveAvgNumJobs(load, MachineClass.CurrentTime, MachineClass.AvgNumJobs)
    def saveNumJobs(self, load, time, numJobs):
        text = "%f,%f"%(time, numJobs) + "\n"
scaledLoad = int(load * 100)
path = "./SINGLE_SERVER_RESULTS/Catastrophic/SRPT_Num_load=%s_alpha=%s_servers=1_catastrophic.txt"%(scaledLoad, JobClass.BPArray[0])
        with open(path, "a") as myFile:
            myFile.write(text)
    def saveAvgNumJobs(self, load, time, avgNumJobs):
        text = "%f,%f"%(time, avgNumJobs) + "\n"
scaledLoad = int(load * 100)
path = "./SINGLE_SERVER_RESULTS/Catastrophic/SRPT_Avg_load=%s_alpha=%s_servers=1_catastrophic.txt"%(scaledLoad, JobClass.BPArray[0])
        with open(path, "a") as myFile:
            myFile.write(text)
# Job arriving
def arrivalEvent(self, load, arrDist, procRate, procDist, percErrorMin, percErrorMax):
J = JobClass(self.master)
J.setJobAttributes(load, procRate, procDist, percErrorMin, percErrorMax, MachineClass.CurrentTime)
J.name = "Job%02d"%self.ctr
GUI.writeToConsole(self.master, "%.6f | %s arrived, ERPT = %.5f"%(MachineClass.CurrentTime, J.name, J.ERPT))
self.calcNumJobs(self.ctr, load)
#self.saveArrivals(J) # save to list of arrivals, for testing
if(MachineClass.Queue.Size > 0):
self.updateJob() # update data in queue
MachineClass.Queue.insert(J) # add job to queue
self.processJob() # process first job in queue
# Generate next arrival
MachineClass.TimeUntilArrival = self.setArrivalDist(J.arrivalRate, arrDist)
self.ctr += 1
def insertLargeJob(self, counter, procDist):
J = JobClass(self.master)
J.setJobAttributes(1, 1, procDist, 0, 0, MachineClass.CurrentTime)
J.name = "JobXXXXX" + str(counter)
J.RPT = 100000
J.ERPT = 50000
GUI.writeToConsole(self.master, "%.6f | %s arrived, ERPT = %.5f"%(MachineClass.CurrentTime, J.name, J.ERPT))
        self.calcNumJobs(self.ctr, 1) # load = 1, matching setJobAttributes for injected jobs
#self.saveArrivals(J) # save to list of arrivals, for testing
if(MachineClass.Queue.Size > 0):
self.updateJob() # update data in queue
MachineClass.Queue.insert(J) # add job to queue
self.processJob() # process first job in queue
# Generate next arrival
MachineClass.TimeUntilArrival = self.setArrivalDist(J.arrivalRate, 'Exponential')
#def saveArrivals(self, job):
# text = "%s, %.4f, %.4f, %.4f"%(job.name, job.arrivalTime, job.RPT, job.ERPT) + "\n"
#
# with open("Arrivals.txt", "a") as myFile:
# myFile.write(text)
# myFile.close()
# Processing first job in queue
def processJob(self):
MachineClass.ServiceStartTime = MachineClass.CurrentTime
currentJob = self.getProcessingJob()
GUI.writeToConsole(self.master, "%.6f | %s processing, ERPT = %.5f"%(MachineClass.CurrentTime, currentJob.name, currentJob.ERPT))
MachineClass.ServerBusy = True
# Job completed
def completionEvent(self, load):
currentJob = self.getProcessingJob()
MachineClass.ServerBusy = False
MachineClass.JobOrderOut.append(currentJob.name)
self.calcNumJobs(self.ctr, load)
# NumJobs.append(MachineClass.AvgNumJobs) # y axis of plot
# NumJobsTime.append(MachineClass.CurrentTime) # x axis of plot
TimeSys.append(MachineClass.CurrentTime - currentJob.arrivalTime)
ProcTime.append(currentJob.procTime)
PercError.append(abs(currentJob.percentError))
GUI.writeToConsole(self.master, "%.6f | %s COMPLTED"%(MachineClass.CurrentTime, currentJob.name))
MachineClass.Queue.removeHead() # remove job from queue
def run(self, load, arrDist, procRate, procDist, percErrorMin, percErrorMax, simLength):
        counter = 1
while 1:
if(self.ctr == 0): # set time of first job arrival
arrRate = float(load) / procRate
MachineClass.TimeUntilArrival = self.setArrivalDist(arrRate, arrDist) # generate next arrival
            # Inject two very large jobs at fixed times (catastrophic-error scenario)
            if(MachineClass.CurrentTime >= 2000000.0 and counter == 1):
                self.insertLargeJob(counter, procDist)
                counter += 1
                print ("FIRST LARGE JOB INJECTED")
            elif(MachineClass.CurrentTime >= 2000500.0 and counter == 2):
                self.insertLargeJob(counter, procDist)
                counter += 1
                print ("SECOND LARGE JOB INJECTED")
            if (not MachineClass.ServerBusy) or (MachineClass.TimeUntilArrival < self.getProcessingJob().RPT):
#next event is arrival
MachineClass.CurrentTime += MachineClass.TimeUntilArrival
# stop server from processing current job
                MachineClass.ServerBusy = False
self.arrivalEvent(load, arrDist, procRate, procDist, percErrorMin, percErrorMax)
else:
#next event is job finishing
MachineClass.CurrentTime += self.getProcessingJob().RPT
self.completionEvent(load)
if(MachineClass.Queue.Size > 0):
self.processJob()
# If current time is greater than the simulation length, end program
if (MachineClass.CurrentTime > simLength) or (MachineClass.StopSim == True):
break
#----------------------------------------------------------------------#
def main():
window = GUI(None) # instantiate the class with no parent (None)
window.title('Single Server SRPT with Errors') # title the window
# Global variables used in JobClass
main.timesClicked = 0
main.customEquation = ""
#window.geometry("500x600") # set window size
window.mainloop() # loop indefinitely, wait for events
if __name__ == '__main__': main()
| mit |
enoordeh/StatisticalMethods | examples/Fermi/SedUtils.py | 1 | 8103 | #!/usr/bin/env python
#
# Description
"""
Utilities for doing example SED manipulation
"""
__facility__ = "SedUtils.py"
__abstract__ = __doc__
__author__ = "E. Charles"
__date__ = "$Date: 2014/07/10 21:51:02 $"
__version__ = "$Revision: 1.1 $, $Author: echarles $"
__release__ = "$Name: $"
import yaml
import numpy as np
from scipy import optimize
from scipy import interpolate
import matplotlib.pyplot as plt
import LikeFitUtils as lfu
class SedBin:
""" This is a small utility class to look up the log-likelihood for a given flux in an energy bin
"""
def __init__(self,sedDict):
""" C'tor, takes a results dictionary as defined in the *_sed.yaml file
"""
self.emin = sedDict["emin"]
self.emax = sedDict["emax"]
self.emean = np.sqrt(self.emin*self.emax)
self.ewidth = self.emax - self.emin
self.eflux2npred = sedDict["eflux2npred"]
self.flux = sedDict["flux"]
self.eflux = sedDict["eflux"]
self.logLike = sedDict["logLike"]
self.interp = interpolate.interp1d(self.flux,self.logLike,bounds_error=True,fill_value=-1e5)
def __call__(self,fluxVal):
""" Return the log-likelihood for a given flux value
"""
return self.interp(fluxVal)
def logLikeFromEFlux(self,efluxVal):
""" Return the log-likelihood for a given energy flux value
"""
fluxVal = efluxVal / self.emean
return self.interp(fluxVal)
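# Example (hypothetical numbers): a SedBin built from one bin of a *_sed.yaml
# file interpolates the log-likelihood as a function of flux, e.g.
#   b = SedBin({'emin': 100., 'emax': 300., 'eflux2npred': 1.0,
#               'flux': [1e-11, 1e-10, 1e-9],
#               'eflux': [1.7e-9, 1.7e-8, 1.7e-7],
#               'logLike': [0., -1., -4.]})
#   b(5e-10) # interpolated log-likelihood at flux = 5e-10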
class SED:
""" This is a small utility class to add together the log-likelihoods for several energy bins
"""
def __init__(self,sedBinList):
""" C'tor, takes a list of results dictionaries as defined in the *_sed.yaml file
"""
self.binList = []
self.nBin = len(sedBinList)
self.energyBins = np.zeros((self.nBin+1))
for i,sedBinDict in enumerate(sedBinList):
sedBin = SedBin(sedBinDict)
self.binList.append(sedBin)
self.energyBins[i] = sedBin.emin
pass
self.energyBins[-1] = sedBin.emax
self.binByBinUls = None
def __call__(self,fluxVals):
""" Return the log-likelihood for a spectrum (i.e., a set of flux values for the various energy bins)
"""
retVal = 0.
for bin,fluxVal in zip(self.binList,fluxVals):
if fluxVal < 0:
retVal += 100.
else:
retVal -= bin(fluxVal)
pass
return retVal
def NLL_func(self,fluxVals):
""" Returns a function of a single normalization paramter that can be passed to a minimizer
fluxVals : The baseline flux values, we will be fitting for a globlal scale factor w.r.t. these values
"""
func = lambda x : self(x*fluxVals)
return func
def Minimize(self,fluxVals,x0):
""" Minimize the negative log-likelihood w.r.t. an overall scale factor, given an input spectrum
        fluxVals : The baseline flux values; we will be fitting for a global scale factor w.r.t. these values
x0 : Initial value for the global scale factor
returns a scipy.optimize result object
"""
ftomin = self.NLL_func(fluxVals)
result = optimize.fmin(ftomin,[x0],full_output=1,disp=0)
return result
def BinByBinULs(self):
""" Calculat the 95% CL Upper limits in each energy bin and return them
"""
if self.binByBinUls is not None:
return self.binByBinUls
self.binByBinUls = np.ndarray((self.nBin))
for i,sedBin in enumerate(self.binList):
xbounds = (sedBin.interp.x[0],sedBin.interp.x[-1])
xinit = sedBin.interp.x[-1]*sedBin.interp.x[-1]
result = optimize.fmin(sedBin,[xinit],full_output=1,disp=0)
mle = result[0][0]
nll_min = result[1]
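            # 95% CL upper limit: solve for the flux where the log-likelihood
            # falls 1.35 units from its optimum (delta(2 lnL) = 2.71, the
            # one-sided 95% threshold); the -1.35 follows LikeFitUtils' sign convention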
ul95 = lfu.SolveForErrorLevel(sedBin,nll_min,-1.35,mle,xbounds)
self.binByBinUls[i] = ul95
pass
return self.binByBinUls
def PlotSED(eBins,uls,bandDict=None):
""" Make a plot of the Spectral Energy Distribution.
In fact this just plots the values as upper limits
eBins : The energy bin edges
uls : The corresponding upper limits
bandDict : An optional dictionary with the expected upper limit quantiles for the Brazil bands
"""
figure = plt.figure()
ax = figure.add_subplot(111)
ax.set_xlabel("E [MeV]")
ax.set_ylabel(r"Energy Flux 95% CL UL [$MeV cm^{-2} s^{-1}$]")
ax.set_xlim(eBins[0],eBins[-1])
ax.set_ylim(1e-8,1e-4)
ax.set_xscale('log')
ax.set_yscale('log',nonposy='clip')
xvals = np.sqrt(eBins[0:-1]*eBins[1:])
xerr = np.array([xvals-eBins[0:-1],eBins[1:]-xvals])
yvals = xvals*uls
yerr = np.array([0.5*yvals,yvals-yvals])
if bandDict:
ax.fill_between(xvals,np.array(bandDict['q05']),np.array(bandDict['q95']),color='y',label='2sigma')
ax.fill_between(xvals,np.array(bandDict['q16']),np.array(bandDict['q84']),color='g',label='1sigma')
ax.loglog(xvals,np.array(bandDict['q50']),ls='--',color='black',label='Median')
ax.errorbar(xvals, yvals, yerr=yerr, uplims=True, lw=1, color='black', ls='none', zorder=1)
ax.errorbar(xvals, yvals, xerr=xerr, lw=1.35, color='black', ls='none', zorder=2, capsize=0)
return figure,ax
def PlotLimits(masses,uls,bandDict=None):
""" Make a plot of the upper limits as a function of mass for a DM channel
masses : The mass values (in GeV)
uls : The corresponding upper limits
bandDict : An optional dictionary with the expected upper limit quantiles for the Brazil bands
"""
figure = plt.figure()
ax = figure.add_subplot(111)
ax.set_xlabel(r"$m_{\chi}$ [GeV]")
ax.set_ylabel(r"$\langle \sigma v \rangle$ 95% CL UL")
ax.set_xlim(masses[0],masses[-1])
ax.set_ylim(1e-27,1e-22)
if bandDict:
ax.fill_between(masses,np.array(bandDict['q05']),np.array(bandDict['q95']),color='y',label='2sigma')
ax.fill_between(masses,np.array(bandDict['q16']),np.array(bandDict['q84']),color='g',label='1sigma')
ax.loglog(masses,np.array(bandDict['q50']),ls='--',color='black',label='Median')
ax.loglog(masses,uls*1e-26,ls='-',color='r',label='NLL')
return figure,ax
if __name__=='__main__':
sedData = yaml.load(open("results/draco_sed.yaml"))
sed = SED(sedData)
dmSpectra = yaml.load(open("ancil/DM_spectra.yaml"))
xbounds = (1e-4,1e6)
error_level = 1.35
"""
sed_uls = sed.BinByBinULs()
fig,ax = PlotSED(sed.energyBins,sed_uls)
"""
resultDict = {}
# Loop over channels
for chan,chanDict in dmSpectra.items():
nMass = len(chanDict)
mle = np.ndarray((nMass))
nll = np.ndarray((nMass))
ts = np.ndarray((nMass))
ul = np.ndarray((nMass))
# Loop over masses, but we want to keep things in order
masses = chanDict.keys()
masses.sort()
for i,mass in enumerate(masses):
fluxVals = chanDict[mass]
print "Working on %s channel at %.1f GeV"%(chan,mass)
nll_func = sed.NLL_func(fluxVals)
result = sed.Minimize(fluxVals,1.0)
mle[i] = result[0][0]
nll[i] = result[1]
nll_null = nll_func(0)
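            # TS is the likelihood-ratio test statistic: twice the difference in
            # log-likelihood between the null (zero signal) and best-fit hypotheses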
ts[i] = 2.*(nll_null-nll[i])
ul[i] = lfu.SolveForErrorLevel(nll_func,nll[i],error_level,mle[i],xbounds)
pass
resultDict[chan] = {"Masses":masses,
"MLE":mle,
"NLL":nll,
"TS":ts,
"UL95":ul}
pass
out = open("results/draco_dm_results.yaml",'w!')
out.write( yaml.dump(resultDict) )
out.close()
bands = yaml.load(open("ancil/draco_spectral_mc_bands.yaml"))
    fig,ax = PlotLimits(resultDict['bb']['Masses'],resultDict['bb']['UL95'],bands['bb']['ulimits'])
| gpl-2.0 |
arabenjamin/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | sklearn/neighbors/tests/test_lof.py | 34 | 4142 | # Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from math import sqrt
import numpy as np
from sklearn import neighbors
from numpy.testing import assert_array_equal
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.datasets import load_iris
# load the iris dataset
# and randomly permute it
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_lof():
# Toy sample (the last two samples are outliers):
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]]
# Test LocalOutlierFactor:
clf = neighbors.LocalOutlierFactor(n_neighbors=5)
score = clf.fit(X).negative_outlier_factor_
assert_array_equal(clf._fit_X, X)
# Assert largest outlier score is smaller than smallest inlier score:
assert_greater(np.min(score[:-2]), np.max(score[-2:]))
# Assert predict() works:
clf = neighbors.LocalOutlierFactor(contamination=0.25,
n_neighbors=5).fit(X)
assert_array_equal(clf._predict(), 6 * [1] + 2 * [-1])
def test_lof_performance():
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
    X_train = X[:100]  # first 100 points form the (normal) training set
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = neighbors.LocalOutlierFactor().fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf._decision_function(X_test)
# check that roc_auc is good
assert_greater(roc_auc_score(y_test, y_pred), .99)
def test_lof_values():
# toy samples:
X_train = [[1, 1], [1, 2], [2, 1]]
clf = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X_train)
s_0 = 2. * sqrt(2.) / (1. + sqrt(2.))
s_1 = (1. + sqrt(2)) * (1. / (4. * sqrt(2.)) + 1. / (2. + 2. * sqrt(2)))
# check predict()
assert_array_almost_equal(-clf.negative_outlier_factor_, [s_0, s_1, s_1])
# check predict(one sample not in train)
assert_array_almost_equal(-clf._decision_function([[2., 2.]]), [s_0])
    # check predict(one sample already in train)
assert_array_almost_equal(-clf._decision_function([[1., 1.]]), [s_1])
def test_lof_precomputed(random_state=42):
"""Tests LOF with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
# As a feature matrix (n_samples by n_features)
lof_X = neighbors.LocalOutlierFactor(n_neighbors=3)
lof_X.fit(X)
pred_X_X = lof_X._predict()
pred_X_Y = lof_X._predict(Y)
# As a dense distance matrix (n_samples by n_samples)
lof_D = neighbors.LocalOutlierFactor(n_neighbors=3, algorithm='brute',
metric='precomputed')
lof_D.fit(DXX)
pred_D_X = lof_D._predict()
pred_D_Y = lof_D._predict(DYX)
assert_array_almost_equal(pred_X_X, pred_D_X)
assert_array_almost_equal(pred_X_Y, pred_D_Y)
def test_n_neighbors_attribute():
X = iris.data
clf = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
clf = neighbors.LocalOutlierFactor(n_neighbors=500)
assert_warns_message(UserWarning,
"n_neighbors will be set to (n_samples - 1)",
clf.fit, X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
| bsd-3-clause |
GaZ3ll3/scikit-image | doc/examples/plot_contours.py | 30 | 1247 | """
===============
Contour finding
===============
``skimage.measure.find_contours`` uses a marching squares method to find
constant valued contours in an image. Array values are linearly interpolated
to provide better precision of the output contours. Contours which intersect
the image edge are open; all others are closed.
The `marching squares algorithm
<http://www.essi.fr/~lingrand/MarchingCubes/algo.html>`__ is a special case of
the marching cubes algorithm (Lorensen, William and Harvey E. Cline. Marching
Cubes: A High Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
# Construct some test data
x, y = np.ogrid[-np.pi:np.pi:100j, -np.pi:np.pi:100j]
r = np.sin(np.exp((np.sin(x)**3 + np.cos(y)**2)))
# Find contours at a constant value of 0.8
contours = measure.find_contours(r, 0.8)
# Display the image and plot all contours found
fig, ax = plt.subplots()
ax.imshow(r, interpolation='nearest', cmap=plt.cm.gray)
for n, contour in enumerate(contours):
ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
ax.axis('image')
ax.set_xticks([])
ax.set_yticks([])
plt.show()
| bsd-3-clause |
kevinpetersavage/BOUT-dev | examples/non-local_1d/analyse_heatflux_limits.py | 3 | 4394 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Runs the conduction example, produces some output
#
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
nproc = 1 # Number of processors to use
from boututils import shell, launch, plotdata
from boutdata import collect
import numpy as np
from sys import argv, exit
from math import sqrt, log10, pow, pi
from matplotlib import pyplot
def sign(x):
if x>=0.:
return 1.
else:
return -1.
massunit = 1.602176565e-19
electron_mass = old_div(9.10938291e-31,massunit)
electron_charge = -1.602176565e-19
ion_mass = old_div(3.34358348e-27,massunit)
ion_charge = 1.602176565e-19
epsilon_0 = old_div(8.85418781762039e-12,pow(massunit,-1))
logLambda = 16.0
stagger = True
t_interval = 10
# Collect the data
Te = collect("T_electron", path="data", info=True, yguards=True)
Ti = collect("T_ion", path="data", info=True, yguards=True)
n = collect("n_ion", path="data", info=True, yguards=True)
ylength = len(Te[0,2,:,0])
tlength = len(Te[:,2,0,0])
try: dy = collect("dy", path="data", info=True, yguards=True)
except TypeError:
print("Warning, could not find dy, setting to 1.0")
dy=[[1.]*ylength]*5
try: g_22 = collect("g_22", path="data", info=True, yguards=True)
except TypeError:
print("Warning, could not find g_22, setting to (80./256)^2")
g_22=[[pow(old_div(80.,256),2)]*ylength]*5
try:
qin = collect("heat_flux", path="data", info=True, yguards=True)
q = [[0.]*ylength for i in range(0,old_div(tlength,t_interval)+1)]
for i in range(0,tlength,t_interval):
for j in range(2,ylength-2):
q[old_div(i,t_interval)][j] = qin[i,2,j,0]
except TypeError:
print("Calculating Braginskii heat flux")
q = [[0.]*ylength for i in range(0,old_div(tlength,t_interval)+1)]
tau_ei = 0
gradT = 0
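    # Braginskii (1965) parallel electron heat flux: q = -3.16 * n * T_e * tau_ei / m_e * dT/dy,
    # with tau_ei = 3 pi^(3/2) eps0^2 sqrt(m_e) (2 T_e)^(3/2) / (n q_e^2 q_i^2 logLambda)
    # the electron-ion collision time; gradT below is a 4th-order central difference.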
for i in range(0,tlength,t_interval):
for j in range(2,ylength-2):
tau_ei = 3 * pow(pi,1.5) * pow(epsilon_0,2) * sqrt(electron_mass) * pow(2.,1.5) * pow(Te[i,2,j,0],1.5) / n[i,2,j,0] / pow(electron_charge,2) / pow(ion_charge,2) / logLambda
gradT = (Te[i,2,j-2,0] - 8.*Te[i,2,j-1,0] + 8.*Te[i,2,j+1,0] - Te[i,2,j+2,0])/12./dy[2][j]/sqrt(g_22[2][j])
q[old_div(i,t_interval)][j] = -3.16*n[i,2,j,0]*Te[i,2,j,0]*tau_ei/electron_mass*gradT
stagger = False
Temax = [0.]*(old_div(tlength,t_interval)+1)
for i in range(0,tlength,t_interval):
Temax[old_div(i,t_interval)] = max(Te[i,2,:,0])
Timax = [0.]*(old_div(tlength,t_interval)+1)
for i in range(0,tlength,t_interval):
Timax[old_div(i,t_interval)] = max(Ti[i,2,:,0])
nmax = [0.]*(old_div(tlength,t_interval)+1)
for i in range(0,tlength,t_interval):
nmax[old_div(i,t_interval)] = max(n[i,2,:,0])
freestreammax = [0.]*(old_div(tlength,t_interval)+1)
for i in range(old_div(tlength,t_interval)+1):
freestreammax[i]=3./2.*nmax[i]*Temax[i]*sqrt(old_div((Temax[i]+Timax[i]),ion_mass))
#freestreammax[i]=3./2.*nmax[i]*Temax[i]*sqrt(2*Temax[i]/electron_mass)
#freestream = [[i]*ylength for i in range(tlength/t_interval+1)]
#for i in range(0,tlength,t_interval):
#for j in range(2,ylength-2):
#freestream[i/t_interval][j] = sign(j-ylength/2)*3./2.*n[i,2,j,0]*Te[i,2,j,0]*sqrt((Te[i,2,j,0]+Ti[i,2,j,0])/ion_mass)
#freestream = [[i]*ylength for i in range(tlength/t_interval+1)]
#for i in range(0,tlength,t_interval):
#for j in range(2,ylength-2):
#freestream[i/t_interval][j] = sign(j-ylength/2)*3./2.*n[i,2,j,0]*Te[i,2,j,0]*sqrt(2*Te[i,2,j,0]/electron_mass)
#for i in range(0,tlength,t_interval):
#pyplot.figure(i)
#pyplot.plot(range(2,ylength-2),q[i/t_interval][2:-2],'k', range(2,ylength-2),freestream[i/t_interval][2:-2], 'r')
for i in range(0,tlength,t_interval):
pyplot.figure(i)
    pyplot.plot(list(range(2,ylength-2)),q[old_div(i,t_interval)][2:-2],'k', list(range(2,ylength-2)),[freestreammax[old_div(i,t_interval)]]*(ylength-4), 'r', list(range(2,ylength-2)),[-freestreammax[old_div(i,t_interval)]]*(ylength-4), 'r')
pyplot.show()
#pyplot.figure(5)
#pyplot.plot(logtime,q_electron_left,'r',logtime,q_ion_left,'b',logtime,q_total_left,'k')
#pyplot.title("Electron (red), Ion (blue) and Total (black) Heat Flux vs log(t)")
#pyplot.figure(6)
#pyplot.plot(q_electron_left,'r',q_ion_left,'b',q_total_left,'k')
#pyplot.title("Electron (red), Ion (blue) and Total (black) Heat Flux")
#pyplot.xlabel(u"t/μs")
#pyplot.ylabel("Q/W.m$^{-2}$")
#pyplot.show()
| gpl-3.0 |
yunfeilu/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
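    Examples
    --------
    A minimal, illustrative sketch (synthetic data; not a doctest)::
        import numpy as np
        from sklearn.linear_model import PassiveAggressiveClassifier
        X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
        y = np.array([0, 0, 1, 1])
        clf = PassiveAggressiveClassifier(random_state=0)
        clf.fit(X, y)
        print(clf.predict([[2.5, 2.5]]))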
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
    coef_ : array, shape = [n_features]
        Weights assigned to the features.
    intercept_ : array, shape = [1]
        Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
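    Examples
    --------
    A minimal, illustrative sketch (synthetic data; not a doctest)::
        import numpy as np
        from sklearn.linear_model import PassiveAggressiveRegressor
        X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
        y = np.array([0., 1., 2., 3.])
        reg = PassiveAggressiveRegressor(random_state=0)
        reg.fit(X, y)
        print(reg.predict([[2.5, 2.5]]))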
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
stuart-knock/bokeh | examples/plotting/file/boxplot.py | 43 | 2269 | import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_file
# Generate some synthetic time series for six different categories
cats = list("abcdef")
yy = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
yy[g == l] += i // 2
df = pd.DataFrame(dict(score=yy, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
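# Tukey's rule: points outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are treated as outliers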
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting, we need coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
output_file("boxplot.html")
p = figure(tools="save", background_fill="#EFE8E2", title="", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
# boxes
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
show(p)
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
shafiquejamal/easyframes | easyframes/test/test_egen.py | 1 | 4771 | import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
import numpy as np
from easyframes.easyframes import hhkit
# from easyframes.easyframes import hhkit
class TestEgen(unittest.TestCase):
def setUp(self):
"""
df_original = pd.read_csv('sample_hh_dataset.csv')
df = df_original.copy()
print(df.to_dict())
"""
self.df = pd.DataFrame(
{'educ': {0: 'secondary', 1: 'bachelor', 2: 'primary', 3: 'higher', 4: 'bachelor', 5: 'secondary',
6: 'higher', 7: 'higher', 8: 'primary', 9: 'primary'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4, 7: 4, 8: 4, 9: 4},
'has_car': {0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1},
'weighthh': {0: 2, 1: 2, 2: 2, 3: 3, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3, 9: 3},
'house_rooms': {0: 3, 1: 3, 2: 3, 3: 2, 4: 1, 5: 1, 6: 3, 7: 3, 8: 3, 9: 3},
'prov': {0: 'BC', 1: 'BC', 2: 'BC', 3: 'Alberta', 4: 'BC', 5: 'BC', 6: 'Alberta',
7: 'Alberta', 8: 'Alberta', 9: 'Alberta'},
'id': {0: 1, 1: 2, 2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 3, 9: 4},
'age': {0: 44, 1: 43, 2: 13, 3: 70, 4: 23, 5: 20, 6: 37, 7: 35, 8: 8, 9: 15},
'fridge': {0: 'yes', 1: 'yes', 2: 'yes', 3: 'no', 4: 'yes', 5: 'yes', 6: 'no',
7: 'no', 8: 'no', 9: 'no'},
'male': {0: 1, 1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 0, 9: 0}})
self.my_include = np.array([False, False, False, True, True, False, True, True, True, False])
self.my_include_using_integer = np.array([0, 0, 0, 1, 5, 0, -10, -30, -1, 0])
self.my_include_using_float = np.array([0, 0, 0, 1, 10.3, 0, -10, -30, -1, 0])
self.my_include_using_nonnumeric = np.array(['0', 0, 0, 1, 10.3, 0, -10, -30, -1, 0])
def test_reject_both_exclude_and_include(self):
myhhkit = hhkit()
try:
            df2 = myhhkit.egen(operation='count', groupby='hh', column='hh',
exclude=self.my_include, include=self.my_include)
except:
return True
raise Exception("Both include and exclude were allowed")
def test_no_include_no_exclude_includes_all_rows(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='count', groupby='hh', column='hh')
correct_values = pd.Series([3, 3, 3, 1, 2, 2, 4, 4, 4, 4])
assert_series_equal(correct_values, myhhkit.df['(count) hh by hh'])
def test_specify_include_yields_correct_results_count(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='count', groupby='hh', column='hh', include=self.my_include)
correct_values = pd.Series([np.nan, np.nan, np.nan, 1, 1, 1, 3, 3, 3, 3])
assert_series_equal(correct_values, myhhkit.df['(count) hh by hh'])
def test_specify_include_yields_correct_results_mean(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='mean', groupby='hh', column='age', include=self.my_include)
correct_values = pd.Series([np.nan, np.nan, np.nan, 70, 23, 23, 26.666666, 26.666666,
26.666666, 26.666666])
assert_series_equal(correct_values, myhhkit.df['(mean) age by hh'])
def test_specify_exclude_yields_correct_results_count(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='count', groupby='hh', column='hh', exclude=self.my_include)
correct_values = pd.Series([3, 3, 3, np.nan, 1, 1, 1, 1, 1, 1])
assert_series_equal(correct_values, myhhkit.df['(count) hh by hh'])
def test_specify_exclude_yields_correct_results_mean(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='mean', groupby='hh', column='age', exclude=self.my_include)
correct_values = pd.Series([33.333333, 33.333333, 33.333333, np.nan, 20, 20, 15, 15, 15, 15])
assert_series_equal(correct_values, myhhkit.df['(mean) age by hh'])
def test_using_numeric_exclude_type(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='mean', groupby='hh', column='age', exclude=self.my_include_using_integer)
correct_values = pd.Series([33.333333, 33.333333, 33.333333, np.nan, 20, 20, 15, 15, 15, 15])
assert_series_equal(correct_values, myhhkit.df['(mean) age by hh'])
def test_using_float_exclude_type(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='mean', groupby='hh', column='age', exclude=self.my_include_using_float)
correct_values = pd.Series([33.333333, 33.333333, 33.333333, np.nan, 20, 20, 15, 15, 15, 15])
assert_series_equal(correct_values, myhhkit.df['(mean) age by hh'])
def test_using_nonnumeric_exclude_type(self):
        myhhkit = hhkit(self.df)
        try:
            df2 = myhhkit.egen(operation='mean', groupby='hh', column='age',
                exclude=self.my_include_using_nonnumeric)
        except:
            return True
        # unittest ignores return values, so fail loudly if no error was raised
        raise Exception("A non-numeric exclude criterion was allowed")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
caisq/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 46 | 13101 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
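# Note: the FIFOQueue above never feeds the input pipeline; the do-nothing
# queue runner is registered (it appears) so that tests such as
# testPredictInputFnWithQueue below can verify that predict() starts and
# drains queue runners supplied by the input_fn.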
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
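# boston_eval_fn returns the dataset concatenated with itself, presumably so
# that evaluation exercises more than a single pass over the raw examples.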
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
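# Note: unlike the model fns above, this one deliberately takes only
# (features, labels) -- no `mode` argument -- exercising the Estimator's
# support for mode-less model_fn signatures.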
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(predictions['class'],
np.argmax(predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
yongtang/tensorflow | tensorflow/tools/compatibility/renames_v2.py | 6 | 61072 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
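# A hedged sketch of how a consumer might apply this table; the real upgrade
# tooling (tf_upgrade_v2 / ast_edits) rewrites the AST rather than raw text,
# so `apply_renames` below is illustrative only:
#
#   import re
#
#   def apply_renames(source):
#       # Replace longest keys first so that a full dotted name wins over
#       # any shorter prefix, and bind on a word boundary so 'tf.log' does
#       # not match inside 'tf.log1p'.
#       for old in sorted(renames, key=len, reverse=True):
#           source = re.sub(re.escape(old) + r'\b', renames[old], source)
#       return source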
renames = {
'tf.AUTO_REUSE':
'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue':
'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION':
'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG':
'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator':
'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase':
'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto':
'tf.compat.v1.ConfigProto',
'tf.Dimension':
'tf.compat.v1.Dimension',
'tf.Event':
'tf.compat.v1.Event',
'tf.FIFOQueue':
'tf.queue.FIFOQueue',
'tf.FixedLenFeature':
'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature':
'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader':
'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION':
'tf.version.GIT_VERSION',
'tf.GPUOptions':
'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION':
'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER':
'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER':
'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef':
'tf.compat.v1.GraphDef',
'tf.GraphKeys':
'tf.compat.v1.GraphKeys',
'tf.GraphOptions':
'tf.compat.v1.GraphOptions',
'tf.HistogramProto':
'tf.compat.v1.HistogramProto',
'tf.IdentityReader':
'tf.compat.v1.IdentityReader',
'tf.InteractiveSession':
'tf.compat.v1.InteractiveSession',
'tf.LMDBReader':
'tf.compat.v1.LMDBReader',
'tf.LogMessage':
'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD':
'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef':
'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList':
'tf.compat.v1.NameAttrList',
'tf.NoGradient':
'tf.no_gradient',
'tf.NodeDef':
'tf.compat.v1.NodeDef',
'tf.NotDifferentiable':
'tf.no_gradient',
'tf.OpError':
'tf.errors.OpError',
'tf.OptimizerOptions':
'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.Print':
'tf.compat.v1.Print',
'tf.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES':
'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase':
'tf.queue.QueueBase',
'tf.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.ReaderBase':
'tf.compat.v1.ReaderBase',
'tf.RunMetadata':
'tf.compat.v1.RunMetadata',
'tf.RunOptions':
'tf.compat.v1.RunOptions',
'tf.Session':
'tf.compat.v1.Session',
'tf.SessionLog':
'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator':
'tf.compat.v1.SparseConditionalAccumulator',
'tf.SparseFeature':
'tf.io.SparseFeature',
'tf.SparseTensorValue':
'tf.compat.v1.SparseTensorValue',
'tf.Summary':
'tf.compat.v1.Summary',
'tf.SummaryMetadata':
'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader':
'tf.compat.v1.TFRecordReader',
'tf.TensorInfo':
'tf.compat.v1.TensorInfo',
'tf.TextLineReader':
'tf.compat.v1.TextLineReader',
'tf.VERSION':
'tf.version.VERSION',
'tf.VarLenFeature':
'tf.io.VarLenFeature',
'tf.VariableScope':
'tf.compat.v1.VariableScope',
'tf.WholeFileReader':
'tf.compat.v1.WholeFileReader',
'tf.accumulate_n':
'tf.math.accumulate_n',
'tf.add_check_numerics_ops':
'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection':
'tf.compat.v1.add_to_collection',
'tf.add_to_collections':
'tf.compat.v1.add_to_collections',
'tf.all_variables':
'tf.compat.v1.all_variables',
'tf.angle':
'tf.math.angle',
'tf.app.run':
'tf.compat.v1.app.run',
'tf.assert_greater_equal':
'tf.compat.v1.assert_greater_equal',
'tf.assert_integer':
'tf.debugging.assert_integer',
'tf.assert_less_equal':
'tf.compat.v1.assert_less_equal',
'tf.assert_near':
'tf.compat.v1.assert_near',
'tf.assert_negative':
'tf.compat.v1.assert_negative',
'tf.assert_non_negative':
'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive':
'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal':
'tf.compat.v1.assert_none_equal',
'tf.assert_positive':
'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable':
'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least':
'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in':
'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype':
'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar':
'tf.compat.v1.assert_scalar',
'tf.assert_type':
'tf.debugging.assert_type',
'tf.assert_variables_initialized':
'tf.compat.v1.assert_variables_initialized',
'tf.assign':
'tf.compat.v1.assign',
'tf.assign_add':
'tf.compat.v1.assign_add',
'tf.assign_sub':
'tf.compat.v1.assign_sub',
'tf.batch_scatter_update':
'tf.compat.v1.batch_scatter_update',
'tf.betainc':
'tf.math.betainc',
'tf.ceil':
'tf.math.ceil',
'tf.check_numerics':
'tf.debugging.check_numerics',
'tf.cholesky':
'tf.linalg.cholesky',
'tf.cholesky_solve':
'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm':
'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with':
'tf.compat.v1.colocate_with',
'tf.conj':
'tf.math.conj',
'tf.container':
'tf.compat.v1.container',
'tf.control_flow_v2_enabled':
'tf.compat.v1.control_flow_v2_enabled',
'tf.convert_to_tensor_or_indexed_slices':
'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor':
'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to':
'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables':
'tf.compat.v1.create_partitioned_variables',
'tf.cross':
'tf.linalg.cross',
'tf.cumprod':
'tf.math.cumprod',
'tf.data.get_output_classes':
'tf.compat.v1.data.get_output_classes',
'tf.data.get_output_shapes':
'tf.compat.v1.data.get_output_shapes',
'tf.data.get_output_types':
'tf.compat.v1.data.get_output_types',
'tf.data.make_initializable_iterator':
'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator':
'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite':
'tf.math.is_finite',
'tf.debugging.is_inf':
'tf.math.is_inf',
'tf.debugging.is_nan':
'tf.math.is_nan',
'tf.debugging.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.decode_base64':
'tf.io.decode_base64',
'tf.decode_compressed':
'tf.io.decode_compressed',
'tf.decode_json_example':
'tf.io.decode_json_example',
'tf.delete_session_tensor':
'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space':
'tf.compat.v1.depth_to_space',
'tf.dequantize':
'tf.quantization.dequantize',
'tf.deserialize_many_sparse':
'tf.io.deserialize_many_sparse',
'tf.diag':
'tf.linalg.tensor_diag',
'tf.diag_part':
'tf.linalg.tensor_diag_part',
'tf.digamma':
'tf.math.digamma',
'tf.dimension_at_index':
'tf.compat.dimension_at_index',
'tf.dimension_value':
'tf.compat.dimension_value',
'tf.disable_control_flow_v2':
'tf.compat.v1.disable_control_flow_v2',
'tf.disable_eager_execution':
'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables':
'tf.compat.v1.disable_resource_variables',
'tf.disable_tensor_equality':
'tf.compat.v1.disable_tensor_equality',
'tf.disable_v2_behavior':
'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape':
'tf.compat.v1.disable_v2_tensorshape',
'tf.distribute.experimental.ParameterServerStrategy':
'tf.compat.v1.distribute.experimental.ParameterServerStrategy',
'tf.distribute.get_loss_reduction':
'tf.compat.v1.distribute.get_loss_reduction',
'tf.distributions.Bernoulli':
'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta':
'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical':
'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet':
'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial':
'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution':
'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential':
'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED':
'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma':
'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace':
'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial':
'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED':
'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal':
'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL':
'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType':
'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT':
'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform':
'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence':
'tf.compat.v1.distributions.kl_divergence',
'tf.div':
'tf.compat.v1.div',
'tf.div_no_nan':
'tf.math.divide_no_nan',
'tf.dtypes.as_string':
'tf.strings.as_string',
'tf.enable_control_flow_v2':
'tf.compat.v1.enable_control_flow_v2',
'tf.enable_eager_execution':
'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables':
'tf.compat.v1.enable_resource_variables',
'tf.enable_tensor_equality':
'tf.compat.v1.enable_tensor_equality',
'tf.enable_v2_behavior':
'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape':
'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64':
'tf.io.encode_base64',
'tf.erf':
'tf.math.erf',
'tf.erfc':
'tf.math.erfc',
'tf.estimator.experimental.KMeans':
'tf.compat.v1.estimator.experimental.KMeans',
'tf.estimator.experimental.dnn_logit_fn_builder':
'tf.compat.v1.estimator.experimental.dnn_logit_fn_builder',
'tf.estimator.experimental.linear_logit_fn_builder':
'tf.compat.v1.estimator.experimental.linear_logit_fn_builder',
'tf.estimator.inputs.numpy_input_fn':
'tf.compat.v1.estimator.inputs.numpy_input_fn',
'tf.estimator.inputs.pandas_input_fn':
'tf.compat.v1.estimator.inputs.pandas_input_fn',
'tf.estimator.tpu.InputPipelineConfig':
'tf.compat.v1.estimator.tpu.InputPipelineConfig',
'tf.estimator.tpu.RunConfig':
'tf.compat.v1.estimator.tpu.RunConfig',
'tf.estimator.tpu.TPUConfig':
'tf.compat.v1.estimator.tpu.TPUConfig',
'tf.estimator.tpu.TPUEstimator':
'tf.compat.v1.estimator.tpu.TPUEstimator',
'tf.estimator.tpu.TPUEstimatorSpec':
'tf.compat.v1.estimator.tpu.TPUEstimatorSpec',
'tf.estimator.tpu.experimental.EmbeddingConfigSpec':
'tf.compat.v1.estimator.tpu.experimental.EmbeddingConfigSpec',
'tf.executing_eagerly_outside_functions':
'tf.compat.v1.executing_eagerly_outside_functions',
'tf.experimental.output_all_intermediates':
'tf.compat.v1.experimental.output_all_intermediates',
'tf.expm1':
'tf.math.expm1',
'tf.fake_quant_with_min_max_args':
'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient':
'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars':
'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient':
'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel':
'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient':
'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer':
'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model':
'tf.compat.v1.feature_column.linear_model',
'tf.feature_column.shared_embedding_columns':
'tf.compat.v1.feature_column.shared_embedding_columns',
'tf.fft':
'tf.signal.fft',
'tf.fft2d':
'tf.signal.fft2d',
'tf.fft3d':
'tf.signal.fft3d',
'tf.fixed_size_partitioner':
'tf.compat.v1.fixed_size_partitioner',
'tf.floordiv':
'tf.math.floordiv',
'tf.floormod':
'tf.math.floormod',
'tf.get_collection':
'tf.compat.v1.get_collection',
'tf.get_collection_ref':
'tf.compat.v1.get_collection_ref',
'tf.get_default_graph':
'tf.compat.v1.get_default_graph',
'tf.get_default_session':
'tf.compat.v1.get_default_session',
'tf.get_local_variable':
'tf.compat.v1.get_local_variable',
'tf.get_seed':
'tf.compat.v1.get_seed',
'tf.get_session_handle':
'tf.compat.v1.get_session_handle',
'tf.get_session_tensor':
'tf.compat.v1.get_session_tensor',
'tf.get_variable':
'tf.compat.v1.get_variable',
'tf.get_variable_scope':
'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile':
'tf.compat.v1.gfile.FastGFile',
'tf.global_norm':
'tf.linalg.global_norm',
'tf.global_variables':
'tf.compat.v1.global_variables',
'tf.global_variables_initializer':
'tf.compat.v1.global_variables_initializer',
'tf.graph_util.convert_variables_to_constants':
'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph':
'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu':
'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes':
'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name':
'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft':
'tf.signal.ifft',
'tf.ifft2d':
'tf.signal.ifft2d',
'tf.ifft3d':
'tf.signal.ifft3d',
'tf.igamma':
'tf.math.igamma',
'tf.igammac':
'tf.math.igammac',
'tf.imag':
'tf.math.imag',
'tf.image.resize_area':
'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic':
'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear':
'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_image_with_crop_or_pad':
'tf.image.resize_with_crop_or_pad',
'tf.image.resize_image_with_pad':
'tf.compat.v1.image.resize_image_with_pad',
'tf.image.resize_nearest_neighbor':
'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image':
'tf.image.transpose',
'tf.initialize_all_tables':
'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables':
'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables':
'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables':
'tf.compat.v1.initialize_variables',
'tf.initializers.global_variables':
'tf.compat.v1.initializers.global_variables',
'tf.initializers.local_variables':
'tf.compat.v1.initializers.local_variables',
'tf.initializers.tables_initializer':
'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.uniform_unit_scaling':
'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables':
'tf.compat.v1.initializers.variables',
'tf.invert_permutation':
'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.io.QueueBase':
'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.io.TFRecordCompressionType':
'tf.compat.v1.io.TFRecordCompressionType',
'tf.io.tf_record_iterator':
'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite':
'tf.math.is_finite',
'tf.is_inf':
'tf.math.is_inf',
'tf.is_nan':
'tf.math.is_nan',
'tf.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.is_numeric_tensor':
'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.is_variable_initialized':
'tf.compat.v1.is_variable_initialized',
'tf.keras.backend.get_session':
'tf.compat.v1.keras.backend.get_session',
'tf.keras.backend.set_session':
'tf.compat.v1.keras.backend.set_session',
'tf.keras.experimental.export_saved_model':
'tf.compat.v1.keras.experimental.export_saved_model',
'tf.keras.experimental.load_from_saved_model':
'tf.compat.v1.keras.experimental.load_from_saved_model',
'tf.keras.layers.CuDNNGRU':
'tf.compat.v1.keras.layers.CuDNNGRU',
'tf.keras.layers.CuDNNLSTM':
'tf.compat.v1.keras.layers.CuDNNLSTM',
'tf.keras.layers.disable_v2_dtype_behavior':
'tf.compat.v1.keras.layers.disable_v2_dtype_behavior',
'tf.keras.layers.enable_v2_dtype_behavior':
'tf.compat.v1.keras.layers.enable_v2_dtype_behavior',
'tf.keras.losses.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.losses.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.layers.AveragePooling1D':
'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D':
'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D':
'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization':
'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D':
'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D':
'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose':
'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D':
'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose':
'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense':
'tf.compat.v1.layers.Dense',
'tf.layers.Dropout':
'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten':
'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec':
'tf.keras.layers.InputSpec',
'tf.layers.Layer':
'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D':
'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D':
'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D':
'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D':
'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D':
'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d':
'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d':
'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d':
'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization':
'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d':
'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d':
'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose':
'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d':
'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose':
'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense':
'tf.compat.v1.layers.dense',
'tf.layers.dropout':
'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope':
'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style':
'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten':
'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d':
'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d':
'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d':
'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d':
'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d':
'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta':
'tf.math.lbeta',
'tf.lgamma':
'tf.math.lgamma',
'tf.lin_space':
'tf.linspace',
'tf.linalg.transpose':
'tf.linalg.matrix_transpose',
'tf.lite.OpHint':
'tf.compat.v1.lite.OpHint',
'tf.lite.TocoConverter':
'tf.compat.v1.lite.TocoConverter',
'tf.lite.constants.FLOAT16':
'tf.compat.v1.lite.constants.FLOAT16',
'tf.lite.constants.GRAPHVIZ_DOT':
'tf.compat.v1.lite.constants.GRAPHVIZ_DOT',
'tf.lite.constants.INT8':
'tf.compat.v1.lite.constants.INT8',
'tf.lite.constants.TFLITE':
'tf.compat.v1.lite.constants.TFLITE',
'tf.lite.experimental.convert_op_hints_to_stubs':
'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs',
'tf.lite.toco_convert':
'tf.compat.v1.lite.toco_convert',
'tf.local_variables':
'tf.compat.v1.local_variables',
'tf.local_variables_initializer':
'tf.compat.v1.local_variables_initializer',
'tf.log':
'tf.math.log',
'tf.log1p':
'tf.math.log1p',
'tf.log_sigmoid':
'tf.math.log_sigmoid',
'tf.logging.DEBUG':
'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR':
'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL':
'tf.compat.v1.logging.FATAL',
'tf.logging.INFO':
'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage':
'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN':
'tf.compat.v1.logging.WARN',
'tf.logging.debug':
'tf.compat.v1.logging.debug',
'tf.logging.error':
'tf.compat.v1.logging.error',
'tf.logging.fatal':
'tf.compat.v1.logging.fatal',
'tf.logging.flush':
'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity':
'tf.compat.v1.logging.get_verbosity',
'tf.logging.info':
'tf.compat.v1.logging.info',
'tf.logging.log':
'tf.compat.v1.logging.log',
'tf.logging.log_every_n':
'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n':
'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if':
'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity':
'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog':
'tf.compat.v1.logging.vlog',
'tf.logging.warn':
'tf.compat.v1.logging.warn',
'tf.logging.warning':
'tf.compat.v1.logging.warning',
'tf.logical_xor':
'tf.math.logical_xor',
'tf.losses.Reduction':
'tf.compat.v1.losses.Reduction',
'tf.losses.absolute_difference':
'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss':
'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss':
'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance':
'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses':
'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss':
'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses':
'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss':
'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss':
'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss':
'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss':
'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error':
'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error':
'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy':
'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy':
'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy':
'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template':
'tf.compat.v1.make_template',
'tf.manip.gather_nd':
'tf.compat.v1.manip.gather_nd',
'tf.manip.reshape':
'tf.reshape',
'tf.manip.reverse':
'tf.reverse',
'tf.manip.roll':
'tf.roll',
'tf.manip.scatter_nd':
'tf.scatter_nd',
'tf.manip.space_to_batch_nd':
'tf.space_to_batch_nd',
'tf.manip.tile':
'tf.tile',
'tf.matching_files':
'tf.io.matching_files',
'tf.matrix_band_part':
'tf.linalg.band_part',
'tf.matrix_determinant':
'tf.linalg.det',
'tf.matrix_diag':
'tf.linalg.diag',
'tf.matrix_diag_part':
'tf.linalg.diag_part',
'tf.matrix_inverse':
'tf.linalg.inv',
'tf.matrix_set_diag':
'tf.linalg.set_diag',
'tf.matrix_solve':
'tf.linalg.solve',
'tf.matrix_solve_ls':
'tf.linalg.lstsq',
'tf.matrix_transpose':
'tf.linalg.matrix_transpose',
'tf.matrix_triangular_solve':
'tf.linalg.triangular_solve',
'tf.mixed_precision.DynamicLossScale':
'tf.compat.v1.mixed_precision.DynamicLossScale',
'tf.mixed_precision.FixedLossScale':
'tf.compat.v1.mixed_precision.FixedLossScale',
'tf.mixed_precision.LossScale':
'tf.compat.v1.mixed_precision.LossScale',
'tf.metrics.accuracy':
'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc':
'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k':
'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives':
'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds':
'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives':
'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds':
'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean':
'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error':
'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance':
'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou':
'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy':
'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error':
'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error':
'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor':
'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below':
'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision':
'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k':
'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds':
'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k':
'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall':
'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k':
'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds':
'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k':
'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error':
'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity':
'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k':
'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k':
'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity':
'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives':
'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds':
'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives':
'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds':
'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner':
'tf.compat.v1.min_max_variable_partitioner',
'tf.mixed_precision.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.mixed_precision.MixedPrecisionLossScaleOptimizer',
'tf.mixed_precision.disable_mixed_precision_graph_rewrite':
'tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite',
'tf.mixed_precision.enable_mixed_precision_graph_rewrite':
'tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite',
'tf.mod':
'tf.math.floormod',
'tf.model_variables':
'tf.compat.v1.model_variables',
'tf.moving_average_variables':
'tf.compat.v1.moving_average_variables',
'tf.nn.avg_pool_v2':
'tf.nn.avg_pool',
'tf.nn.bidirectional_dynamic_rnn':
'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv2d_backprop_filter':
'tf.compat.v1.nn.conv2d_backprop_filter',
'tf.nn.conv3d_backprop_filter':
'tf.compat.v1.nn.conv3d_backprop_filter',
'tf.nn.conv3d_backprop_filter_v2':
'tf.compat.v1.nn.conv3d_backprop_filter_v2',
'tf.nn.ctc_beam_search_decoder_v2':
'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2':
'tf.compat.v1.nn.ctc_loss_v2',
'tf.nn.depthwise_conv2d_native':
'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter':
'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input':
'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn':
'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler':
'tf.random.log_uniform_candidate_sampler',
'tf.nn.max_pool_v2':
'tf.nn.max_pool',
'tf.nn.quantized_avg_pool':
'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d':
'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool':
'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x':
'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn':
'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer':
'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell':
'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell':
'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DeviceWrapper':
'tf.compat.v1.nn.rnn_cell.DeviceWrapper',
'tf.nn.rnn_cell.DropoutWrapper':
'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell':
'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell':
'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.LSTMStateTuple':
'tf.compat.v1.nn.rnn_cell.LSTMStateTuple',
'tf.nn.rnn_cell.MultiRNNCell':
'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.rnn_cell.RNNCell':
'tf.compat.v1.nn.rnn_cell.RNNCell',
'tf.nn.rnn_cell.ResidualWrapper':
'tf.compat.v1.nn.rnn_cell.ResidualWrapper',
'tf.nn.static_bidirectional_rnn':
'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn':
'tf.compat.v1.nn.static_rnn',
'tf.nn.static_state_saving_rnn':
'tf.compat.v1.nn.static_state_saving_rnn',
'tf.nn.uniform_candidate_sampler':
'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b':
'tf.compat.v1.nn.xw_plus_b',
'tf.no_regularizer':
'tf.compat.v1.no_regularizer',
'tf.op_scope':
'tf.compat.v1.op_scope',
'tf.parse_single_sequence_example':
'tf.io.parse_single_sequence_example',
'tf.parse_tensor':
'tf.io.parse_tensor',
'tf.placeholder':
'tf.compat.v1.placeholder',
'tf.placeholder_with_default':
'tf.compat.v1.placeholder_with_default',
'tf.polygamma':
'tf.math.polygamma',
'tf.profiler.AdviceProto':
'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto':
'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto':
'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto':
'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder':
'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler':
'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise':
'tf.compat.v1.profiler.advise',
'tf.profiler.profile':
'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log':
'tf.compat.v1.profiler.write_op_log',
'tf.py_func':
'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType':
'tf.compat.v1.python_io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions':
'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter':
'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator':
'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr':
'tf.linalg.qr',
'tf.quantize':
'tf.quantization.quantize',
'tf.quantized_concat':
'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue':
'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value':
'tf.compat.v1.ragged.constant_value',
'tf.ragged.placeholder':
'tf.compat.v1.ragged.placeholder',
'tf.random.get_seed':
'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed':
'tf.compat.v1.random.set_random_seed',
'tf.random_crop':
'tf.image.random_crop',
'tf.random_gamma':
'tf.random.gamma',
'tf.random_normal':
'tf.random.normal',
'tf.random_shuffle':
'tf.random.shuffle',
'tf.random_uniform':
'tf.random.uniform',
'tf.read_file':
'tf.io.read_file',
'tf.real':
'tf.math.real',
'tf.reciprocal':
'tf.math.reciprocal',
'tf.regex_replace':
'tf.strings.regex_replace',
'tf.report_uninitialized_variables':
'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph':
'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path':
'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile':
'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources':
'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource':
'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path':
'tf.compat.v1.resource_loader.readahead_file_path',
'tf.resource_variables_enabled':
'tf.compat.v1.resource_variables_enabled',
'tf.reverse_v2':
'tf.reverse',
'tf.rint':
'tf.math.rint',
'tf.rsqrt':
'tf.math.rsqrt',
'tf.saved_model.Builder':
'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY':
'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_signature_def':
'tf.compat.v1.saved_model.build_signature_def',
'tf.saved_model.build_tensor_info':
'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder':
'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.classification_signature_def':
'tf.compat.v1.saved_model.classification_signature_def',
'tf.saved_model.constants.ASSETS_DIRECTORY':
'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY':
'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.DEBUG_DIRECTORY':
'tf.saved_model.DEBUG_DIRECTORY',
'tf.saved_model.constants.DEBUG_INFO_FILENAME_PB':
'tf.saved_model.DEBUG_INFO_FILENAME_PB',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY':
'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB':
'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT':
'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION':
'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY':
'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME':
'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save':
'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.is_valid_signature':
'tf.compat.v1.saved_model.is_valid_signature',
'tf.saved_model.loader.load':
'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory':
'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op':
'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore':
'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore':
'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory':
'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.predict_signature_def':
'tf.compat.v1.saved_model.predict_signature_def',
'tf.saved_model.regression_signature_def':
'tf.compat.v1.saved_model.regression_signature_def',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS':
'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME':
'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES':
'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES':
'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY':
'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS':
'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME':
'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS':
'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS':
'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME':
'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS':
'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.MethodNameUpdater':
'tf.compat.v1.saved_model.signature_def_utils.MethodNameUpdater',
'tf.saved_model.signature_def_utils.build_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature':
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def',
'tf.saved_model.simple_save':
'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU':
'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING':
'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU':
'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING':
'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info':
'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add':
'tf.compat.v1.scatter_add',
'tf.scatter_div':
'tf.compat.v1.scatter_div',
'tf.scatter_max':
'tf.compat.v1.scatter_max',
'tf.scatter_min':
'tf.compat.v1.scatter_min',
'tf.scatter_mul':
'tf.compat.v1.scatter_mul',
'tf.scatter_nd_add':
'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub':
'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_max':
'tf.compat.v1.scatter_nd_max',
'tf.scatter_nd_min':
'tf.compat.v1.scatter_nd_min',
'tf.scatter_nd_update':
'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub':
'tf.compat.v1.scatter_sub',
'tf.scatter_update':
'tf.compat.v1.scatter_update',
'tf.segment_max':
'tf.math.segment_max',
'tf.segment_mean':
'tf.math.segment_mean',
'tf.segment_min':
'tf.math.segment_min',
'tf.segment_prod':
'tf.math.segment_prod',
'tf.segment_sum':
'tf.math.segment_sum',
'tf.self_adjoint_eig':
'tf.linalg.eigh',
'tf.self_adjoint_eigvals':
'tf.linalg.eigvalsh',
'tf.serialize_many_sparse':
'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse':
'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor':
'tf.io.serialize_tensor',
'tf.set_random_seed':
'tf.compat.v1.set_random_seed',
'tf.setdiff1d':
'tf.compat.v1.setdiff1d',
'tf.sets.set_difference':
'tf.sets.difference',
'tf.sets.set_intersection':
'tf.sets.intersection',
'tf.sets.set_size':
'tf.sets.size',
'tf.sets.set_union':
'tf.sets.union',
'tf.space_to_depth':
'tf.compat.v1.space_to_depth',
'tf.sparse.SparseConditionalAccumulator':
'tf.compat.v1.sparse.SparseConditionalAccumulator',
'tf.sparse.matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge':
'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder':
'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse':
'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse':
'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows':
'tf.sparse.fill_empty_rows',
'tf.sparse_mask':
'tf.sparse.mask',
'tf.sparse_maximum':
'tf.sparse.maximum',
'tf.sparse_merge':
'tf.compat.v1.sparse_merge',
'tf.sparse_minimum':
'tf.sparse.minimum',
'tf.sparse_placeholder':
'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse':
'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse':
'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder':
'tf.sparse.reorder',
'tf.sparse_reset_shape':
'tf.sparse.reset_shape',
'tf.sparse_reshape':
'tf.sparse.reshape',
'tf.sparse_retain':
'tf.sparse.retain',
'tf.sparse_segment_mean':
'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n':
'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum':
'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice':
'tf.sparse.slice',
'tf.sparse_softmax':
'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense':
'tf.sparse.to_dense',
'tf.sparse_to_dense':
'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator':
'tf.sparse.to_indicator',
'tf.sparse_transpose':
'tf.sparse.transpose',
'tf.spectral.dct':
'tf.signal.dct',
'tf.spectral.fft':
'tf.signal.fft',
'tf.spectral.fft2d':
'tf.signal.fft2d',
'tf.spectral.fft3d':
'tf.signal.fft3d',
'tf.spectral.idct':
'tf.signal.idct',
'tf.spectral.ifft':
'tf.signal.ifft',
'tf.spectral.ifft2d':
'tf.signal.ifft2d',
'tf.spectral.ifft3d':
'tf.signal.ifft3d',
'tf.spectral.irfft':
'tf.signal.irfft',
'tf.spectral.irfft2d':
'tf.signal.irfft2d',
'tf.spectral.irfft3d':
'tf.signal.irfft3d',
'tf.spectral.rfft':
'tf.signal.rfft',
'tf.spectral.rfft2d':
'tf.signal.rfft2d',
'tf.spectral.rfft3d':
'tf.signal.rfft3d',
'tf.squared_difference':
'tf.math.squared_difference',
'tf.string_join':
'tf.strings.join',
'tf.string_strip':
'tf.strings.strip',
'tf.string_to_hash_bucket_fast':
'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong':
'tf.strings.to_hash_bucket_strong',
'tf.summary.Event':
'tf.compat.v1.summary.Event',
'tf.summary.FileWriter':
'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache':
'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog':
'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary':
'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription':
'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata':
'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.all_v2_summary_ops':
'tf.compat.v1.summary.all_v2_summary_ops',
'tf.summary.audio':
'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description':
'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram':
'tf.compat.v1.summary.histogram',
'tf.summary.image':
'tf.compat.v1.summary.image',
'tf.summary.initialize':
'tf.compat.v1.summary.initialize',
'tf.summary.merge':
'tf.compat.v1.summary.merge',
'tf.summary.merge_all':
'tf.compat.v1.summary.merge_all',
'tf.summary.scalar':
'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary':
'tf.compat.v1.summary.tensor_summary',
'tf.summary.text':
'tf.compat.v1.summary.text',
'tf.svd':
'tf.linalg.svd',
'tf.tables_initializer':
'tf.compat.v1.tables_initializer',
'tf.tensor_scatter_add':
'tf.tensor_scatter_nd_add',
'tf.tensor_scatter_sub':
'tf.tensor_scatter_nd_sub',
'tf.tensor_scatter_update':
'tf.tensor_scatter_nd_update',
'tf.test.StubOutForTesting':
'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient_error':
'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir':
'tf.compat.v1.test.get_temp_dir',
'tf.test.mock':
'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path':
'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16':
'tf.compat.v1.to_bfloat16',
'tf.to_complex128':
'tf.compat.v1.to_complex128',
'tf.to_complex64':
'tf.compat.v1.to_complex64',
'tf.to_double':
'tf.compat.v1.to_double',
'tf.to_float':
'tf.compat.v1.to_float',
'tf.to_int32':
'tf.compat.v1.to_int32',
'tf.to_int64':
'tf.compat.v1.to_int64',
'tf.tpu.CrossShardOptimizer':
'tf.compat.v1.tpu.CrossShardOptimizer',
'tf.tpu.PaddingSpec':
'tf.compat.v1.tpu.PaddingSpec',
'tf.tpu.batch_parallel':
'tf.compat.v1.tpu.batch_parallel',
'tf.tpu.bfloat16_scope':
'tf.compat.v1.tpu.bfloat16_scope',
'tf.tpu.core':
'tf.compat.v1.tpu.core',
'tf.tpu.cross_replica_sum':
'tf.compat.v1.tpu.cross_replica_sum',
'tf.tpu.experimental.AdagradParameters':
'tf.compat.v1.tpu.experimental.AdagradParameters',
'tf.tpu.experimental.AdamParameters':
'tf.compat.v1.tpu.experimental.AdamParameters',
'tf.tpu.experimental.FtrlParameters':
'tf.compat.v1.tpu.experimental.FtrlParameters',
'tf.tpu.experimental.StochasticGradientDescentParameters':
'tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters',
'tf.tpu.experimental.embedding_column':
'tf.compat.v1.tpu.experimental.embedding_column',
'tf.tpu.experimental.shared_embedding_columns':
'tf.compat.v1.tpu.experimental.shared_embedding_columns',
'tf.tpu.initialize_system':
'tf.compat.v1.tpu.initialize_system',
'tf.tpu.outside_compilation':
'tf.compat.v1.tpu.outside_compilation',
'tf.tpu.replicate':
'tf.compat.v1.tpu.replicate',
'tf.tpu.rewrite':
'tf.compat.v1.tpu.rewrite',
'tf.tpu.shard':
'tf.compat.v1.tpu.shard',
'tf.tpu.shutdown_system':
'tf.compat.v1.tpu.shutdown_system',
'tf.tpu.XLAOptions':
'tf.compat.v1.tpu.XLAOptions',
'tf.trace':
'tf.linalg.trace',
'tf.train.AdadeltaOptimizer':
'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer':
'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer':
'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer':
'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook':
'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener':
'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator':
'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook':
'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook':
'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer':
'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook':
'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer':
'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook':
'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread':
'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer':
'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession':
'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession':
'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError':
'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook':
'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader':
'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer':
'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook':
'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer':
'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer':
'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner':
'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer':
'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver':
'tf.compat.v1.train.Saver',
'tf.train.SaverDef':
'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold':
'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer':
'tf.estimator.SecondOrStepTimer',
'tf.train.Server':
'tf.distribute.Server',
'tf.train.SessionCreator':
'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager':
'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs':
'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext':
'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook':
'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues':
'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession':
'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook':
'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook':
'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook':
'tf.estimator.SummarySaverHook',
'tf.train.Supervisor':
'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer':
'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo':
'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator':
'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner':
'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step':
'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop':
'tf.compat.v1.train.basic_train_loop',
'tf.train.batch':
'tf.compat.v1.train.batch',
'tf.train.batch_join':
'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists':
'tf.compat.v1.train.checkpoint_exists',
'tf.train.cosine_decay':
'tf.compat.v1.train.cosine_decay',
'tf.train.cosine_decay_restarts':
'tf.compat.v1.train.cosine_decay_restarts',
'tf.train.create_global_step':
'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef':
'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.experimental.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer',
'tf.train.experimental.disable_mixed_precision_graph_rewrite':
'tf.compat.v1.train.experimental.disable_mixed_precision_graph_rewrite',
'tf.train.experimental.enable_mixed_precision_graph_rewrite':
'tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite',
'tf.train.exponential_decay':
'tf.compat.v1.train.exponential_decay',
'tf.train.export_meta_graph':
'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto':
'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes':
'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step':
'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step':
'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step':
'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph':
'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint':
'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer':
'tf.compat.v1.train.input_producer',
'tf.train.inverse_time_decay':
'tf.compat.v1.train.inverse_time_decay',
'tf.train.limit_epochs':
'tf.compat.v1.train.limit_epochs',
'tf.train.linear_cosine_decay':
'tf.compat.v1.train.linear_cosine_decay',
'tf.train.match_filenames_once':
'tf.io.match_filenames_once',
'tf.train.maybe_batch':
'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join':
'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch':
'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join':
'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.natural_exp_decay':
'tf.compat.v1.train.natural_exp_decay',
'tf.train.noisy_linear_cosine_decay':
'tf.compat.v1.train.noisy_linear_cosine_decay',
'tf.train.piecewise_constant':
'tf.compat.v1.train.piecewise_constant',
'tf.train.piecewise_constant_decay':
'tf.compat.v1.train.piecewise_constant_decay',
'tf.train.polynomial_decay':
'tf.compat.v1.train.polynomial_decay',
'tf.train.queue_runner.QueueRunner':
'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner':
'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners':
'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer':
'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint':
'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter':
'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch':
'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join':
'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer':
'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners':
'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer':
'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator':
'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state':
'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start':
'tf.compat.v1.train.warm_start',
'tf.train.write_graph':
'tf.io.write_graph',
'tf.trainable_variables':
'tf.compat.v1.trainable_variables',
'tf.truncated_normal':
'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer':
'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max':
'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean':
'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min':
'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod':
'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n':
'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum':
'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner':
'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope':
'tf.compat.v1.variable_op_scope',
'tf.variable_scope':
'tf.compat.v1.variable_scope',
'tf.variables_initializer':
'tf.compat.v1.variables_initializer',
'tf.verify_tensor_all_finite':
'tf.compat.v1.verify_tensor_all_finite',
'tf.wrap_function':
'tf.compat.v1.wrap_function',
'tf.write_file':
'tf.io.write_file',
'tf.zeta':
'tf.math.zeta'
}
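# --- Illustrative usage (hypothetical; not part of this mapping file) ---
# A minimal sketch of how a rename map like the one above could be applied
# to source text. The name ``renames`` and the helper are assumptions for
# illustration only; the real tf_upgrade_v2 tool rewrites code via the AST
# rather than plain string replacement.
#
# def apply_renames(source, renames):
#     # replace longest names first so 'tf.summary.image' is handled
#     # before any shorter prefix such as 'tf.summary'
#     for old in sorted(renames, key=len, reverse=True):
#         source = source.replace(old, renames[old])
#     return source
#
# apply_renames("x = tf.to_float(y)", renames)
# # -> "x = tf.compat.v1.to_float(y)"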
| apache-2.0 |
AvengersPy/MyPairs | testPython2/pairtrade.py | 1 | 11655 | #!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from datetime import datetime
import pytz
from scipy import stats
import csv
from zipline.algorithm import TradingAlgorithm
from zipline.transforms import batch_transform
from zipline.utils.factory import load_from_yahoo
from Equity import *
from sscore import *
@batch_transform
def ols_transform(data, sid1, sid2):
"""Computes regression coefficient (slope and intercept)
via Ordinary Least Squares between two SIDs.
"""
## original one
#p0 = data.price[sid1].values
#p1 = sm.add_constant(data.price[sid2], prepend=True)
#slope, intercept = sm.OLS(p0, p1).fit().params
##np polyfit
#pStk = data.price[sid1].values
#pEtf = data.price[sid2].values
#para = np.polyfit(pStk, pEtf, 1)
#slope = para[0]
#intercept = para[1]
## calculate returns
pStk = data.price[sid1].values
pStkRtn = np.log(data.price[sid1].values[1:]) - np.log(data.price[sid1].values[0:-1])
pEtf = data.price[sid2].values
pEtfRtn = np.log(data.price[sid2].values[1:]) - np.log(data.price[sid2].values[0:-1])
slope, intercept, r_value, p_value, std_err = stats.linregress(pEtfRtn, pStkRtn)
# record the last prices for calculating returns!
lastStkPrice = pStk[-1]
lastEtfPrice = pEtf[-1]
return slope, intercept, lastStkPrice, lastEtfPrice
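# Illustrative note (made-up numbers, not part of the original strategy):
# the regression above fits stock log-returns against ETF log-returns, so
# the slope is the hedge ratio used later to size the ETF leg. For example:
#
#   etf_rtn = np.array([0.010, -0.020, 0.015, 0.005])
#   stk_rtn = 1.5 * etf_rtn             # a perfectly hedged stock
#   stats.linregress(etf_rtn, stk_rtn)  # slope == 1.5, intercept == 0.0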
class Pairtrade(TradingAlgorithm):
"""Pairtrading relies on cointegration of two stocks.
The expectation is that once the two stocks drifted apart
(i.e. there is spread), they will eventually revert again. Thus,
if we short the upward drifting stock and long the downward
drifting stock (in short, we buy the spread) once the spread
widened we can sell the spread with profit once they converged
again. A nice property of this algorithm is that we enter the
market in a neutral position.
    This specific algorithm tries to exploit the cointegration of a
    stock and an ETF (the symbols passed to __init__) by estimating
    the correlation between the two. Divergence of the spread is
    evaluated by z-scoring.
"""
def initialize(self, stkSymbol, etfSymbol, window_length=50):
self.spreads = []
self.invested = 0
self.window_length = window_length
self.ols_transform = ols_transform(refresh_period=self.window_length, window_length=self.window_length)
# create Equity objs
self.stk = Equity(stkSymbol)
self.etf = Equity(etfSymbol)
# need to record the last prices for calculating returns!
self.lastStkPrice = 0
self.lastEtfPrice = 0
# regression parameters
self.beta = 0
self.alpha = 0
## Stop Loss
self.STKSHARE = 100 # const stk share
        self.STOPLOSS = -0.05 # stop if loss more than 5%
self.initBP = 0 # Used buying power after open the position
self.closeBP = 0
self.unPNL = 0
self.PNL = 0
## max holding time
self.holdingTime = 0
self.MAX_HOLDING = 40
def handle_data(self, data):
######################################################
        # 1. Compute regression coefficients between the stock and the ETF
params = self.ols_transform.handle_data(data, self.stk.symbol, self.etf.symbol)
if params is None:
return
self.beta, self.alpha, self.lastStkPrice, self.lastEtfPrice = params
######################################################
# 2. Compute spread and zscore
#zscore = self.compute_zscore(data)
zscore = self.compute_sscore(data)
self.record(zscores=zscore)
######################################################
# 3. Place orders
self.place_orders(data, zscore)
######################################################
# 4. Max Holding
self.maxHoldingTime(data)
######################################################
# 5. Stop Loss
self.unRealizedPNL(data)
def unRealizedPNL(self, data):
if self.invested:
self.stk.unRealizedPNL = (data[self.stk.symbol].price - self.stk.openPrice) * self.stk.share
self.etf.unRealizedPNL = (data[self.etf.symbol].price - self.etf.openPrice) * self.etf.share
self.unPNL = self.stk.unRealizedPNL + self.etf.unRealizedPNL
if self.unPNL / self.initBP < self.STOPLOSS:
self.sell_spread() # close position
print "{} {}, {} {}, BP {}".format(data[self.stk.symbol].datetime.date(), "Stop Loss", self.stk.symbol, self.etf.symbol, self.initBP)
self.PNL += self.unPNL
#print "realized PNL: {}".format(self.unPNL)
def maxHoldingTime(self, data):
"""close pair position if hold too long
"""
if self.invested:
#print "holding: {}".format(self.holdingTime)
self.holdingTime += 1
if self.holdingTime > self.MAX_HOLDING:
                self.sell_spread()
print "{} {}, {} {}, BP {}".format(data[self.stk.symbol].datetime.date(), "Exceeded Max Holding", self.stk.symbol, self.etf.symbol, self.initBP)
self.PNL += self.unPNL
#print "realized PNL: {}".format(self.unPNL)
def compute_zscore(self, data):
"""1. Compute the spread given slope and intercept.
2. zscore the spread.
"""
lastStkRtn = np.log(data[self.stk.symbol].price/self.lastStkPrice)
lastEtfRtn = np.log(data[self.etf.symbol].price/self.lastEtfPrice)
spread = lastStkRtn - (self.beta * lastEtfRtn + self.alpha)
self.spreads.append(spread)
spread_wind = self.spreads[-self.window_length : ] # the last several residuals
zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)
return zscore
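    # Worked example of the z-scoring above (comment only, made-up numbers):
    # with spread_wind = [0.01, -0.01, 0.00, 0.02] (newest spread last),
    # mean = 0.005 and std ~= 0.0112, so
    # zscore = (0.02 - 0.005) / 0.0112 ~= 1.34 -- below the +/-2.0 entry
    # threshold used in place_orders, so no position would be opened.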
def compute_sscore(self, data):
lastStkRtn = np.log(data[self.stk.symbol].price/self.lastStkPrice)
lastEtfRtn = np.log(data[self.etf.symbol].price/self.lastEtfPrice)
spread = lastStkRtn - (self.beta * lastEtfRtn + self.alpha)
self.spreads.append(spread)
spread_wind = self.spreads[-self.window_length : ] # the last several residuals
sscore = score(spread_wind, self.window_length)
return sscore
def place_orders(self, data, zscore):
"""Buy spread if zscore is > 2, sell if zscore < .5.
"""
if zscore >= 2.0 and not self.invested:
# zscore positive, stock overperformed, short stk
self.stk.openPrice = data[self.stk.symbol].price
self.stk.share = -self.STKSHARE
self.etf.openPrice = data[self.etf.symbol].price
self.etf.share = int(self.STKSHARE * self.beta * self.stk.openPrice / self.etf.openPrice)
self.order(self.stk.symbol, self.stk.share)
self.order(self.etf.symbol, self.etf.share)
self.stk.initBP = self.stk.share * self.stk.openPrice
self.etf.initBP = self.etf.share * self.etf.openPrice
self.initBP = abs(self.stk.initBP) + abs(self.etf.initBP)
print "{} {}, {} {}, BP {}".format(data[self.stk.symbol].datetime.date(), "Long ETF Opened", self.stk.symbol, self.etf.symbol, self.initBP)
self.invested = True
elif zscore <= -2.0 and not self.invested:
            # zscore negative, etf overperformed, short etf
self.stk.openPrice = data[self.stk.symbol].price
self.stk.share = self.STKSHARE
self.etf.openPrice = data[self.etf.symbol].price
self.etf.share = -int(self.STKSHARE * self.beta * self.stk.openPrice / self.etf.openPrice)
self.order(self.stk.symbol, self.stk.share)
self.order(self.etf.symbol, self.etf.share)
self.stk.initBP = self.stk.share * self.stk.openPrice
self.etf.initBP = self.etf.share * self.etf.openPrice
self.initBP = abs(self.stk.initBP) + abs(self.etf.initBP)
print "{} {}, {} {}, BP {}".format(data[self.stk.symbol].datetime.date(), "Long STK Opened", self.stk.symbol, self.etf.symbol, self.initBP)
self.invested = True
elif abs(zscore) < .5 and self.invested:
self.sell_spread() # close position
print "{} {}, {} {}, BP {}".format(data[self.stk.symbol].datetime.date(), "Closed", self.stk.symbol, self.etf.symbol, self.initBP)
self.PNL += self.unPNL
#print "realized PNL: {}".format(self.unPNL)
def sell_spread(self):
"""
decrease exposure, regardless of position long/short.
buy for a short position, sell for a long.
"""
etf_amount = self.portfolio.positions[self.etf.symbol].amount
self.order(self.etf.symbol, -1 * etf_amount)
stk_amount = self.portfolio.positions[self.stk.symbol].amount
self.order(self.stk.symbol, -1 * stk_amount)
self.initBP = 0 # set buying power to 0
self.invested = False
self.holdingTime = 0
if __name__ == '__main__':
start = datetime(2011, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2012, 1, 1, 0, 0, 0, 0, pytz.utc)
sym = list(pd.read_csv('dia.csv')['Symbol'])
stksymbol = 'AA'
etfsymbol = 'DIA'
#errSymbol = []
#portfResult = {}
#for i in range(2):
# stksymbol = sym[i]
# try:
# data = load_from_yahoo(stocks=[stksymbol, etfsymbol], indexes={}, start=start, end=end)
# except IOError as e:
# print "Cannot get {} from yahoo".format(stksymbol)
# errSymbol.append(stksymbol)
# continue
# pairtrade = Pairtrade(stksymbol, etfsymbol)
# results = pairtrade.run(data)
# data['spreads'] = np.nan
# portfResult[stksymbol] = results.portfolio_value[-1] - results.portfolio_value[0]
#print errSymbol
#with open('dict.csv', 'w') as outfile:
# writer = csv.writer(outfile)
# for key, value in portfResult.items():
# writer.writerow([key, value])
############## test one symbol ##########################
data = load_from_yahoo(stocks=[stksymbol, etfsymbol], indexes={}, start=start, end=end)
pairtrade = Pairtrade(stksymbol, etfsymbol)
results = pairtrade.run(data)
data['spreads'] = np.nan
print results.portfolio_value
print results.portfolio_value[-1] - results.portfolio_value[0]
ax1 = plt.subplot(311)
data[[stksymbol, etfsymbol]].plot(ax=ax1)
plt.ylabel('price')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(312, sharex=ax1)
results.zscores.plot(ax=ax2, color='r')
plt.ylabel('zscored spread')
    ax3 = plt.subplot(313, sharex=ax1)
results.portfolio_value.plot(ax=ax3)
plt.ylabel('portfolio value')
plt.gcf().set_size_inches(18, 8)
plt.show()
| apache-2.0 |
tdhopper/scikit-learn | sklearn/naive_bayes.py | 70 | 28476 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
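    # Sanity check for the update above (illustrative comment with assumed
    # data): merging batch statistics this way must agree with the
    # statistics of the concatenated data, e.g.
    #
    #   a, b = np.random.randn(5, 3), np.random.randn(7, 3)
    #   mu, var = GaussianNB._update_mean_variance(
    #       5, a.mean(axis=0), a.var(axis=0), b)
    #   np.allclose(mu, np.vstack([a, b]).mean(axis=0))   # True
    #   np.allclose(var, np.vstack([a, b]).var(axis=0))   # True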
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
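# Note for clarity: the per-class term in GaussianNB._joint_log_likelihood
# is the log density of a diagonal Gaussian summed over features,
#
#   log P(x | c) = sum_i [ -0.5 * log(2 * pi * sigma_ci**2)
#                          - (x_i - theta_ci)**2 / (2 * sigma_ci**2) ]
#
# which is exactly the two terms accumulated in ``n_ij`` above.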
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
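    # Example of the branches above (illustrative numbers): with
    # class_count_ = [3., 1.] and no explicit prior, fit_prior=True yields
    # class_log_prior_ = log([3/4, 1/4]), while fit_prior=False yields the
    # uniform log([1/2, 1/2]).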
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
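    # Tiny worked example of the smoothing above (illustrative counts): for
    # one class with feature_count_ = [3, 0, 1] and alpha = 1,
    # smoothed_fc = [4, 1, 2] and smoothed_cc = 7, so feature_log_prob_ =
    # log([4/7, 1/7, 2/7]); the zero-count feature keeps a small nonzero
    # probability instead of -inf.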
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
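        # (Derivation:
        #    jll_c = X . log(p_c) + (1 - X) . log(1 - p_c)
        #          = X . (log(p_c) - log(1 - p_c)) + sum_i log(1 - p_ci),
        #  which keeps X sparse instead of materializing 1 - X.)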
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause |
drscotthawley/SHAART | source/modegraphwidget.py | 1 | 5450 | # Python Qt5 bindings for GUI objects
from PyQt5 import QtGui, QtWidgets
# import the Qt5Agg FigureCanvas object, that binds Figure to
# Qt5Agg backend. It also inherits from QWidget
from matplotlib.backends.backend_qt5agg \
import FigureCanvasQTAgg as FigureCanvas
# Matplotlib Figure object
from matplotlib.figure import Figure
from matplotlib import cm
import matplotlib.mlab as mlab
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
import numpy as np
from mpl_toolkits.mplot3d import axes3d
#from scipy.misc import imresize
from scipy import ndimage
global color_axial, color_tangential, color_oblique
color_axial = 'r'
color_tangential = 'b'
color_oblique = 'purple'
class ModeGraphCanvas(FigureCanvas):
"""Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure()
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
        self.fig.clear()
        self.ax = self.fig.add_subplot(111)
self.fig.subplots_adjust(left=0.12,right=0.98,bottom=0.105, top=.92)
# we define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
# notify the system of updated policy
FigureCanvas.updateGeometry(self)
class ModeGraphWidget(QtWidgets.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent = None):
# initialization of Qt MainWindow widget
QtWidgets.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = ModeGraphCanvas()
# create a vertical box layout
self.vbl = QtWidgets.QVBoxLayout()
# add mode graph widget to vertical box
self.vbl.addWidget(self.canvas)
# add interactive navigation
self.navi_toolbar = NavigationToolbar(self.canvas, self)
self.vbl.addWidget(self.navi_toolbar)
        # set the layout to the vertical box
self.setLayout(self.vbl)
def plot_mode(self, f0, deltaf, modetype, sums):
n = 200
fplothalfwidth = 2.5*deltaf
fstart = f0 - fplothalfwidth
fend = f0 + fplothalfwidth
df = 2*fplothalfwidth / n
f = np.arange( fstart, fend+df, df)
# modeshape = np.exp(-((f-f0)/deltaf)**2) # gaussian
dB = -10*((f-f0)/deltaf)**2 # log of a gaussian is a parabola
if (2 == modetype): # axial modes
colorstr = color_axial
dB = [x + 1 for x in dB]
elif (1 == modetype): # tangential modes
colorstr = color_tangential
else: # oblique modes
colorstr = color_oblique
dB = [x - 1 for x in dB]
modeshape = [10.0**x for x in dB]
#line, = ax.plot(f, np.ma.log10(modeshape), colorstr) # plot the mode shape itself
# make a tiny tick mark for the mode
ticklen = 2
tickfs = [f0,f0]
        if0 = len(f) // 2  # midpoint index (currently unused)
tickstart = 7 - 2*modetype
tickps = [tickstart, tickstart + ticklen]
line2, = self.canvas.ax.plot(tickfs, tickps, colorstr) # draw a tick at the mode frequency
if sums is not None:
#ifsave = 0 # this was used to prevent double-counting #TODO: <--- This is wrong! Double counting is good!
for i in range(len(f)):
ifreq = int(f[i])
                if (ifreq > 0) and (ifreq < len(sums)):
#if (ifreq != ifsave):
sums[ifreq] = sums[ifreq] + modeshape[i]
#ifsave = ifreq
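    # Note: log10 of a single modeshape recovers the parabola in ``dB``, so
    # each mode reads roughly 10 dB down one bandwidth (deltaf) away from its
    # center frequency, plus the small per-type offset applied above;
    # update_graph sums the modeshapes before taking log10 to approximate the
    # overall steady-state response.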
def update_graph(self,modes,X,Y,Z):
"""Updates the graph with new data/annotations"""
nmodes = len(modes)
self.canvas.ax.clear()
        sum_fmax = 250
dB_min = -30
dB_max = 10
sums = np.zeros(sum_fmax)
for m in modes:
numzeros = m.count(0)
f0 = m[0]
rt60 = 0.4
            deltaf = 2.2/rt60  # modal half-power bandwidth from RT60 (deltaf ~= 2.2/RT60)
self.plot_mode(f0, deltaf, numzeros, sums)
line2, = self.canvas.ax.plot(range(len(sums)), np.ma.log10(sums), 'k')
pl = self.canvas.ax
# Global Plot characteristics
pl.axis([0,sum_fmax,dB_min,dB_max])
self.canvas.ax.set_xlabel('Frequency (Hz)')
self.canvas.ax.set_ylabel('Relative sound-pressure level (dB)')
title = "Theoretical Steady-State Room Response"
self.canvas.ax.set_title(title)
pl.grid(True)
# cheap hack to make a legend
xs = [sum_fmax]
ys = [dB_min]
pA, = self.canvas.ax.plot(xs,ys,color=color_axial)
pB, = self.canvas.ax.plot(xs,ys,color=color_tangential)
pC, = self.canvas.ax.plot(xs,ys,color=color_oblique)
l1 = self.canvas.ax.legend([pA,pB,pC], ['axial modes','tangential modes','oblique modes'], loc=4)
l1.draw_frame(False) # no box around the legend
"""Actually draw everything"""
self.canvas.draw()
| gpl-2.0 |
madcowswe/ODrive | tools/motion_planning/PlanTrap.py | 2 | 8250 | # Copyright (c) 2018 Paul Guénette
# Copyright (c) 2018 Oskar Weigl
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This algorithm is based on:
# FIR filter-based online jerk-constrained trajectory generation
# https://www.researchgate.net/profile/Richard_Bearee/publication/304358769_FIR_filter-based_online_jerk-controlled_trajectory_generation/links/5770ccdd08ae10de639c0ff7/FIR-filter-based-online-jerk-controlled-trajectory-generation.pdf
import numpy as np
import math
import matplotlib.pyplot as plt
import random
# Symbol Description
# Ta, Tv and Td Duration of the stages of the AL profile
# Xi and Vi Adapted initial conditions for the AL profile
# Xf Position set-point
# s Direction (sign) of the trajectory
# Vmax, Amax, Dmax and jmax Kinematic bounds
# Ar, Dr and Vr Reached values of acceleration and velocity
# Test scales:
pos_range = 10000.0
Vmax_range = 8000.0
Amax_range = 10000.0
plot_range = 10000.0
def PlanTrap(Xf, Xi, Vi, Vmax, Amax, Dmax):
dX = Xf - Xi # Distance to travel
stop_dist = Vi**2 / (2*Dmax) # Minimum stopping distance
dXstop = np.sign(Vi)*stop_dist # Minimum stopping displacement
s = np.sign(dX - dXstop) # Sign of coast velocity (if any)
Ar = s*Amax # Maximum Acceleration (signed)
Dr = -s*Dmax # Maximum Deceleration (signed)
Vr = s*Vmax # Maximum Velocity (signed)
# If we start with a speed faster than cruising, then we need to decel instead of accel
# aka "double deceleration move" in the paper
if s*Vi > s*Vr:
print("Handbrake!")
Ar = -s*Amax
# Time to accel/decel to/from Vr (cruise speed)
Ta = (Vr-Vi)/Ar
Td = -Vr/Dr
# Integral of velocity ramps over the full accel and decel times to get
# minimum displacement required to reach cuising speed
dXmin = Ta*(Vr+Vi)/2.0 + Td*(Vr)/2.0
# Are we displacing enough to reach cruising speed?
if s*dX < s*dXmin:
print("Short Move:")
# From paper:
# Vr = s*math.sqrt((-(Vi**2/Ar)-2*dX)/(1/Dr-1/Ar))
# Simplified for less divisions:
Vr = s*math.sqrt((Dr*Vi**2 + 2*Ar*Dr*dX) / (Dr-Ar))
Ta = max(0, (Vr - Vi)/Ar)
Td = max(0, -Vr/Dr)
Tv = 0
else:
print("Long move:")
Tv = (dX - dXmin)/Vr # Coasting time
Tf = Ta+Tv+Td
print("Xi: {:.2f}\tXf: {:.2f}\tVi: {:.2f}".format(Xi, Xf, Vi))
print("Amax: {:.2f}\tVmax: {:.2f}\tDmax: {:.2f}".format(Amax, Vmax, Dmax))
print("dX: {:.2f}\tdXst: {:.2f}\tdXmin: {:.2f}".format(dX, dXstop, dXmin))
print("Ar: {:.2f}\tVr: {:.2f}\tDr: {:.2f}".format(Ar, Vr, Dr))
print("Ta: {:.2f}\tTv: {:.2f}\tTd: {:.2f}".format(Ta, Tv, Td))
return (Ar, Vr, Dr, Ta, Tv, Td, Tf)
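# Worked sanity check of the plan above (made-up numbers): for Xi=0, Xf=100,
# Vi=0, Vmax=10, Amax=Dmax=5 the move is "long":
#   Ta = Td = 10/5 = 2 s
#   dXmin = 2*(10+0)/2 + 2*10/2 = 20 < 100
#   Tv = (100 - 20)/10 = 8 s, so Tf = 2 + 8 + 2 = 12 s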
def EvalTrap(Xf, Xi, Vi, Ar, Vr, Dr, Ta, Tv, Td, Tf):
# Create the time series and preallocate the position, velocity, and acceleration arrays
t_traj = np.arange(0, Tf+0.1, 1/10000)
y = [None]*len(t_traj)
yd = [None]*len(t_traj)
ydd = [None]*len(t_traj)
# We only know acceleration (Ar and Dr), so we integrate to create
# the velocity and position curves
y_Accel = Xi + Vi*Ta + 0.5*Ar*Ta**2
for i in range(len(t_traj)):
t = t_traj[i]
if t < 0: # Initial conditions
y[i] = Xi
yd[i] = Vi
ydd[i] = 0
elif t < Ta: # Acceleration
y[i] = Xi + Vi*t + 0.5*Ar*t**2
yd[i] = Vi + Ar*t
ydd[i] = Ar
elif t < Ta+Tv: # Coasting
y[i] = y_Accel + Vr*(t-Ta)
yd[i] = Vr
ydd[i] = 0
elif t < Tf: # Deceleration
td = t-Tf
y[i] = Xf + 0*td + 0.5*Dr*td**2
yd[i] = 0 + Dr*td
ydd[i] = Dr
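            # (note: the deceleration branch integrates backwards from the
            # endpoint, td = t - Tf <= 0, which guarantees y -> Xf and
            # yd -> 0 exactly at t = Tf)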
elif t >= Tf: # Final condition
y[i] = Xf
yd[i] = 0
ydd[i] = 0
else:
raise ValueError("t = {} is outside of considered range".format(t))
dy = np.diff(y)
dy_max = np.max(np.abs(dy))
dyd = np.diff(yd)
dyd_max = np.max(np.abs(dyd))
print("dy_max: {:.2f}\tdyd_max: {:.2f}".format(dy_max, dyd_max))
error = False
if dy_max/pos_range > 0.001:
print("---------- Bad Pos Continuity --------------------")
error = True
if dyd_max/Vmax_range > 0.001:
print("---------- Bad Vel Continuity --------------------")
error = True
if abs(Xi-y[0]) > 0.0001:
print("---------- Bad Initial Position --------------------")
error = True
if abs(Xf-y[-1]) > 0.0001:
print("---------- Bad Final Position --------------------")
error = True
if abs(Vi-yd[0]) > 0.0001:
print("---------- Bad Initial Velocity --------------------")
error = True
if abs(yd[-1]) > 0.0001:
print("---------- Bad Final Velocity --------------------")
error = True
if error:
import ipdb; ipdb.set_trace()
return (y, yd, ydd, t_traj)
def graphical_test():
numRows = 3
numCols = 5
fig, axes = plt.subplots(numRows, numCols)
random.seed(3) # Repeatable tests by using specific seed
for x in range(numRows*numCols):
rownow = int(x/numCols)
colnow = x % numCols
print("row: {}, col: {}".format(rownow, colnow))
Vmax = random.uniform(0.1*Vmax_range, Vmax_range)
Amax = random.uniform(0.1*Amax_range, Amax_range)
Dmax = Amax
Xf = random.uniform(-pos_range, pos_range)
Xi = random.uniform(-pos_range, pos_range)
if random.random() <= 0.5:
Vi = random.uniform(-Vmax*1.5, Vmax*1.5)
else:
Vi = 0
(Ar, Vr, Dr, Ta, Tv, Td, Tf) = PlanTrap(Xf, Xi, Vi, Vmax, Amax, Dmax)
(Y, Yd, Ydd, t) = EvalTrap(Xf, Xi, Vi, Ar, Vr, Dr, Ta, Tv, Td, Tf)
# Plotting
ax1 = axes[rownow, colnow]
# Vel limits (draw first for clearer z-order)
ax1.plot([t[0], t[-1]], [Vmax, Vmax], 'g--')
ax1.plot([t[0], t[-1]], [-Vmax, -Vmax], 'g--')
ax1.plot(t, Y) # Pos
ax1.plot(t, Yd) # Vel
ax1.plot(0, Xi, 'bo') # Pos Initial
ax1.plot(0, Vi, 'ro') # Vel Initial
        ## TODO: pull out Ta+Tv+Td from planner for correct plot points
ax1.plot(t[-1]-0.1, Xf, 'b*') # Pos Final
ax1.plot(t[-1]-0.1, 0, 'r*') # Vel Final
ax1.set_ylim(-plot_range, plot_range)
print()
plt.show()
def large_test():
random.seed(1) # Repeatable tests by using specific seed
for x in range(100):
print("Test {}".format(x))
Vmax = random.uniform(0.1*Vmax_range, Vmax_range)
Amax = random.uniform(0.1*Amax_range, Amax_range)
Dmax = Amax
Xf = random.uniform(-pos_range, pos_range)
Xi = random.uniform(-pos_range, pos_range)
if random.random() <= 0.5:
Vi = random.uniform(-Vmax*1.5, Vmax*1.5)
else:
Vi = 0
(Ar, Vr, Dr, Ta, Tv, Td, Tf) = PlanTrap(Xf, Xi, Vi, Vmax, Amax, Dmax)
(Y, Yd, Ydd, t) = EvalTrap(Xf, Xi, Vi, Ar, Vr, Dr, Ta, Tv, Td, Tf)
print()
if __name__ == '__main__':
large_test()
graphical_test() | mit |
kmike/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 31 | 6147 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
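    # The step sizes above are the closed-form passive-aggressive updates of
    # Crammer et al. (2006), "Online Passive-Aggressive Algorithms":
    # min(C, loss/||x||^2) is PA-I and loss/(||x||^2 + 1/(2C)) is PA-II.
    # This reference implementation is what the sklearn estimators are
    # compared against in the *_correctness tests below.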
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
"""Classifier can be retrained on different labels and features."""
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
mugizico/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
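# Illustrative note: with split_sign=True the transform doubles the code
# width, storing positive and negative parts in separate columns (a
# coefficient of -0.3 becomes (0.0, 0.3)), so the original code is recovered
# as split_code[:, :n_components] - split_code[:, n_components:], which is
# exactly what the assertion above checks.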
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
sarahgrogan/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, which have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
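# Illustrative: for x = 2.3 (with dx=1, orig=0) the helper above returns
# indices (2, 3) with linear interpolation weights (0.7, 0.3), i.e. each
# sample point is spread over its two neighbouring pixels.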
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir * l_x, l_x ** 2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / float(n_pts))
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
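# Illustrative: subtracting the binary erosion keeps only the pixels on
# object boundaries, which is what makes the ground-truth image sparse in
# the pixel basis (cf. the module docstring).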
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
google-code-export/nmrglue | doc/_build/html/examples/el/sample_applications/apod_viewer_1win.py | 10 | 9854 | #!/usr/bin/env python
"""
An example of using wxPython to build a GUI application using nmrglue
This application displays the NMRPipe apodization windows
"""
import numpy as np
import nmrglue as ng
import matplotlib
# uncomment the following to use wx rather than wxagg
#matplotlib.use('WX')
#from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas
# comment out the following to use wx rather than wxagg
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
apod_list = ["SP","EM","GM","GMB","TM","TRI","JMOD"]
class ParameterPanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent,-1)
self.parent = parent
self.qName1 = wx.StaticText(self,-1,"Type:")
self.qName2 = wx.Choice(self,-1,choices=apod_list)
self.Bind(wx.EVT_CHOICE,self.ApodChoose,self.qName2)
self.q1_1 = wx.StaticText(self,-1,"q1:")
self.q1_2 = wx.TextCtrl(self,-1,"0.0")
self.q2_1 = wx.StaticText(self,-1,"q2:")
self.q2_2 = wx.TextCtrl(self,-1,"1.0")
self.q3_1 = wx.StaticText(self,-1,"q3:")
self.q3_2 = wx.TextCtrl(self,-1,"1.0")
self.c1 = wx.StaticText(self,-1,"c")
self.c2 = wx.TextCtrl(self,-1,"1.0")
self.start_1 = wx.StaticText(self,-1,"Start")
self.start_2 = wx.TextCtrl(self,-1,"1.0")
self.size_1 = wx.StaticText(self,-1,"Size")
self.size_1.Enable(False)
self.size_2 = wx.TextCtrl(self,-1,"1.0")
self.size_2.Enable(False)
self.inv = wx.CheckBox(self,-1,"Invert")
self.use_size = wx.CheckBox(self,-1,"Custom Size")
self.Bind(wx.EVT_CHECKBOX,self.OnLimitCheck,self.use_size)
self.points_1 = wx.StaticText(self,-1,"Number of Points:")
self.points_2 = wx.TextCtrl(self,-1,"1000")
self.sw_1 = wx.StaticText(self,-1,"Spectral Width:")
self.sw_2 = wx.TextCtrl(self,-1,"50000.")
self.b1 = wx.Button(self,10,"Draw")
self.Bind(wx.EVT_BUTTON,self.OnDraw,self.b1)
self.b1.SetDefault()
self.b2 = wx.Button(self,20,"Clear")
self.Bind(wx.EVT_BUTTON,self.OnClear,self.b2)
self.b2.SetDefault()
self.InitApod("SP")
# layout
apod_grid = wx.GridSizer(8,2)
apod_grid.AddMany([self.qName1, self.qName2,
self.q1_1, self.q1_2,
self.q2_1, self.q2_2,
self.q3_1, self.q3_2,
self.c1,self.c2,
self.start_1,self.start_2,
self.size_1,self.size_2,
self.inv,self.use_size])
data_grid = wx.GridSizer(2,2)
data_grid.AddMany([self.points_1,self.points_2,
self.sw_1,self.sw_2])
apod_box = wx.StaticBoxSizer(wx.StaticBox(self,-1,
"Apodization Parameters"))
apod_box.Add(apod_grid)
data_box = wx.StaticBoxSizer(wx.StaticBox(self,-1,
"Data Parameters"))
data_box.Add(data_grid)
button_box = wx.GridSizer(1,2)
button_box.AddMany([self.b1,self.b2])
mainbox = wx.BoxSizer(wx.VERTICAL)
mainbox.Add(apod_box)
mainbox.Add(data_box)
mainbox.Add(button_box)
self.SetSizer(mainbox)
def OnLimitCheck(self,event):
k = event.IsChecked()
self.size_1.Enable(k)
self.size_2.Enable(k)
points = float(self.points_2.GetValue())
self.size_2.SetValue(str(points))
def ApodChoose(self,event):
self.InitApod(apod_list[self.qName2.GetCurrentSelection()])
def InitApod(self,qName):
if qName == "SP":
self.q1_1.Enable(True)
self.q1_1.SetLabel("off")
self.q1_2.Enable(True)
self.q1_2.SetValue("0.0")
self.q2_1.Enable(True)
self.q2_1.SetLabel("end")
self.q2_2.Enable(True)
self.q2_2.SetValue("1.0")
self.q3_1.Enable(True)
self.q3_1.SetLabel("pow")
self.q3_2.Enable(True)
self.q3_2.SetValue("1.0")
elif qName == "EM":
self.q1_1.Enable(True)
self.q1_1.SetLabel("lb (Hz)")
self.q1_2.Enable(True)
self.q1_2.SetValue("0.0")
self.q2_1.Enable(False)
self.q2_2.Enable(False)
self.q3_1.Enable(False)
self.q3_2.Enable(False)
elif qName == "GM":
self.q1_1.Enable(True)
self.q1_1.SetLabel("g1 (Hz)")
self.q1_2.Enable(True)
self.q1_2.SetValue("0.0")
self.q2_1.Enable(True)
self.q2_1.SetLabel("g2 (Hz)")
self.q2_2.Enable(True)
self.q2_2.SetValue("0.0")
self.q3_1.Enable(True)
self.q3_1.SetLabel("g3")
self.q3_2.Enable(True)
self.q3_2.SetValue("0.0")
elif qName == "GMB":
self.q1_1.Enable(True)
self.q1_1.SetLabel("lb")
self.q1_2.Enable(True)
self.q1_2.SetValue("0.0")
self.q2_1.Enable(True)
self.q2_1.SetLabel("gb")
self.q2_2.Enable(True)
self.q2_2.SetValue("0.0")
self.q3_1.Enable(False)
self.q3_2.Enable(False)
elif qName == "TM":
self.q1_1.Enable(True)
self.q1_1.SetLabel("t1")
self.q1_2.Enable(True)
self.q1_2.SetValue("0.0")
self.q2_1.Enable(True)
self.q2_1.SetLabel("t2")
self.q2_2.Enable(True)
self.q2_2.SetValue("0.0")
self.q3_1.Enable(False)
self.q3_2.Enable(False)
elif qName == "TRI":
self.q1_1.Enable(True)
self.q1_1.SetLabel("loc")
self.q1_2.Enable(True)
points = float(self.points_2.GetValue())
self.q1_2.SetValue(str(points/2.))
self.q2_1.Enable(True)
self.q2_1.SetLabel("lHi")
self.q2_2.Enable(True)
self.q2_2.SetValue("0.0")
self.q3_1.Enable(True)
self.q3_1.SetLabel("rHi")
self.q3_2.Enable(True)
self.q3_2.SetValue("0.0")
elif qName == "JMOD":
self.q1_1.Enable(True)
self.q1_1.SetLabel("off")
self.q1_2.Enable(True)
self.q1_2.SetValue("0.0")
self.q2_1.Enable(True)
self.q2_1.SetLabel("j (Hz)")
self.q2_2.Enable(True)
self.q2_2.SetValue("0.0")
self.q3_1.Enable(True)
self.q3_1.SetLabel("lb (Hz)")
self.q3_2.Enable(True)
self.q3_2.SetValue("0.0")
def OnDraw(self,event):
qName = apod_list[self.qName2.GetCurrentSelection()]
q1 = float(self.q1_2.GetValue())
q2 = float(self.q2_2.GetValue())
q3 = float(self.q3_2.GetValue())
c = float(self.c2.GetValue())
start = float(self.start_2.GetValue())
size = float(self.size_2.GetValue())
inv = self.inv.GetValue()
use_size = self.use_size.GetValue()
points = float(self.points_2.GetValue())
sw = float(self.sw_2.GetValue())
self.parent.ApplyApod(qName,q1,q2,q3,c,start,size,inv,use_size,
points,sw)
def OnClear(self,event):
self.parent.ClearFigure()
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,'Apodization Viewer')
self.SetBackgroundColour(wx.NamedColor("WHITE"))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.params = ParameterPanel(self)
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
# layout
fsizer = wx.BoxSizer(wx.VERTICAL)
fsizer.Add(self.canvas,0,wx.EXPAND)
fsizer.Add(self.toolbar,0,wx.EXPAND)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.params,0,wx.EXPAND)
self.sizer.Add(fsizer,0,wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
def OnPaint(self, event):
self.canvas.draw()
def ClearFigure(self):
self.axes.cla()
self.OnPaint(-1)
def ApplyApod(self,qName,q1,q2,q3,c,start,size,inv,use_size,points,sw):
"""
print "DEBUGGING INFOMATION"
print "ApplyApod Recieved:"
print "qName:",qName
print "q1:",q1
print "q2:",q2
print "q3:",q3
print "c:",c
print "start:",start
print "size:",size
print "inv:",inv
print "use_size:",use_size
print "points:",points
print "sw:",sw
"""
# create the dictionary
dic = ng.fileiobase.create_blank_udic(1)
dic[0]["sw"] = sw
dic[0]["size"] = points
# create the data
data = np.ones(points,dtype="complex")
# convert to NMRPipe format
C = ng.convert.converter()
C.from_universal(dic,data)
pdic,pdata = C.to_pipe()
if use_size == True:
tsize = size
else:
tsize = 'default'
null,apod_data = ng.pipe_proc.apod(pdic,pdata,qName=qName,
q1=q1,q2=q2,q3=q3,c=c,inv=inv,size=tsize,start=start)
# draw the window
#self.axes.cla()
self.axes.plot(apod_data)
self.OnPaint(-1)
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
| bsd-3-clause |
harisbal/pandas | asv_bench/benchmarks/multiindex_object.py | 3 | 3775 | import string
import numpy as np
import pandas.util.testing as tm
from pandas import date_range, MultiIndex
class GetLoc(object):
def setup(self):
self.mi_large = MultiIndex.from_product(
[np.arange(1000), np.arange(20), list(string.ascii_letters)],
names=['one', 'two', 'three'])
self.mi_med = MultiIndex.from_product(
[np.arange(1000), np.arange(10), list('A')],
names=['one', 'two', 'three'])
self.mi_small = MultiIndex.from_product(
[np.arange(100), list('A'), list('A')],
names=['one', 'two', 'three'])
def time_large_get_loc(self):
self.mi_large.get_loc((999, 19, 'Z'))
def time_large_get_loc_warm(self):
for _ in range(1000):
self.mi_large.get_loc((999, 19, 'Z'))
def time_med_get_loc(self):
self.mi_med.get_loc((999, 9, 'A'))
def time_med_get_loc_warm(self):
for _ in range(1000):
self.mi_med.get_loc((999, 9, 'A'))
def time_string_get_loc(self):
self.mi_small.get_loc((99, 'A', 'A'))
def time_small_get_loc_warm(self):
for _ in range(1000):
self.mi_small.get_loc((99, 'A', 'A'))
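# Illustrative note: the *_warm variants repeat the same lookup so the
# timing reflects get_loc with the index's internal engine already built;
# the cold variants above include that one-off construction cost.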
class Duplicates(object):
def setup(self):
size = 65536
arrays = [np.random.randint(0, 8192, size),
np.random.randint(0, 1024, size)]
mask = np.random.rand(size) < 0.1
self.mi_unused_levels = MultiIndex.from_arrays(arrays)
self.mi_unused_levels = self.mi_unused_levels[mask]
def time_remove_unused_levels(self):
self.mi_unused_levels.remove_unused_levels()
class Integer(object):
def setup(self):
self.mi_int = MultiIndex.from_product([np.arange(1000),
np.arange(1000)],
names=['one', 'two'])
self.obj_index = np.array([(0, 10), (0, 11), (0, 12),
(0, 13), (0, 14), (0, 15),
(0, 16), (0, 17), (0, 18),
(0, 19)], dtype=object)
def time_get_indexer(self):
self.mi_int.get_indexer(self.obj_index)
def time_is_monotonic(self):
self.mi_int.is_monotonic
class Duplicated(object):
def setup(self):
n, k = 200, 5000
levels = [np.arange(n),
tm.makeStringIndex(n).values,
1000 + np.arange(n)]
labels = [np.random.choice(n, (k * n)) for lev in levels]
self.mi = MultiIndex(levels=levels, labels=labels)
def time_duplicated(self):
self.mi.duplicated()
class Sortlevel(object):
def setup(self):
n = 1182720
low, high = -4096, 4096
arrs = [np.repeat(np.random.randint(low, high, (n // k)), k)
for k in [11, 7, 5, 3, 1]]
self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)]
a = np.repeat(np.arange(100), 1000)
b = np.tile(np.arange(1000), 100)
self.mi = MultiIndex.from_arrays([a, b])
self.mi = self.mi.take(np.random.permutation(np.arange(100000)))
def time_sortlevel_int64(self):
self.mi_int.sortlevel()
def time_sortlevel_zero(self):
self.mi.sortlevel(0)
def time_sortlevel_one(self):
self.mi.sortlevel(1)
class Values(object):
def setup_cache(self):
level1 = range(1000)
level2 = date_range(start='1/1/2012', periods=100)
mi = MultiIndex.from_product([level1, level2])
return mi
def time_datetime_level_values_copy(self, mi):
mi.copy().values
def time_datetime_level_values_sliced(self, mi):
mi[:10].values
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/regression/tests/test_quantile_regression.py | 8 | 7766 |
import scipy.stats
import numpy as np
import statsmodels.api as sm
from numpy.testing import assert_allclose, assert_equal, assert_almost_equal
from patsy import dmatrices # pylint: disable=E0611
from statsmodels.regression.quantile_regression import QuantReg
from .results_quantile_regression import (
biweight_chamberlain, biweight_hsheather, biweight_bofinger,
cosine_chamberlain, cosine_hsheather, cosine_bofinger,
gaussian_chamberlain, gaussian_hsheather, gaussian_bofinger,
epan2_chamberlain, epan2_hsheather, epan2_bofinger,
parzen_chamberlain, parzen_hsheather, parzen_bofinger,
#rectangle_chamberlain, rectangle_hsheather, rectangle_bofinger,
#triangle_chamberlain, triangle_hsheather, triangle_bofinger,
#epanechnikov_chamberlain, epanechnikov_hsheather, epanechnikov_bofinger,
epanechnikov_hsheather_q75, Rquantreg)
idx = ['income', 'Intercept']
class CheckModelResultsMixin(object):
def test_params(self):
assert_allclose(np.ravel(self.res1.params.ix[idx]),
self.res2.table[:,0], rtol=1e-3)
def test_bse(self):
assert_equal(self.res1.scale, 1)
assert_allclose(np.ravel(self.res1.bse.ix[idx]),
self.res2.table[:,1], rtol=1e-3)
def test_tvalues(self):
assert_allclose(np.ravel(self.res1.tvalues.ix[idx]),
self.res2.table[:,2], rtol=1e-2)
def test_pvalues(self):
pvals_stata = scipy.stats.t.sf(self.res2.table[:, 2] , self.res2.df_r)
assert_allclose(np.ravel(self.res1.pvalues.ix[idx]),
pvals_stata, rtol=1.1)
# test that we use the t distribution for the p-values
pvals_t = scipy.stats.t.sf(self.res1.tvalues , self.res2.df_r) * 2
assert_allclose(np.ravel(self.res1.pvalues),
pvals_t, rtol=1e-9, atol=1e-10)
def test_conf_int(self):
assert_allclose(self.res1.conf_int().ix[idx],
self.res2.table[:,-2:], rtol=1e-3)
def test_nobs(self):
assert_allclose(self.res1.nobs, self.res2.N, rtol=1e-3)
def test_df_model(self):
assert_allclose(self.res1.df_model, self.res2.df_m, rtol=1e-3)
def test_df_resid(self):
assert_allclose(self.res1.df_resid, self.res2.df_r, rtol=1e-3)
def test_prsquared(self):
assert_allclose(self.res1.prsquared, self.res2.psrsquared, rtol=1e-3)
def test_sparsity(self):
assert_allclose(np.array(self.res1.sparsity),
self.res2.sparsity, rtol=1e-3)
def test_bandwidth(self):
assert_allclose(np.array(self.res1.bandwidth),
self.res2.kbwidth, rtol=1e-3)
d = {('biw','bofinger'): biweight_bofinger,
('biw','chamberlain'): biweight_chamberlain,
('biw','hsheather'): biweight_hsheather,
('cos','bofinger'): cosine_bofinger,
('cos','chamberlain'): cosine_chamberlain,
('cos','hsheather'): cosine_hsheather,
('gau','bofinger'): gaussian_bofinger,
('gau','chamberlain'): gaussian_chamberlain,
('gau','hsheather'): gaussian_hsheather,
('par','bofinger'): parzen_bofinger,
('par','chamberlain'): parzen_chamberlain,
('par','hsheather'): parzen_hsheather,
#('rec','bofinger'): rectangle_bofinger,
#('rec','chamberlain'): rectangle_chamberlain,
#('rec','hsheather'): rectangle_hsheather,
#('tri','bofinger'): triangle_bofinger,
#('tri','chamberlain'): triangle_chamberlain,
#('tri','hsheather'): triangle_hsheather,
('epa', 'bofinger'): epan2_bofinger,
('epa', 'chamberlain'): epan2_chamberlain,
('epa', 'hsheather'): epan2_hsheather
#('epa2', 'bofinger'): epan2_bofinger,
#('epa2', 'chamberlain'): epan2_chamberlain,
#('epa2', 'hsheather'): epan2_hsheather
}
def setup_fun(kernel='gau', bandwidth='bofinger'):
data = sm.datasets.engel.load_pandas().data
y, X = dmatrices('foodexp ~ income', data, return_type='dataframe')
statsm = QuantReg(y, X).fit(vcov='iid', kernel=kernel, bandwidth=bandwidth)
stata = d[(kernel, bandwidth)]
return statsm, stata
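# Illustrative: each TestXxx class below binds one (kernel, bandwidth) pair
# to its Stata reference results via setup_fun and inherits every assertion
# from CheckModelResultsMixin; e.g. setup_fun('gau', 'bofinger') fits
# QuantReg with a Gaussian kernel and Bofinger bandwidth and checks it
# against the gaussian_bofinger results.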
def test_fitted_residuals():
data = sm.datasets.engel.load_pandas().data
y, X = dmatrices('foodexp ~ income', data, return_type='dataframe')
res = QuantReg(y, X).fit(q=.1)
# Note: maxabs relative error with fitted is 1.789e-09
assert_almost_equal(np.array(res.fittedvalues), Rquantreg.fittedvalues, 5)
assert_almost_equal(np.array(res.predict()), Rquantreg.fittedvalues, 5)
assert_almost_equal(np.array(res.resid), Rquantreg.residuals, 5)
class TestEpanechnikovHsheatherQ75(CheckModelResultsMixin):
# Vincent Arel-Bundock also spot-checked q=.1
@classmethod
def setUp(cls):
data = sm.datasets.engel.load_pandas().data
y, X = dmatrices('foodexp ~ income', data, return_type='dataframe')
cls.res1 = QuantReg(y, X).fit(q=.75, vcov='iid', kernel='epa', bandwidth='hsheather')
cls.res2 = epanechnikov_hsheather_q75
class TestEpanechnikovBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('epa', 'bofinger')
class TestEpanechnikovChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('epa', 'chamberlain')
class TestEpanechnikovHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('epa', 'hsheather')
class TestGaussianBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('gau', 'bofinger')
class TestGaussianChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('gau', 'chamberlain')
class TestGaussianHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('gau', 'hsheather')
class TestBiweightBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('biw', 'bofinger')
class TestBiweightChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('biw', 'chamberlain')
class TestBiweightHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('biw', 'hsheather')
class TestCosineBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('cos', 'bofinger')
class TestCosineChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('cos', 'chamberlain')
class TestCosineHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('cos', 'hsheather')
class TestParzeneBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('par', 'bofinger')
class TestParzeneChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('par', 'chamberlain')
class TestParzeneHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('par', 'hsheather')
#class TestTriangleBofinger(CheckModelResultsMixin):
#@classmethod
#def setUp(cls):
#cls.res1, cls.res2 = setup_fun('tri', 'bofinger')
#class TestTriangleChamberlain(CheckModelResultsMixin):
#@classmethod
#def setUp(cls):
#cls.res1, cls.res2 = setup_fun('tri', 'chamberlain')
#class TestTriangleHsheather(CheckModelResultsMixin):
#@classmethod
#def setUp(cls):
#cls.res1, cls.res2 = setup_fun('tri', 'hsheather')
| bsd-3-clause |
yanheven/pyfolio | setup.py | 5 | 2484 | #!/usr/bin/env python
from setuptools import setup
import sys
DISTNAME = 'pyfolio'
DESCRIPTION = "pyfolio is a Python library for performance and risk analysis of financial portfolios"
LONG_DESCRIPTION = """pyfolio is a Python library for performance and risk analysis of
financial portfolios developed by `Quantopian Inc`_. It works well with the
`Zipline`_ open source backtesting library.
At the core of pyfolio is a so-called tear sheet that consists of
various individual plots that provide a comprehensive performance
overview of a portfolio.
.. _Quantopian Inc: https://www.quantopian.com
.. _Zipline: http://zipline.io
"""
MAINTAINER = 'Quantopian Inc'
MAINTAINER_EMAIL = '[email protected]'
AUTHOR = 'Quantopian Inc'
AUTHOR_EMAIL = '[email protected]'
URL = "https://github.com/quantopian/pyfolio"
LICENSE = "Apache License, Version 2.0"
VERSION = "0.1"
classifiers = ['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: OS Independent']
install_reqs = [
'funcsigs>=0.4',
'matplotlib>=1.4.0',
'mock>=1.1.2',
'numpy>=1.9.1',
'pandas>=0.15.0',
'pyparsing>=2.0.3',
'python-dateutil>=2.4.2',
'pytz>=2014.10',
'scikit-learn>=0.15.0',
'scipy>=0.14.0',
'seaborn>=0.6.0',
'statsmodels>=0.5.0']
extras_reqs = {
'bayesian': ['pymc3']
}
test_reqs = ['nose>=1.3.7', 'nose-parameterized>=0.5.0', 'runipy>=0.1.3']
if __name__ == "__main__":
setup(name=DISTNAME,
version=VERSION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
packages=['pyfolio', 'pyfolio.tests'],
package_data={'pyfolio': ['data/*.*']},
classifiers=classifiers,
install_requires=install_reqs,
extras_require=extras_reqs,
tests_require=test_reqs,
test_suite='nose.collector')
| apache-2.0 |
jamesrobertlloyd/automl-phase-2 | learners.py | 1 | 26697 | from __future__ import division
__author__ = 'James Robert Lloyd, Emma Smith'
__description__ = 'Objects that learn from data and predict things'
import time
import copy
import os
# import cPickle as pickle
from collections import defaultdict
import numpy as np
# from sklearn import metrics
import libscores
from agent import Agent, TerminationEx
import util
import constants
import logging
import global_data
# Set up logging for learners module
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# TODO - WarmLearner and OneShotLearner should derive from a common base class
class LearnerAgent(Agent):
"""Base class for agent wrappers around learners"""
def __init__(self, learner, learner_kwargs, train_idx, test_idx, data_info, feature_subset, **kwargs):
super(LearnerAgent, self).__init__(**kwargs)
self.learner = learner(**learner_kwargs)
self.train_idx = train_idx
self.test_idx = test_idx
self.data_info = data_info
self.feature_subset = feature_subset
self.time_before_checkpoint = 0
self.time_checkpoint = None
self.score_times = []
self.score_values = []
self.held_out_prediction_times = []
self.held_out_prediction_files = []
self.valid_prediction_times = []
self.valid_prediction_files = []
self.test_prediction_times = []
self.test_prediction_files = []
self.all_class_labels = []
self.training_class_labels = []
self.training_to_all_class_labels = []
self.test_truth = None
self.data_source = None # Record how we should access data
def first_action(self):
# Record the observed class labels if doing multiclass classification - in case training data does not have
# examples of all classes
if self.data_info['task'] == 'multiclass.classification':
self.all_class_labels = np.unique(self.data['Y_train'])
self.training_class_labels = np.unique(self.data['Y_train'][self.train_idx])
self.training_to_all_class_labels = []
for class_label in self.training_class_labels:
location = np.where(class_label == self.all_class_labels)[0][0]
self.training_to_all_class_labels.append(location)
# Record the truth
if self.data_info['task'] == 'multiclass.classification':
self.test_truth = self.data['Y_train_1_of_k'][self.test_idx]
else:
self.test_truth = self.data['Y_train'][self.test_idx]
# Set up feature subset if none or too large
if self.feature_subset is None or self.feature_subset > self.data['X_train'].shape[1]:
self.feature_subset = self.data['X_train'].shape[1]
def get_data(self, name, rows, max_cols):
"""Gets data whilst dealing with sparse / dense stuff"""
if self.data_source == constants.ORIGINAL:
if rows == 'all':
return self.data[name][:, :max_cols]
else:
return self.data[name][rows, :max_cols]
elif self.data_source == constants.DENSE:
if rows == 'all':
return self.data[name + '_dense'][:, :max_cols]
else:
return self.data[name + '_dense'][rows, :max_cols]
elif self.data_source == constants.CONVERT_TO_DENSE:
if rows == 'all':
return self.data[name][:, :max_cols].toarray()
else:
return self.data[name][rows, :max_cols].toarray()
else:
raise Exception('Unrecognised data source = %s' % self.data_source)
def fit(self, rows, max_cols):
"""Deals with sparse / dense stuff"""
if self.data_source is None:
# Need to determine appropriate data source
try:
self.learner.fit(X=self.data['X_train'][rows, :max_cols],
y=self.data['Y_train'][rows])
self.data_source = constants.ORIGINAL
except TypeError:
# Failed to use sparse data
if 'X_train_dense' in self.data:
self.learner.fit(X=self.data['X_train_dense'][rows, :max_cols],
y=self.data['Y_train'][rows])
self.data_source = constants.DENSE
else:
self.learner.fit(X=self.data['X_train'][rows, :max_cols].toarray(),
y=self.data['Y_train'][rows])
self.data_source = constants.CONVERT_TO_DENSE
else:
self.learner.fit(X=self.get_data(name='X_train', rows=rows, max_cols=max_cols),
y=self.data['Y_train'][rows])
def predict(self, name, rows, max_cols):
"""Deals with different types of task"""
X_test = self.get_data(name=name, rows=rows, max_cols=max_cols)
if self.data_info['task'] == 'binary.classification':
return self.learner.predict_proba(X_test)[:, -1]
elif self.data_info['task'] == 'multiclass.classification':
result = np.ones((X_test.shape[0], len(self.all_class_labels)))
result[:, self.training_to_all_class_labels] = self.learner.predict_proba(X_test)
return result
else:
raise Exception('I do not know how to form predictions for task : %s' % self.data_info['task'])
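# Illustrative: the multiclass branch above widens predict_proba output to
# cover classes absent from this fold's training split. E.g. with
# all_class_labels == [0, 1, 2, 3] but only [0, 2] seen in training,
# training_to_all_class_labels == [0, 2], so the learner's two probability
# columns are scattered into columns 0 and 2 of a matrix pre-filled with
# ones (the fill value this code uses for unseen classes).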
class WarmLearnerAgent(LearnerAgent):
"""Agent wrapper around warm learner"""
def __init__(self, time_quantum=30, n_estimators_quantum=1, n_samples=10, **kwargs):
super(WarmLearnerAgent, self).__init__(**kwargs)
self.time_quantum = time_quantum
self.n_estimators_quantum = n_estimators_quantum
self.learner.n_estimators = self.n_estimators_quantum
self.n_samples = n_samples
self.run_one_iteration = False
def read_messages(self):
while True:
try:
message = self.inbox.pop(0)
except (IndexError, AttributeError):
break
else:
self.standard_responses(message)
# print(message)
if message['subject'] == 'compute quantum':
# print('Warm learner received compute quantum message')
self.time_quantum = message['compute_quantum']
elif message['subject'] == 'run one iteration':
self.run_one_iteration = True
def next_action(self):
# Read messages
self.read_messages()
# Start timing
self.time_checkpoint = time.clock()
predict_time = self.time_quantum / self.n_samples
scores_so_far = 0
# Increase estimators and learn
while time.clock() - self.time_checkpoint < self.time_quantum:
# Read messages - maybe compute quantum has changed?
self.get_parent_inbox()
self.read_messages()
# Do learning
self.learner.n_estimators += self.n_estimators_quantum
start_time = time.clock()
self.fit(self.train_idx, self.feature_subset)
time_taken = time.clock() - start_time
if global_data.exp['slowdown_factor'] > 1:
util.waste_cpu_time(time_taken * (global_data.exp['slowdown_factor'] - 1))
if time.clock() - self.time_checkpoint > predict_time:
predictions = self.predict('X_train', self.test_idx, self.feature_subset)
truth = self.test_truth
score = libscores.eval_metric(metric=self.data_info['eval_metric'],
truth=truth,
predictions=predictions,
task=self.data_info['task'])
self.score_times.append(time.clock() - self.time_checkpoint + self.time_before_checkpoint)
self.score_values.append(score)
# Send score and time to parent
self.send_to_parent(dict(subject='score', sender=self.name,
time=self.score_times[-1],
score=self.score_values[-1]))
scores_so_far += 1
# Next time at which to make a prediction
if self.n_samples > scores_so_far:
predict_time = time.clock() - self.time_checkpoint + \
(self.time_quantum - (time.clock() - self.time_checkpoint)) / \
(self.n_samples - scores_so_far)
else:
break
# Save total time taken
# TODO - this is ignoring the time taken to make valid and test predictions
self.time_before_checkpoint += time.clock() - self.time_checkpoint
# Now make predictions
# FIXME - send all of this data at the same time to prevent gotchas
if 'X_valid' in self.data:
predictions = self.predict('X_valid', 'all', self.feature_subset)
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
self.valid_prediction_files.append(tmp_filename)
self.valid_prediction_times.append(self.time_before_checkpoint)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='valid',
time=self.valid_prediction_times[-1],
filename=self.valid_prediction_files[-1]))
if 'X_test' in self.data:
predictions = self.predict('X_test', 'all', self.feature_subset)
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
self.test_prediction_files.append(tmp_filename)
self.test_prediction_times.append(self.time_before_checkpoint)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='test',
time=self.test_prediction_times[-1],
filename=self.test_prediction_files[-1]))
predictions = self.predict('X_train', self.test_idx, self.feature_subset)
# print('Held out')
# print(predictions[0])
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
self.held_out_prediction_files.append(tmp_filename)
self.held_out_prediction_times.append(self.time_before_checkpoint)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='held out',
idx=self.test_idx,
time=self.held_out_prediction_times[-1],
filename=self.held_out_prediction_files[-1]))
if self.run_one_iteration:
self.pause()
class OneShotLearnerAgent(LearnerAgent):
"""Agent wrapper around learner which learns once"""
def __init__(self, **kwargs):
super(OneShotLearnerAgent, self).__init__(**kwargs)
def read_messages(self):
while True:
try:
message = self.inbox.pop(0)
except (IndexError, AttributeError):
break
else:
self.standard_responses(message)
def next_action(self):
self.read_messages()
# Start timing
self.time_checkpoint = time.clock()
# Fit learner
self.fit(self.train_idx, self.feature_subset)
# Make predictions on held out set and evaluate
predictions = self.predict('X_train', self.test_idx, self.feature_subset)
truth = self.test_truth
score = libscores.eval_metric(metric=self.data_info['eval_metric'],
truth=truth,
predictions=predictions,
task=self.data_info['task'])
self.score_times.append(time.clock() - self.time_checkpoint + self.time_before_checkpoint)
self.score_values.append(score)
# Send score and time to parent
self.send_to_parent(dict(subject='score', sender=self.name,
time=self.score_times[-1],
score=self.score_values[-1]))
# Save total time taken
# TODO - this is ignoring the time taken to make valid and test predictions
self.time_before_checkpoint += time.clock() - self.time_checkpoint
# Now make predictions on valid, test and held out sets
# FIXME - send all of this data at the same time to prevent gotchas
if 'X_valid' in self.data:
predictions = self.predict('X_valid', 'all', self.feature_subset)
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
self.valid_prediction_files.append(tmp_filename)
self.valid_prediction_times.append(self.time_before_checkpoint)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='valid',
time=self.valid_prediction_times[-1],
filename=self.valid_prediction_files[-1]))
if 'X_test' in self.data:
predictions = self.predict('X_test', 'all', self.feature_subset)
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
self.test_prediction_files.append(tmp_filename)
self.test_prediction_times.append(self.time_before_checkpoint)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='test',
time=self.test_prediction_times[-1],
filename=self.test_prediction_files[-1]))
predictions = self.predict('X_train', self.test_idx, self.feature_subset)
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
self.held_out_prediction_files.append(tmp_filename)
self.held_out_prediction_times.append(self.time_before_checkpoint)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='held out',
idx=self.test_idx,
time=self.held_out_prediction_times[-1],
filename=self.held_out_prediction_files[-1]))
# And I'm spent
raise TerminationEx
class CrossValidationAgent(Agent):
"""Basic cross validation agent"""
def __init__(self, learner, learner_kwargs, agent_kwargs, folds, data_info,
agent=WarmLearnerAgent, subset_prop=1, feature_subset=None, **kwargs):
super(CrossValidationAgent, self).__init__(**kwargs)
self.data_info = data_info
self.child_info = []
for train, test in folds:
if subset_prop < 1:
# noinspection PyUnresolvedReferences
train = train[:int(np.floor(subset_prop * train.size))]
self.child_info.append((agent,
util.merge_dicts(agent_kwargs,
dict(learner=learner, learner_kwargs=learner_kwargs,
train_idx=train, test_idx=test,
data_info=data_info,
feature_subset=feature_subset))))
self.score_times = []
self.score_values = []
self.held_out_prediction_times = []
self.held_out_prediction_files = []
self.valid_prediction_times = []
self.valid_prediction_files = []
self.test_prediction_times = []
self.test_prediction_files = []
self.child_score_times = dict()
self.child_score_values = dict()
self.child_held_out_prediction_times = dict()
self.child_held_out_prediction_files = dict()
self.child_held_out_idx = dict()
self.child_valid_prediction_times = dict()
self.child_valid_prediction_files = dict()
self.child_test_prediction_times = dict()
self.child_test_prediction_files = dict()
self.communication_sleep = 0.1
# TODO: Improve this hack!
if agent == WarmLearnerAgent:
self.immortal_offspring = True
else:
self.immortal_offspring = False
def read_messages(self):
for child_name, inbox in self.child_inboxes.iteritems():
while True:
try:
message = inbox.pop(0)
except (IndexError, AttributeError):
break
else:
if message['subject'] == 'score':
self.child_score_times[child_name].append(message['time'])
self.child_score_values[child_name].append(message['score'])
elif message['subject'] == 'predictions':
if message['partition'] == 'valid':
self.child_valid_prediction_times[child_name].append(message['time'])
self.child_valid_prediction_files[child_name].append(message['filename'])
elif message['partition'] == 'test':
self.child_test_prediction_times[child_name].append(message['time'])
self.child_test_prediction_files[child_name].append(message['filename'])
elif message['partition'] == 'held out':
self.child_held_out_idx[child_name] = message['idx']
self.child_held_out_prediction_times[child_name].append(message['time'])
self.child_held_out_prediction_files[child_name].append(message['filename'])
while True:
try:
message = self.inbox.pop(0)
except (IndexError, AttributeError):
break
else:
self.standard_responses(message)
# print(message)
if message['subject'] == 'compute quantum':
# print('Cross validater received compute quantum message')
self.send_to_children(message)
def first_action(self):
self.create_children(classes=self.child_info)
for child_name in self.child_processes.iterkeys():
self.child_score_times[child_name] = []
self.child_score_values[child_name] = []
self.child_held_out_prediction_times[child_name] = []
self.child_held_out_prediction_files[child_name] = []
self.child_valid_prediction_times[child_name] = []
self.child_valid_prediction_files[child_name] = []
self.child_test_prediction_times[child_name] = []
self.child_test_prediction_files[child_name] = []
# self.broadcast_to_children(message=dict(subject='start'))
self.start_children()
def next_action(self):
# Check mail
self.read_messages()
# Collect up scores and predictions - even if paused, children may still be finishing tasks
min_n_scores = min(len(scores) for scores in self.child_score_values.itervalues())
while len(self.score_values) < min_n_scores:
n = len(self.score_values)
num_scores = 0
sum_scores = 0
for child_scores in self.child_score_values.itervalues():
# noinspection PyUnresolvedReferences
if not np.isnan(child_scores[n]):
num_scores += 1
sum_scores += child_scores[n]
score = sum_scores / num_scores
# score = sum(scores[n] for scores in self.child_score_values.itervalues()) /\
# len(self.child_score_values)
maxtime = max(times[n] for times in self.child_score_times.itervalues())
self.score_values.append(score)
self.score_times.append(maxtime)
self.send_to_parent(dict(subject='score', sender=self.name,
time=self.score_times[-1],
score=self.score_values[-1]))
# FIXME - send all of this data at the same time to prevent gotchas
min_n_valid = min(len(times) for times in self.child_valid_prediction_times.itervalues())
while len(self.valid_prediction_times) < min_n_valid:
n = len(self.valid_prediction_times)
predictions = None
for child_name in self.child_score_times.iterkeys():
filename = self.child_valid_prediction_files[child_name][n]
child_predictions = np.load(filename)
os.remove(filename)
if predictions is None:
predictions = child_predictions
else:
predictions += child_predictions
predictions /= len(self.child_score_times)
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
maxtime = max(times[n] for times in self.child_valid_prediction_times.itervalues())
self.valid_prediction_files.append(tmp_filename)
self.valid_prediction_times.append(maxtime)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='valid',
time=self.valid_prediction_times[-1],
filename=self.valid_prediction_files[-1]))
min_n_test = min(len(times) for times in self.child_test_prediction_times.itervalues())
while len(self.test_prediction_times) < min_n_test:
n = len(self.test_prediction_times)
predictions = None
for child_name in self.child_score_times.iterkeys():
filename = self.child_test_prediction_files[child_name][n]
child_predictions = np.load(filename)
os.remove(filename)
if predictions is None:
predictions = child_predictions
else:
predictions += child_predictions
predictions /= len(self.child_score_times)
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
maxtime = max(times[n] for times in self.child_test_prediction_times.itervalues())
self.test_prediction_files.append(tmp_filename)
self.test_prediction_times.append(maxtime)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='test',
time=self.test_prediction_times[-1],
filename=self.test_prediction_files[-1]))
min_n_held_out = min(len(times) for times in self.child_held_out_prediction_times.itervalues())
while len(self.held_out_prediction_times) < min_n_held_out:
n = len(self.held_out_prediction_times)
# FIXME - get rid of if else here
if self.data_info['task'] == 'multiclass.classification':
predictions = np.zeros(self.data['Y_train_1_of_k'].shape)
# print('Prediction shape')
# print(predictions.shape)
else:
predictions = np.zeros(self.data['Y_train'].shape)
for child_name in self.child_score_times.iterkeys():
filename = self.child_held_out_prediction_files[child_name][n]
child_predictions = np.load(filename)
os.remove(filename)
predictions[self.child_held_out_idx[child_name]] = child_predictions
# print('Combined predictions')
# print(predictions[0])
tmp_filename = util.random_temp_file_name('.npy')
np.save(tmp_filename, predictions)
maxtime = max(times[n] for times in self.child_held_out_prediction_times.itervalues())
self.held_out_prediction_files.append(tmp_filename)
self.held_out_prediction_times.append(maxtime)
self.send_to_parent(dict(subject='predictions', sender=self.name, partition='held out',
time=self.held_out_prediction_times[-1],
filename=self.held_out_prediction_files[-1]))
# Check to see if all children have terminated - if so, terminate this agent
# immortal child dying is failure
# mortal child dying without sending results is failure
# any child failure should kill parent
if self.immortal_offspring is True and len(self.conns_from_children) != len(self.child_states):
logger.error("%s: Immortal child has died. Dying of grief", self.name)
raise TerminationEx
elif self.immortal_offspring is False:
dead_kids = [x for x in self.child_states if x not in self.conns_from_children]
for dk in dead_kids:
if len(self.child_test_prediction_files[dk]) == 0:
logger.error("%s: Mortal child %s has died without sending results", self.name, dk)
raise TerminationEx
if len(self.conns_from_children) == 0:
logger.info("%s: No children remaining. Terminating.", self.name)
raise TerminationEx
class WarmLearner(object):
"""Wrapper around things like random forest that don't have a warm start method"""
def __init__(self, base_model, base_model_kwargs):
self.base_model = base_model(**base_model_kwargs)
self.model = copy.deepcopy(self.base_model)
self.n_estimators = self.model.n_estimators
self.first_fit = True
def predict_proba(self, X):
return self.model.predict_proba(X)
def fit(self, X, y):
if self.first_fit:
self.model.fit(X, y)
self.first_fit = False
# Keep training and appending base estimators to main model
while self.model.n_estimators < self.n_estimators:
self.base_model.fit(X, y)
self.model.estimators_ += self.base_model.estimators_
self.model.n_estimators = len(self.model.estimators_)
# Clip any extra models produced
self.model.estimators_ = self.model.estimators_[:self.n_estimators]
self.model.n_estimators = self.n_estimators
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/datasets/tests/test_samples_generator.py | 3 | 16633 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_moons
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
weights = [0.1, 0.25]
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=weights,
random_state=0)
assert_equal(weights, [0.1, 0.25])
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
# Test for n_features > 30
X, y = make_classification(n_samples=2000, n_features=31, n_informative=31,
n_redundant=0, n_repeated=0, hypercube=True,
scale=0.5, random_state=0)
assert_equal(X.shape, (2000, 31), "X shape mismatch")
assert_equal(y.shape, (2000,), "y shape mismatch")
assert_equal(np.unique(X.view([('', X.dtype)]*X.shape[1])).view(X.dtype)
.reshape(-1, X.shape[1]).shape[0], 2000,
"Unexpected number of unique rows")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
def test_make_moons():
X, y = make_moons(3, shuffle=False)
for x, label in zip(X, y):
center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
dist_sqr = ((x - center) ** 2).sum()
assert_almost_equal(dist_sqr, 1.0,
err_msg="Point is not on expected unit circle")
| mit |
monkeypants/MAVProxy | MAVProxy/modules/lib/MacOS/backend_wxagg.py | 7 | 5884 | from __future__ import division, print_function
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx # already uses wxversion.ensureMinimal('2.8')
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
import wx
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
            # NavigationToolbarWx (the classic toolbar) is not imported at the
            # top of this module; look it up lazily on backend_wx so the other
            # branches keep working even if it is absent.
            toolbar = backend_wx.NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
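        # (Added note: callers typically draw once, then pass the bounds of the
        # artist that changed, e.g. canvas.blit(ax.bbox), so only that region
        # is copied back to the screen.)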
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
        # Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
        # of the rendered figure for determining things like
        # bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp | gpl-3.0 |
myuuuuun/NumericalCalculation | chapter2/chap2.py | 1 | 11780 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2015 @myuuuuun
https://github.com/myuuuuun/NumericalCalculation
This software is released under the MIT License.
"""
from __future__ import division, print_function
import math
import numpy as np
import functools
import sys
import types
import matplotlib.pyplot as plt
import matplotlib.cm as cm
EPSIRON = 1.0e-8
"""
グラフを描画し、その上に元々与えられていた点列を重ねてプロットする
INPUT:
points: 与えられた点列のリスト[[x_0, f_0], [x_1, f_1], ..., [x_n, f_n]]
x_list: 近似曲線を描写するxの範囲・密度
f_list: 上のxに対応するfの値
"""
def points_on_func(points, x_list, f_list, **kwargs):
    title = kwargs.get('title', "Given points and approximating curve")
xlim = kwargs.get('xlim', False)
ylim = kwargs.get('ylim', False)
fig, ax = plt.subplots()
plt.title(title)
    plt.plot(x_list, f_list, color='b', linewidth=1, label="approximating curve")
points_x = [point[0] for point in points]
points_y = [point[1] for point in points]
    plt.plot(points_x, points_y, 'o', color='r', label="given points")
plt.xlabel("x")
plt.ylabel("f")
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
plt.legend()
plt.show()
"""
元々の関数と、(いくつかの)近似関数を重ねてプロットする
INPUT:
x_list: それぞれの近似曲線を描写するxの範囲・密度
f: 元々の関数のf
f_lists: 「それぞれの関数の、上のxに対応するfの値」の配列
"""
def funcs(x_list, f, f_lists, **kwargs):
    title = kwargs.get('title', "Original function and approximating functions")
xlim = kwargs.get('xlim', False)
ylim = kwargs.get('ylim', False)
labels = kwargs.get('labels', False)
points = kwargs.get('points', False)
axis = kwargs.get('axis', False)
if not isinstance(f_lists, list):
f_lists = [f_lists]
    # number of approximating curves
num = len(f_lists)
fig, ax = plt.subplots()
plt.title(title)
if axis:
plt.axvline(x=axis, color='#444444')
    plt.plot(x_list, f, color='#444444', linewidth=3, label="original curve")
for i in range(num):
if labels:
plt.plot(x_list, f_lists[i], linewidth=1, color=cm.gist_rainbow(i*1.0/num), label=labels[i])
else:
plt.plot(x_list, f_lists[i], linewidth=1, color=cm.gist_rainbow(i*1.0/num))
if points:
points_x = [point[0] for point in points]
points_y = [point[1] for point in points]
plt.plot(points_x, points_y, 'o', color='#000000')
plt.xlabel("x")
plt.ylabel("f")
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
plt.legend()
plt.show()
"""
式(2.5)の実装
n+1個の点列を入力し、逆行列を解いて、補間多項式を求め、
n次補間多項式の係数行列[a_0, a_1, ..., a_n]を返す
INPUT
points: n+1個の点列[[x_0, f_0], [x_1, f_1], ..., [x_n, f_n]]
OUTPUT
n次補間多項式の係数行列[a_0, a_1, ..., a_n]を返す
"""
def lagrange(points):
    # degree of the polynomial
dim = len(points) - 1
# [x_0^0, x_0^1, ..., x_0^n] [a_0] [f_0]
# [ . ] [ . ] [ . ]
# [ . ] * [ . ] = [ . ]
# [ . ] [ . ] [ . ]
# [x_n^0, x_n^1, ..., x_n^n] [a_n] [f_n]
#
    # so we compute A = X^-1 * F
    # build matrix X (the Vandermonde matrix)
x_matrix = np.array([[pow(point[0], j) for j in range(dim + 1)] for point in points])
    # build vector F
f_matrix = np.array([point[1] for point in points])
    # solve the linear system X * A = F
a_matrix = np.linalg.solve(x_matrix, f_matrix)
return a_matrix
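# Quick sketch of lagrange() in use (added; example values assumed, not from
# the text): the points (0, 1) and (1, 3) lie on f(x) = 1 + 2x, so
#     lagrange([[0, 1], [1, 3]])  ->  array([1., 2.])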
"""
係数行列[a_0, a_1, ..., a_n] から、n次多項式 a_0 + a_1 * x + ... + a_n * x^n
を生成して返す(関数を返す)
"""
def make_polynomial(a_matrix):
def __func__(x):
f = 0
for n, a_i in enumerate(a_matrix):
f += a_i * pow(x, n)
return f
return __func__
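# Sketch of composing the two helpers above (added; example values assumed):
#     f = make_polynomial(lagrange([[0, 1], [1, 3]]))
#     f(2.0)  ->  5.0, because the interpolant is 1 + 2x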
"""
式(2.7)の実装
補間多項式を変形した式から、逆行列の計算をすることなく、ラグランジュの補間多項式を求める
ただし、今回は補間多項式の係数行列を返すのではなく、具体的なxの値のリストに対して、
補間値のリストを生成して返す
# INPUT
# points: 与えられた点列を入力
# x_list: 補間値を求めたいxのリストを入力
# OUTPUT
# f_list: x_listの各要素に対する補間値のリスト
"""
def lagrange2(points, x_list=np.arange(-5, 5, 0.1)):
dim = len(points) - 1
f_list = []
for x in x_list:
L = 0
for i in range(dim + 1):
Li = 1
for j in range(dim + 1):
if j != i:
Li *= (x - points[j][0]) / (points[i][0] - points[j][0])
Li *= points[i][1]
L += Li
f_list.append(L)
return f_list
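# Sketch (added): lagrange2 reproduces the same interpolant without solving a
# linear system, e.g. lagrange2([[0, 1], [1, 3]], x_list=[2.0]) -> [5.0]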
"""
ネヴィルの算法の実装
# INPUT
# points: 与えられた点列を入力
# x: 補間多項式による近似値f(x)を求めたい点
# OUTPUT
# pointsのうち、xに近い何点かを使って近似した多項式におけるf(x)の値
"""
def neville(points, x, **kwargs):
    # maximum number of points to use
size = kwargs.get('size', len(points))
size = size if len(points) >= size else len(points)
    # whether to return early once the change in f(x) becomes small
useallpoints = kwargs.get('useallpoints', True)
    # sort the points by distance from x
ordered = sorted(points, key=lambda point: pow(point[0] - x, 2))
    # initialize the dynamic-programming table
table = [[0 for j in range(i+1)] for i in range(size)]
    # Take one point at a time, nearest to x first, and loop;
    # stop once f(x) has converged well enough or all points have been used.
    # Return f(x) of the approximating polynomial of degree (points used - 1).
for i in range(size):
table[i][0] = ordered[i][1]
for j in range(1, i+1):
table[i][j] = ((ordered[i][0] - x) * table[i-1][j-1] - (ordered[i-j][0] - x) * table[i][j-1]) / (ordered[i][0] - ordered[i-j][0])
print(table[i][i])
if not useallpoints and math.fabs(table[i][i] - table[i-1][i-1]) < EPSIRON:
print("途中終了しました。全部で {0} 個の点を使いました。".format(i+1))
return table[i][i]
print("最大限の点を用いて近似しました。全部で {0} 個の点を使いました。".format(i+1))
return table[size-1][size-1]
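# Sketch (added): with the same linear data as above,
#     neville([[0, 1], [1, 3]], 2.0)
# prints the intermediate table values and returns 5.0.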
def spline(points, x_list, step, **kwargs):
flag = kwargs.get('flag', False)
size = len(points)
a_matrix = np.zeros((size-2, size-2))
f_matrix = np.zeros(size-2)
    # sort the points in ascending order of x
points = sorted(points, key=lambda point: point[0])
print(points)
for i in range(size-2):
for j in range(size-2):
if j == i:
a_matrix[i][j] = 4
elif j == i-1 or j == i+1:
a_matrix[i][j] = 1
else:
a_matrix[i][j] = 0
f_matrix[i] = (points[i][1] - 2 * points[i+1][1] + points[i+2][1]) * 6.0 / pow(step, 2)
dx2_matrix = np.r_[[0], np.linalg.solve(a_matrix, f_matrix), [0]]
print(a_matrix)
print(f_matrix)
print(dx2_matrix)
f_list = []
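    # __expression evaluates one cubic-spline segment on [u, v]: p and q are
    # the second derivatives at u and v, fu and fv the function values there.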
def __expression(p, q, u, v, x, fv, fu):
return ((q-p)*pow(x-u, 3))/(6*(v-u)) + (p * pow(x-u, 2))/2 + ( (fv-fu)/(v-u) - (q+2*p)*(v-u)/6 )*(x-u) + fu
for x in x_list:
if x < points[0][0] or x > points[-1][0]:
f_list.append(0)
else:
for i in range(size-1):
if points[i][0] <= x <= points[i+1][0]:
fu = points[i][1]
fv = points[i+1][1]
p = dx2_matrix[i]
q = dx2_matrix[i+1]
u = points[i][0]
v = points[i+1][0]
f_list.append(__expression(p, q, u, v, x, fv, fu))
if flag:
print(p, q)
x3 = (q-p) / (6*(v-u))
x2 = -3 * u * (q-p)/(6*(v-u)) + p/2
x1 = 3 * pow(u, 2) * (q-p) / (6*(v-u)) - u*p + ( (fv-fu)/(v-u) - (q+2*p)*(v-u)/6 )
x0 = -1*pow(u, 3)*(q-p)/(6*(v-u)) + p/2*pow(u, 2) + ( (fv-fu)/(v-u) - (q+2*p)*(v-u)/6 )*-1*u + fu
print("x = {0:.4f} の時、{1:.3f} x^3 + {2:.3f} x^2 + {3:.3f} x + {4:.3f}".format(x, x3, x2, x1, x0))
break
return f_list
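# Note (added): padding dx2_matrix with zeros at both ends makes this a
# "natural" cubic spline. A minimal call might look like (values assumed):
#     spline([[0, 0], [1, 1], [2, 0]], np.arange(0, 2, 0.5), step=1)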
if __name__ == "__main__":
"""
    # Plot the interpolating polynomial obtained with lagrange() together with the original points
    # list of the given points
points = [[1, 1], [2, 2], [3, 1], [4, 1], [5, 3]]
    # compute the coefficient vector of the Lagrange interpolating polynomial
a_matrix = lagrange(points)
    # convert the coefficient vector into a polynomial function
func_lagrange = make_polynomial(a_matrix)
    # compute pairs of x and f values from 0 to 8 in steps of 0.1
x_list = np.arange(0, 8, 0.1)
f_list = func_lagrange(x_list)
    # plot the result
points_on_func(points, x_list, f_list)
"""
"""
    # lagrange2 can do the same thing
points = [[1, 1], [2, 2], [3, 1], [4, 1], [5, 3]]
a_matrix = lagrange2(points, np.arange(-10, 8, 0.1))
points_on_func(points, np.arange(0, 8, 0.1), a_matrix, title="lagrange2で求めた補間多項式と元の点列をプロット")
"""
"""
    # try Example 5
points = [[1, 1], [2, 2], [3, 1], [4, 1], [5, 3]]
print(neville(points, 2.2))
"""
"""
    # estimate sin 26° from known values of sin
points_theta = [-90, -60, -45, -30, 0, 30, 45, 60, 90]
points_x = [theta * math.pi / 180 for theta in points_theta]
points_f = np.sin(points_x)
points = [[x, f] for x, f in zip(points_x, points_f)]
print(neville(points, 26 * math.pi / 180, useallpoints=False))
print(np.sin(26 * math.pi / 180))
"""
"""
    # estimate sin 26° using an arbitrary number of points
rad_26 = 26 * math.pi / 180
points_theta = [-90, -60, -45, -30, 0, 30, 45, 60, 90]
points_x = [theta * math.pi / 180 for theta in points_theta]
points_f = np.sin(points_x)
points = [[x, f] for x, f in zip(points_x, points_f)]
ordered = sorted(points, key=lambda point: pow(point[0] - rad_26, 2))
x_list = np.linspace(-math.pi, math.pi, 1000)
f_list = np.sin(x_list)
f2 = lagrange2(ordered[0:2], x_list)
f3 = lagrange2(ordered[0:3], x_list)
f4 = lagrange2(ordered[0:4], x_list)
f5 = lagrange2(ordered[0:5], x_list)
f6 = lagrange2(ordered[0:6], x_list)
    funcs(x_list, f_list, [f2, f3, f4, f5, f6], labels=["2 points", "3 points", "4 points", "5 points", "6 points"], points=points, axis=rad_26)
"""
"""
func = lambda x: 1 / (25 * x**2 + 1)
    points_x = np.linspace(-2, 2, 9)  # 9 points with spacing 0.5
points_f = func(points_x)
points = [[x, f] for x, f in zip(points_x, points_f)]
x_list = np.linspace(-2, 2, 1000)
f_list = func(x_list)
f1 = lagrange2(points[0:5], x_list)
f2 = lagrange2(points[0:7], x_list)
f3 = lagrange2(points[0:9], x_list)
    funcs(x_list, f_list, [f1, f2, f3], labels=["5 points", "7 points", "9 points"], points=points, axis=0)
"""
points = [[1, 1], [2, 1], [3, 2], [4, 3]]
x_list = np.arange(1, 5, 0.01)
f_list = spline(points, x_list, 1, flag=True)
points_on_func(points, x_list, f_list, xlim=[0, 8], ylim=[0, 5])
| mit |
Eric89GXL/mne-python | examples/time_frequency/plot_compute_csd.py | 20 | 3996 | """
==================================================
Compute a cross-spectral density (CSD) matrix
==================================================
A cross-spectral density (CSD) matrix is similar to a covariance matrix, but in
the time-frequency domain. It is the first step towards computing
sensor-to-sensor coherence or a DICS beamformer.
This script demonstrates the three methods that MNE-Python provides to compute
the CSD:
1. Using short-term Fourier transform: :func:`mne.time_frequency.csd_fourier`
2. Using a multitaper approach: :func:`mne.time_frequency.csd_multitaper`
3. Using Morlet wavelets: :func:`mne.time_frequency.csd_morlet`
"""
# Author: Marijn van Vliet <[email protected]>
# License: BSD (3-clause)
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import csd_fourier, csd_multitaper, csd_morlet
print(__doc__)
###############################################################################
# In the following example, the computation of the CSD matrices can be
# performed using multiple cores. Set ``n_jobs`` to a value >1 to select the
# number of cores to use.
n_jobs = 1
###############################################################################
# Loading the sample dataset.
data_path = sample.data_path()
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
###############################################################################
# By default, CSD matrices are computed using all MEG/EEG channels. When
# interpreting a CSD matrix with mixed sensor types, be aware that the
# measurement units, and thus the scalings, differ across sensors. In this
# example, for speed and clarity, we select a single channel type:
# gradiometers.
picks = mne.pick_types(raw.info, meg='grad')
# Make some epochs, based on events with trigger code 1
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=1,
picks=picks, baseline=(None, 0),
reject=dict(grad=4000e-13), preload=True)
###############################################################################
# Computing CSD matrices using short-term Fourier transform and (adaptive)
# multitapers is straightforward:
csd_fft = csd_fourier(epochs, fmin=15, fmax=20, n_jobs=n_jobs)
csd_mt = csd_multitaper(epochs, fmin=15, fmax=20, adaptive=True, n_jobs=n_jobs)
###############################################################################
# When computing the CSD with Morlet wavelets, you specify the exact
# frequencies at which to compute it. For each frequency, a corresponding
# wavelet will be constructed and convolved with the signal, resulting in a
# time-frequency decomposition.
#
# The CSD is constructed by computing the correlation between the
# time-frequency representations between all sensor-to-sensor pairs. The
# time-frequency decomposition originally has the same sampling rate as the
# signal, in our case ~600 Hz. This means the decomposition is over-specified in
# time and we may not need to use all samples during our CSD computation, just
# enough to get a reliable correlation statistic. By specifying ``decim=10``,
# we use every 10th sample, which will greatly speed up the computation and
# will have a minimal effect on the CSD.
frequencies = [16, 17, 18, 19, 20]
csd_wav = csd_morlet(epochs, frequencies, decim=10, n_jobs=n_jobs)
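###############################################################################
# (Added sketch, not part of the original example: the complex CSD matrix at a
# single frequency can be extracted with ``get_data``; 18 Hz here is an
# arbitrary pick from the list above.)
# csd_18 = csd_wav.get_data(frequency=18)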
###############################################################################
# The resulting :class:`mne.time_frequency.CrossSpectralDensity` objects have a
# plotting function we can use to compare the results of the different methods.
# We're plotting the mean CSD across frequencies.
csd_fft.mean().plot()
plt.suptitle('short-term Fourier transform')
csd_mt.mean().plot()
plt.suptitle('adaptive multitapers')
csd_wav.mean().plot()
plt.suptitle('Morlet wavelet transform')
| bsd-3-clause |
cjayb/mne-python | tutorials/epochs/plot_40_epochs_to_data_frame.py | 9 | 7008 | """
.. _tut-epochs-dataframe:
Exporting Epochs to Pandas DataFrames
=====================================
This tutorial shows how to export the data in :class:`~mne.Epochs` objects to a
:class:`Pandas DataFrame <pandas.DataFrame>`, and applies a typical Pandas
:doc:`split-apply-combine <pandas:user_guide/groupby>` workflow to examine the
latencies of the response maxima across epochs and conditions.
.. contents:: Page contents
:local:
:depth: 2
We'll use the :ref:`sample-dataset` dataset, but load a version of the raw file
that has already been filtered and downsampled, and has an average reference
applied to its EEG channels. As usual we'll start by importing the modules we
need and loading the data:
"""
import os
import seaborn as sns
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
###############################################################################
# Next we'll load a list of events from file, map them to condition names with
# an event dictionary, set some signal rejection thresholds (cf.
# :ref:`tut-reject-epochs-section`), and segment the continuous data into
# epochs:
sample_data_events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(sample_data_events_file)
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4}
reject_criteria = dict(mag=3000e-15, # 3000 fT
grad=3000e-13, # 3000 fT/cm
eeg=100e-6, # 100 µV
eog=200e-6) # 200 µV
tmin, tmax = (-0.2, 0.5) # epoch from 200 ms before event to 500 ms after it
baseline = (None, 0) # baseline period from start of epoch to time=0
epochs = mne.Epochs(raw, events, event_dict, tmin, tmax, proj=True,
baseline=baseline, reject=reject_criteria, preload=True)
del raw
###############################################################################
# Converting an ``Epochs`` object to a ``DataFrame``
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Once we have our :class:`~mne.Epochs` object, converting it to a
# :class:`~pandas.DataFrame` is simple: just call :meth:`epochs.to_data_frame()
# <mne.Epochs.to_data_frame>`. Each channel's data will be a column of the new
# :class:`~pandas.DataFrame`, alongside three additional columns of event name,
# epoch number, and sample time. Here we'll just show the first few rows and
# columns:
df = epochs.to_data_frame()
df.iloc[:5, :10]
###############################################################################
# Scaling time and channel values
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# By default, time values are converted from seconds to milliseconds and
# then rounded to the nearest integer; if you don't want this, you can pass
# ``time_format=None`` to keep time as a :class:`float` value in seconds, or
# convert it to a :class:`~pandas.Timedelta` value via
# ``time_format='timedelta'``.
#
# Note also that, by default, channel measurement values are scaled so that EEG
# data are converted to µV, magnetometer data are converted to fT, and
# gradiometer data are converted to fT/cm. These scalings can be customized
# through the ``scalings`` parameter, or suppressed by passing
# ``scalings=dict(eeg=1, mag=1, grad=1)``.
df = epochs.to_data_frame(time_format=None,
scalings=dict(eeg=1, mag=1, grad=1))
df.iloc[:5, :10]
###############################################################################
# Notice that the time values are no longer integers, and the channel values
# have changed by several orders of magnitude compared to the earlier
# DataFrame.
#
#
# Setting the ``index``
# ~~~~~~~~~~~~~~~~~~~~~
#
# It is also possible to move one or more of the indicator columns (event name,
# epoch number, and sample time) into the :ref:`index <pandas:indexing>`, by
# passing a string or list of strings as the ``index`` parameter. We'll also
# demonstrate here the effect of ``time_format='timedelta'``, yielding
# :class:`~pandas.Timedelta` values in the "time" column.
df = epochs.to_data_frame(index=['condition', 'epoch'],
time_format='timedelta')
df.iloc[:5, :10]
###############################################################################
# Wide- versus long-format DataFrames
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Another parameter, ``long_format``, determines whether each channel's data is
# in a separate column of the :class:`~pandas.DataFrame`
# (``long_format=False``), or whether the measured values are pivoted into a
# single ``'value'`` column with an extra indicator column for the channel name
# (``long_format=True``). Passing ``long_format=True`` will also create an
# extra column ``ch_type`` indicating the channel type.
long_df = epochs.to_data_frame(time_format=None, index='condition',
long_format=True)
long_df.head()
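###############################################################################
# (Added sketch, not part of the original tutorial: the extra ``ch_type``
# column of the long format makes it easy to keep a single sensor type.)
# eeg_long = long_df.query('ch_type == "eeg"')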
###############################################################################
# Generating the :class:`~pandas.DataFrame` in long format can be helpful when
# using other Python modules for subsequent analysis or plotting. For example,
# here we'll take data from the "auditory/left" condition, pick a couple MEG
# channels, and use :func:`seaborn.lineplot` to automatically plot the mean and
# confidence band for each channel, with confidence computed across the epochs
# in the chosen condition:
channels = ['MEG 1332', 'MEG 1342']
data = long_df.loc['auditory/left'].query('channel in @channels')
# convert channel column (CategoryDtype → string; for a nicer-looking legend)
data['channel'] = data['channel'].astype(str)
sns.lineplot(x='time', y='value', hue='channel', data=data)
###############################################################################
# We can also now use all the power of Pandas for grouping and transforming our
# data. Here, we find the latency of peak activation of 2 gradiometers (one
# near auditory cortex and one near visual cortex), and plot the distribution
# of the timing of the peak in each channel as a :func:`~seaborn.violinplot`:
# sphinx_gallery_thumbnail_number = 2
df = epochs.to_data_frame(time_format=None)
peak_latency = (df.filter(regex=r'condition|epoch|MEG 1332|MEG 2123')
.groupby(['condition', 'epoch'])
.aggregate(lambda x: df['time'].iloc[x.idxmax()])
.reset_index()
.melt(id_vars=['condition', 'epoch'],
var_name='channel',
value_name='latency of peak')
)
ax = sns.violinplot(x='channel', y='latency of peak', hue='condition',
data=peak_latency, palette='deep', saturation=1)
| bsd-3-clause |
abigailStev/stingray | stingray/pulse/tests/test_search.py | 2 | 14421 | from __future__ import division, print_function
from stingray.pulse.search import epoch_folding_search, z_n_search
from stingray.pulse.search import _profile_fast, phaseogram, plot_phaseogram
from stingray.pulse.search import plot_profile
from stingray.pulse.pulsar import fold_events
import numpy as np
from stingray import Lightcurve
from stingray.events import EventList
import pytest
np.random.seed(20150907)
class TestAll(object):
"""Unit tests for the stingray.pulse.search module."""
@classmethod
def setup_class(cls):
cls.pulse_frequency = 1/0.101
cls.tstart = 0
cls.tend = 25.25
cls.tseg = cls.tend - cls.tstart
cls.dt = 0.01212
cls.times = np.arange(cls.tstart, cls.tend, cls.dt) + cls.dt / 2
cls.counts = \
100 + 20 * np.cos(2 * np.pi * cls.times * cls.pulse_frequency)
cls.gti = [[cls.tstart, cls.tend]]
lc = Lightcurve(cls.times, cls.counts, gti=cls.gti, err_dist='gauss')
events = EventList()
events.simulate_times(lc)
cls.event_times = events.time
def test_prepare(self):
pass
def test_phaseogram(self):
phaseogr, phases, times, additional_info = \
phaseogram(self.event_times, self.pulse_frequency)
assert np.all(times < 25.6)
assert np.any(times > 25)
assert np.all((phases >= 0) & (phases <= 2))
def test_phaseogram_bad_weights(self):
with pytest.raises(ValueError) as excinfo:
phaseogr, phases, times, additional_info = \
phaseogram(self.event_times, self.pulse_frequency,
weights=[0, 2])
assert 'must match' in str(excinfo)
def test_phaseogram_weights(self):
phaseogr, phases, times, additional_info = \
phaseogram(self.times, self.pulse_frequency, weights=self.counts,
nph=16)
assert np.all(times < 25.6)
assert np.any(times > 25)
assert np.all((phases >= 0) & (phases <= 2))
import matplotlib.pyplot as plt
fig = plt.figure('Phaseogram direct weights')
plot_phaseogram(phaseogr, phases, times)
plt.savefig('phaseogram_weights.png')
plt.close(fig)
def test_phaseogram_mjdref(self):
phaseogr, phases, times, additional_info = \
phaseogram(self.event_times, self.pulse_frequency,
mjdref=57000, out_filename='phaseogram_mjdref.png')
assert np.all(times >= 57000)
assert np.all((phases >= 0) & (phases <= 2))
def test_phaseogram_mjdref_pepoch(self):
phaseogr, phases, times, additional_info = \
phaseogram(self.event_times, self.pulse_frequency,
mjdref=57000, out_filename='phaseogram_mjdref.png',
pepoch=57000)
assert np.all(times >= 57000)
assert np.all((phases >= 0) & (phases <= 2))
def test_plot_phaseogram_fromfunc(self):
import matplotlib.pyplot as plt
fig = plt.figure('Phaseogram from func')
ax = plt.subplot()
phaseogr, phases, times, additional_info = \
phaseogram(self.event_times, self.pulse_frequency, mjdref=57000,
pepoch=57000, phaseogram_ax=ax, plot=True)
plt.savefig('phaseogram_fromfunc.png')
plt.close(fig)
def test_plot_phaseogram_direct(self):
import matplotlib.pyplot as plt
phaseogr, phases, times, additional_info = \
phaseogram(self.event_times, self.pulse_frequency)
plot_phaseogram(phaseogr, phases, times)
plt.savefig('phaseogram_direct.png')
plt.close(plt.gcf())
def test_plot_profile(self):
import matplotlib.pyplot as plt
phase, prof, _ = fold_events(self.event_times,
self.pulse_frequency)
ax = plot_profile(phase, prof)
plt.savefig('profile_direct.png')
plt.close(plt.gcf())
def test_plot_profile_existing_ax(self):
import matplotlib.pyplot as plt
fig = plt.figure('Pulse profile')
ax = plt.subplot()
phase, prof, _ = fold_events(self.event_times,
self.pulse_frequency, ax=ax)
ax = plot_profile(phase, prof, ax=ax)
plt.savefig('profile_existing_ax.png')
plt.close(fig)
def test_plot_profile_errorbars(self):
import matplotlib.pyplot as plt
fig = plt.figure('Pulse profile')
ax = plt.subplot()
phase, prof, err = fold_events(self.event_times,
self.pulse_frequency, ax=ax)
ax = plot_profile(phase, prof, err=err, ax=ax)
plt.savefig('profile_errorbars.png')
plt.close(fig)
def test_profile_fast(self):
test_phase = np.arange(0, 1, 1/16)
prof = _profile_fast(test_phase, nbin=16)
assert np.all(prof == np.ones(16))
def test_epoch_folding_search(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
freq, stat = epoch_folding_search(self.event_times, frequencies,
nbin=43)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
def test_epoch_folding_search_fdot(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
fdots = [-0.1, 0, 0.1]
freq, fdot, stat = epoch_folding_search(self.event_times, frequencies,
nbin=43, fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
def test_epoch_folding_search_fdot_longdouble(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg, dtype=np.longdouble)
fdots = np.array([-0.1, 0, 0.1], dtype=np.longdouble)
freq, fdot, stat = \
epoch_folding_search(self.event_times.astype(np.longdouble),
frequencies, nbin=43, fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
def test_epoch_folding_search_expocorr_fails(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
with pytest.raises(ValueError) as excinfo:
freq, stat = epoch_folding_search(self.event_times, frequencies,
nbin=23, expocorr=True)
assert 'To calculate exposure correction' in str(excinfo)
def test_epoch_folding_search_expocorr(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
freq, stat = epoch_folding_search(self.event_times, frequencies,
nbin=42, expocorr=True, gti=self.gti)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
def test_epoch_folding_search_expocorr_fdot(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
fdots = [-0.1, 0, 0.1]
freq, fdot, stat = \
epoch_folding_search(self.event_times, frequencies,
nbin=42, expocorr=True, gti=self.gti,
fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
def test_epoch_folding_search_weights(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
freq, stat = epoch_folding_search(self.times, frequencies,
nbin=16, weights=self.counts)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
def test_epoch_folding_search_weights_fdot(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
fdots = [-0.1, 0, 0.1]
freq, fdot, stat = epoch_folding_search(self.times, frequencies,
nbin=16, weights=self.counts,
fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
def test_z_n_search(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
freq, stat = z_n_search(self.event_times, frequencies, nbin=25,
nharm=2)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
def test_z_n_search_fdot(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
fdots = [-0.1, 0, 0.1]
freq, fdot, stat = z_n_search(self.event_times, frequencies, nbin=25,
nharm=2, fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
def test_z_n_search_fdot_longdouble(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg, dtype=np.longdouble)
fdots = np.array([-0.1, 0, 0.1], dtype=np.longdouble)
freq, fdot, stat = z_n_search(self.event_times.astype(np.longdouble),
frequencies, nbin=25,
nharm=2, fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
def test_z_n_search_expocorr(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
freq, stat = z_n_search(self.event_times, frequencies, nbin=64,
nharm=2, expocorr=True, gti=self.gti)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
def test_z_n_search_expocorr_fdot(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
fdots = [-0.1, 0, 0.1]
freq, fdot, stat = z_n_search(self.event_times, frequencies, nbin=64,
nharm=2, expocorr=True, gti=self.gti,
fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
def test_z_n_search_expocorr_fails(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
with pytest.raises(ValueError) as excinfo:
freq, stat = z_n_search(self.event_times, frequencies, nharm=1,
nbin=35, expocorr=True)
assert 'To calculate exposure correction' in str(excinfo)
def test_z_n_search_weights(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
freq, stat = z_n_search(self.times, frequencies, nbin=44,
nharm=1, weights=self.counts)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
def test_z_n_search_weights_fdot(self):
"""Test pulse phase calculation, frequency only."""
frequencies = np.arange(9.8, 9.99, 0.1/self.tseg)
fdots = [-0.1, 0, 0.1]
freq, fdot, stat = z_n_search(self.times, frequencies, nbin=44,
nharm=1, weights=self.counts,
fdots=fdots)
minbin = np.argmin(np.abs(frequencies - self.pulse_frequency))
maxstatbin = freq.flatten()[np.argmax(stat)]
assert np.allclose(maxstatbin, frequencies[minbin], atol=0.1/self.tseg)
maxfdot = fdot.flatten()[np.argmax(stat)]
assert np.allclose(maxfdot, 0.0, atol=0.1/self.tseg)
| mit |
mehdidc/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class show that accuracy remains high despite the small number of labels.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# copy the labels and mark the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
JPGlaser/Tycho | src/tycho/write.py | 1 | 11900 | # Python Classes/Functions used to Export Tycho's Datasets
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# TO-DO: Add time back to bookkeeping because of Tyler's code
# Importing Necessary System Packages
import os, io
import numpy as np
import matplotlib.pyplot as plt
import random
import numpy
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.io import *
from amuse.lab import *
from amuse.couple import multiples
from amuse import io
# Import the Amuse Stellar Packages
from amuse.ic.kingmodel import new_king_model
from amuse.ic.kroupa import new_kroupa_mass_distribution
# Import cPickle/Pickle
try:
    import cPickle as pickle
except ImportError:
    import pickle
from tycho import util
#from tycho import multiples2 as multiples
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def write_initial_state(master_set, ic_array, file_prefix):
''' Writes out an initial state for the Tycho Module.
master_set: The Master Amuse Particle Set used in Tycho
ic_array: Predefined Numpy Array that Stores Initial Conditions in SI Units
file_prefix: String Value for a Prefix to the Saved File
'''
# First, Define/Make the Directory for the Initial State to be Stored
file_dir = os.getcwd()+"/InitialState"
if not os.path.exists(file_dir):
os.makedirs(file_dir)
file_base = file_dir+"/"+file_prefix
# Second, Write the AMUSE Particle Set to a HDF5 File
file_format = "hdf5"
write_set_to_file(master_set, file_base+"_particles.hdf5", format=file_format, close_file=True)
# Third, Pickle the Initial Conditions Array
ic_file = open(file_base+"_ic.pkl", "wb")
pickle.dump(ic_array, ic_file)
ic_file.close()
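# Sketch of the matching read side (added; assumed usage, not in this module):
#     master_set = read_set_from_file(file_base + "_particles.hdf5", format="hdf5")
#     with open(file_base + "_ic.pkl", "rb") as f:
#         ic_array = pickle.load(f)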
def write_time_step(gravity_set, current_time, file_prefix):
    ''' Writes out necessary information for a time step.
        gravity_set: The Gravity Code's AMUSE Particle Set used in Tycho
        current_time: The Simulation's Current Time
        file_prefix: String Value for a Prefix to the Saved File
    '''
# First, Define/Make the Directory for the Time Step to be Stored
file_dir_MS = os.getcwd()+"/Snapshots"
if not os.path.exists(file_dir_MS):
os.makedirs(file_dir_MS)
file_base_MS = file_dir_MS+"/"+file_prefix
# Second, Write the AMUSE Particle Set to a HDF5 File
file_format = "hdf5"
write_set_to_file(gravity_set, file_base_MS+"_MS_t%.3f.hdf5" %(current_time.number), \
format=file_format, close_file=True)
# ------------------------------------ #
# WRITING RESTART FILE #
# ------------------------------------ #
def write_state_to_file(time, stars_python,gravity_code, multiples_code, write_file, cp_hist=False, backup = 0 ):
res_dir = os.getcwd()+"/Restart"
if not os.path.exists(res_dir):
os.makedirs(res_dir)
print("Writing state to write file: ", write_file,"\n\n")
if write_file is not None:
particles = gravity_code.particles.copy()
write_channel = gravity_code.particles.new_channel_to(particles)
write_channel.copy_attribute("index_in_code", "id")
bookkeeping = {'neighbor_veto': multiples_code.neighbor_veto,
'neighbor_distance_factor': multiples_code.neighbor_distance_factor,
'multiples_external_tidal_correction': multiples_code.multiples_external_tidal_correction,
'multiples_integration_energy_error': multiples_code.multiples_integration_energy_error,
'multiples_internal_tidal_correction': multiples_code.multiples_internal_tidal_correction,
'model_time': multiples_code.model_time,
'root_index': multiples.root_index
}
for root, tree in multiples_code.root_to_tree.items():
root_in_particles = root.as_particle_in_set(particles)
subset = tree.get_tree_subset().copy()
if root_in_particles is not None:
root_in_particles.components = subset
io.write_set_to_file(particles,write_file+".stars.hdf5",'hdf5',version='2.0',
append_to_file=False, copy_history=cp_hist)
io.write_set_to_file(stars_python,write_file+".stars_python.hdf5",'hdf5',version='2.0',
append_to_file=False, copy_history=cp_hist)
config = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".conf", "wb") as f:
pickle.dump(config, f)
with open(write_file + ".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
print("\nState successfully written to: ", write_file)
print(time)
if backup > 0:
io.write_set_to_file(particles,write_file+".backup.stars.hdf5",'hdf5', version='2.0',
append_to_file=False, copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python,write_file+".backup.stars_python.hdf5",'hdf5',
version='2.0', append_to_file=False, copy_history=cp_hist,
close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".backup.conf", "wb") as f:
pickle.dump(config2, f)
f.close()
with open(write_file + ".backup.bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
f.close()
print("\nBackup write completed.\n")
if backup > 2:
io.write_set_to_file(particles, write_file+"."+str(int(time.number))
+".stars.hdf5",'hdf5',version='2.0', append_to_file=False,
copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python, write_file+"."+str(int(time.number))
+".stars_python.hdf5",'hdf5',version='2.0', append_to_file=False,
copy_history=cp_hist, close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + "." +str(int(time.number))+".conf", "wb") as f:
pickle.dump(config2, f)
f.close()
with open(write_file + "."+str(int(time.number))+".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
f.close()
print("\nBackup write completed.\n")
# ----------------------------------------- #
# WRITING CRASH RESTART FILE #
# ----------------------------------------- #
def write_crash_save(time, stars_python,gravity_code, multiples_code, write_file, cp_hist=False, backup = 0 ):
crash_dir = os.getcwd()+"/CrashSave"
if not os.path.exists(crash_dir):
os.makedirs(crash_dir)
print("Writing state to write file: ", write_file,"\n\n")
if write_file is not None:
particles = gravity_code.particles.copy()
write_channel = gravity_code.particles.new_channel_to(particles)
write_channel.copy_attribute("index_in_code", "id")
bookkeeping = {'neighbor_veto': multiples_code.neighbor_veto,
'neighbor_distance_factor': multiples_code.neighbor_distance_factor,
'multiples_external_tidal_correction': multiples_code.multiples_external_tidal_correction,
'multiples_integration_energy_error': multiples_code.multiples_integration_energy_error,
'multiples_internal_tidal_correction': multiples_code.multiples_internal_tidal_correction,
'model_time': multiples_code.model_time,
'root_index': multiples.root_index
}
'''
bookkeeping.neighbor_veto =
bookkeeping.multiples_external_tidal_correction = multiples_code.multiples_external_tidal_correction
bookkeeping.multiples_integration_energy_error = multiples_code.multiples_integration_energy_error
bookkeeping.multiples_internal_tidal_correction = multiples_code.multiples_internal_tidal_correction
bookkeeping.model_time = multiples_code.model_time
'''
for root, tree in multiples_code.root_to_tree.items():
#multiples.print_multiple_simple(tree,kep)
root_in_particles = root.as_particle_in_set(particles)
subset = tree.get_tree_subset().copy()
if root_in_particles is not None:
root_in_particles.components = subset
io.write_set_to_file(particles,write_file+".stars.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist)
io.write_set_to_file(stars_python,write_file+".stars_python.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist)
config = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".conf", "wb") as f:
pickle.dump(config, f)
with open(write_file + ".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
print("\nState successfully written to: ", write_file)
print(time)
if backup > 0:
io.write_set_to_file(particles,write_file+".backup.stars.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python,write_file+".backup.stars_python.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".backup.conf", "wb") as f:
pickle.dump(config2, f)
f.close()
with open(write_file + ".backup.bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
f.close()
print("\nBackup write completed.\n")
if backup > 2:
io.write_set_to_file(particles,write_file+"."+str(int(time.number))+".stars.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python,write_file+"."+str(int(time.number))+".stars_python.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + "." +str(int(time.number))+".conf", "wb") as f:
pickle.dump(config2, f)
f.close()
with open(write_file + "."+str(int(time.number))+".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
f.close()
print("\nBackup write completed.\n")
| mit |
johannfaouzi/pyts | pyts/approximation/tests/test_sfa.py | 1 | 2873 | """Testing for Symbolic Fourier Approximation."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
import pytest
from sklearn.feature_selection import f_classif
from pyts.approximation import MultipleCoefficientBinning
from pyts.approximation import SymbolicFourierApproximation
rng = np.random.RandomState(42)
n_samples, n_timestamps = 5, 8
X = rng.randn(n_samples, n_timestamps)
y = rng.randint(2, size=n_samples)
def _compute_expected_results(X, y=None, n_coefs=None, n_bins=4,
strategy='quantile', drop_sum=False, anova=False,
norm_mean=False, norm_std=False, alphabet=None):
"""Compute the expected results."""
X = np.asarray(X)
if norm_mean:
X -= X.mean(axis=1)[:, None]
if norm_std:
X /= X.std(axis=1)[:, None]
X_fft = np.fft.rfft(X)
X_fft = np.vstack([np.real(X_fft), np.imag(X_fft)])
X_fft = X_fft.reshape(n_samples, -1, order='F')
if drop_sum:
X_fft = X_fft[:, 2:-1]
else:
X_fft = np.hstack([X_fft[:, :1], X_fft[:, 2:-1]])
if n_coefs is not None:
if anova:
_, p = f_classif(X_fft, y)
support = np.argsort(p)[:n_coefs]
X_fft = X_fft[:, support]
else:
X_fft = X_fft[:, :n_coefs]
mcb = MultipleCoefficientBinning(n_bins=n_bins, strategy=strategy,
alphabet=alphabet)
arr_desired = mcb.fit_transform(X_fft)
return arr_desired
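# Illustrative sketch (not part of the test suite): for one series of 8
# timestamps, the layout built above interleaves the real and imaginary
# parts of the rFFT column-wise, i.e. [Re(c0), Im(c0), Re(c1), Im(c1), ...],
# before Im(c0) and the Nyquist imaginary part are dropped.
#
#   x = rng.randn(1, 8)
#   c = np.fft.rfft(x)                 # shape (1, 5)
#   flat = np.vstack([np.real(c), np.imag(c)]).reshape(1, -1, order='F')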
@pytest.mark.parametrize(
'params',
[({}),
({'n_coefs': 3}),
({'n_bins': 2}),
({'strategy': 'uniform'}),
({'drop_sum': True}),
({'anova': True}),
({'norm_mean': True, 'drop_sum': True}),
({'norm_std': True}),
({'norm_mean': True, 'norm_std': True, 'drop_sum': True}),
({'n_coefs': 2, 'drop_sum': True, 'anova': True})]
)
def test_actual_results(params):
"""Test that the actual results are the expected ones."""
arr_actual = SymbolicFourierApproximation(**params).fit_transform(X, y)
arr_desired = _compute_expected_results(X, y, **params)
np.testing.assert_array_equal(arr_actual, arr_desired)
@pytest.mark.parametrize(
'params',
[({}),
({'n_coefs': 3}),
({'n_bins': 2}),
({'strategy': 'uniform'}),
({'drop_sum': True}),
({'anova': True}),
({'norm_mean': True, 'drop_sum': True}),
({'norm_std': True}),
({'norm_mean': True, 'norm_std': True, 'drop_sum': True}),
({'n_coefs': 2, 'drop_sum': True, 'anova': True})]
)
def test_fit_transform(params):
"""Test that fit and transform yield the same results as fit_transform."""
arr_1 = SymbolicFourierApproximation(**params).fit(X, y).transform(X)
arr_2 = SymbolicFourierApproximation(**params).fit_transform(X, y)
np.testing.assert_array_equal(arr_1, arr_2)
| bsd-3-clause |
ishanic/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. It then shows the effect of a bad
initialization on the classification process:
by setting n_init to only 1 (default is 10), the number of
times the algorithm is run with different centroid
seeds is reduced.
The next plot displays the result of using eight clusters, and
finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
weissj3/MWTools | SGRBHB/VgsrDistanceCuts.py | 2 | 3854 | import sys
sys.path.insert(0, '../Newby-tools/utilities')
import math as ma
import matplotlib.pyplot as plt
import astro_coordinates as ac
def Create_Line(x0, x1, y0, y1):
m = (y1 - y0)/(x1 - x0)
b = y1 - m * x1
return m,b
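# Illustrative usage of Create_Line (the values here are hypothetical): the
# returned slope/intercept define a selection boundary in the (g, vgsr)
# plane, as used for the cuts below.
#   m, b = Create_Line(17.0, 19.75, -25., 0.)
#   is_above = lambda g, v: v > m * g + b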
file = sys.argv[1]
f = open(file,"r")
cols = f.readline().split(",")
values = []
for i in cols:
values.append([])
for line in f:
ln = line.split(",")
for i in range(len(ln)):
values[i].append(ln[i])
#for i in range(len(objID)):
# for j in range(i+1, len(objID)):
# if objID[i] == objID[j]:
# print objID[i]
values.append(map(ac.rv_to_vgsr, map(float,values[23]), map(float,values[12]), map(float,values[13])))
CutValues = []
for i in range(len(values)):
CutValues.append([])
##########################################
### Cut Lower Blob ###
##########################################
line0x0 = 17.0
line0x1 = 19.75
line0y0 = -25.
line0y1 = 0.
line1x0 = 17.0
line1x1 = 19.75
line1y0 = -160.
line1y1 = -100.
m0, b0 = Create_Line(line0x0, line0x1, line0y0, line0y1)
m1, b1 = Create_Line(line1x0, line1x1, line1y0, line1y1)
x0 = []
x1 = []
for i in range(0,100):
x0.append((line0x0 - line0x1) * i/100.0 + line0x1)
x1.append((line1x0 - line1x1) * i/100.0 + line1x1)
y0 = [((m0 * i) + b0) for i in x0]
y1 = [((m1 * i) + b1) for i in x1]
############################################
### Cut Upper Blob ###
############################################
line2x0 = 17.7
line2x1 = 19.17
line2y0 = 50.0
line2y1 = 120.0
line3x0 = 17.7
line3x1 = 19.17
line3y0 = 120.
line3y1 = 200.
m2, b2 = Create_Line(line2x0, line2x1, line2y0, line2y1)
m3, b3 = Create_Line(line3x0, line3x1, line3y0, line3y1)
x2 = []
x3 = []
for i in range(0,100):
x2.append((line2x0 - line2x1) * i/100.0 + line2x1)
x3.append((line3x0 - line3x1) * i/100.0 + line3x1)
y2 = [((m2 * i) + b2) for i in x2]
y3 = [((m3 * i) + b3) for i in x3]
for i in range(len(values[0])):
#if (values[26][i] > line1y0 and values[26][i] < line0y1 and float(values[3][i]) > line0x0 and float(values[3][i]) < line1x1 and values[26][i] < (m0 * float(values[3][i])) + b0 and values[26][i] > (m1 * float(values[3][i])) + b1):
if (values[26][i] > -120. and values[26][i] < -25. and float(values[3][i]) > 16.8 and float(values[3][i]) < 17.8):
#if (values[26][i] < -67.5 and values[26][i] > -120. and float(values[3][i]) > line0x0 and float(values[3][i]) < line1x1):
#if (values[26][i] > line1y0 and values[26][i] < line0y1 and float(values[3][i]) > line0x0 and float(values[3][i]) < line1x1):
for j in range(len(values)):
CutValues[j].append(values[j][i])
#if (values[26][i] > 50.0 and values[26][i] < 200. and float(values[3][i]) > 17.7 and float(values[3][i]) < 19.17 and values[26][i] > (m2 * float(values[3][i])) + b2 and values[26][i] < (m3 * float(values[3][i])) + b3):
# for j in range(len(values)):
# CutValues[j].append(values[j][i])
plt.subplot(2,2,1)
plt.plot(map(float, CutValues[10]), map(float, CutValues[11]), 'o', ms=1.2)
plt.xlim([110, 230])
plt.ylim([-5,60])
plt.xlabel("RA")
plt.ylabel("Dec")
plt.title("Remade Field of Streams")
ax = plt.gca()
ax.invert_xaxis()
plt.subplot(2,2,2)
plt.plot(map(float, CutValues[3]), map(float, CutValues[26]), 'o', ms=1.2)
plt.xlabel("$g_o$")
plt.ylabel("$v_{gsr}$")
plt.title("Stars in Cut")
plt.subplot(2,2,3)
plt.plot(map(float, values[3]), map(float, values[26]), 'o', ms=1.2)
plt.plot(x0, y0)
plt.plot(x1, y1)
plt.xlabel("$g_o$")
plt.ylabel("$v_{gsr}$")
plt.title("All Stars $v_{gsr}$ vs $g_o$")
plt.subplot(2,2,4)
plt.hist(map(float, CutValues[26]), bins=30)
plt.ylabel("Counts")
plt.xlabel("$v_{gsr}$")
plt.title("Histogram of cut stars")
#plt.plot(x2, y2)
#plt.plot(x3, y3)
#plt.plot(map(float, values[3]), values[26], 'o', ms=1.2)
plt.show()
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/cbook/deprecation.py | 2 | 7076 | import warnings
import functools
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute',
addendum=''):
if not message:
if pending:
message = (
'The %(name)s %(obj_type)s will be deprecated in a '
'future version.')
else:
message = (
'The %(name)s %(obj_type)s was deprecated in version '
'%(since)s.')
altmessage = ''
if alternative:
altmessage = ' Use %s instead.' % alternative
message = ((message % {
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type,
'since': since}) +
altmessage)
if addendum:
message += addendum
return message
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum=''):
"""
Used to display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
warnings.warn(message, mplDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None, addendum=''):
"""
Decorator to mark a function or a class as deprecated.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the object,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
object. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object; if not provided the name
is automatically determined from the passed in object,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative object that the user may use in place of the
deprecated object. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, addendum=addendum):
import textwrap
if not name:
name = obj.__name__
if isinstance(obj, type):
obj_type = "class"
old_doc = obj.__doc__
func = obj.__init__
def finalize(wrapper, new_doc):
try:
obj.__doc__ = new_doc
except (AttributeError, TypeError):
# cls.__doc__ is not writeable on Py2.
# TypeError occurs on PyPy
pass
obj.__init__ = wrapper
return obj
else:
obj_type = "function"
if isinstance(obj, classmethod):
func = obj.__func__
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return classmethod(wrapper)
else:
func = obj
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return wrapper
message = _generate_deprecation_message(
since, message, name, alternative, pending,
obj_type, addendum)
def wrapper(*args, **kwargs):
warnings.warn(message, mplDeprecation, stacklevel=2)
return func(*args, **kwargs)
old_doc = textwrap.dedent(old_doc or '').strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return finalize(wrapper, new_doc)
return deprecate
| mit |
MatteusDeloge/opengrid | opengrid/library/tests/test_fluksoapi.py | 3 | 7958 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 02:37:25 2013
@author: roel
"""
import os, sys
import unittest
import inspect
import numpy as np
import pdb
import datetime as dt
import pandas as pd
import pytz
test_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# add the path to opengrid to sys.path
sys.path.append(os.path.join(test_dir, os.pardir, os.pardir))
from opengrid.library import fluksoapi
class FluksoapiTest(unittest.TestCase):
"""
Class for testing the module fluksoapi
"""
def test_consolidate_single(self):
"""Return abspath if a single file found"""
datafolder = os.path.join(test_dir, 'data')
self.assertRaises(ValueError, fluksoapi.consolidate_sensor, datafolder, 'f81fb35a62f59a987d8eea3ffc845ed0')
csv_expected = os.path.join(datafolder, 'FL12345678_sensorS_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv' )
self.assertEqual(csv_expected,
fluksoapi.consolidate_sensor(datafolder, 'sensorS'))
def test_consolidate_multiple(self):
"""Consolidate and return single filename if more than one file found"""
datafolder = os.path.join(test_dir, 'data')
csv_expected = os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_16-01-00.csv' )
self.assertEqual(csv_expected, fluksoapi.consolidate_sensor(datafolder, 'sensorD'))
os.remove(csv_expected)
def test_consolidate_raises(self):
"""Raise ValueError if no file found"""
datafolder = os.path.join(test_dir, 'data')
self.assertRaises(ValueError, fluksoapi.consolidate_sensor, datafolder, 'thissensordoesnotexist')
def test_consolidate(self):
"""Consolidating 2 files and checking variable"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorD')
ts1 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_08-01-00.csv'))
self.assertTrue(np.isnan(ts1['sensorD'].loc[dt.datetime(2014,1,8,8,0,0, tzinfo=pytz.UTC)]))
ts2 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv'))
#ts = fluksoapi.load_file(os.path.join(datafolder, 'f81fb35a62f59a987d8eea3ffc845ed0_FROM_2014-01-07_08-02-00_TO_2014-01-08_16-01-00.csv'))
#pdb.set_trace()
ts = fluksoapi.load_file(new_csv)
self.assertEqual(ts.index[0], ts1.index[0])
self.assertEqual(ts.index[-1], ts2.index[-1])
self.assertEqual(ts['sensorD'].loc['2014-01-08 08:00:00'], 1120.0, "Last file should overwrite identical indices")
os.remove(new_csv)
def test_consolidate_with_hidden_file(self):
"""Consolidate should skip hidden file"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorH')
self.assertEqual(new_csv, os.path.join(datafolder, 'FL12345678_sensorH_FROM_2014-01-07_12-02-00_TO_2014-01-08_16-01-00.csv'))
os.remove(new_csv)
def test_consolidate_single_file(self):
"""Consolidating a single file should NOT consolidate but should return the file"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorS')
self.assertEqual(new_csv, os.path.join(datafolder,'FL12345678_sensorS_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv'))
def test_consolidate_day(self):
"""Consolidating 2 files for a single day and checking variable"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorD',
dt_day = dt.datetime(2014,1,7))
ts1 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_08-01-00.csv'))
self.assertTrue(np.isnan(ts1['sensorD'].loc[dt.datetime(2014,1,8,8,0,0, tzinfo=pytz.UTC)]))
ts2 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv'))
ts = fluksoapi.load_file(new_csv)
self.assertEqual(ts.index[0], ts1.index[0])
self.assertEqual(ts.index[-1], dt.datetime(2014,1,8,0,0,0, tzinfo=pytz.UTC))
os.remove(new_csv)
def test_load_file(self):
"""load_file should return a pandas dataframe with localized index (UTC)"""
datafolder = os.path.join(test_dir, 'data')
df = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_08-01-00.csv'))
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(df.index.tz, pytz.UTC, "the tz is {} instead of UTC".format(df.index.tz))
self.assertListEqual(df.columns.tolist(), ['sensorD'])
def test_parse_date_from_datetime(self):
"""Parsing a datetime into a pandas.Timestamp"""
BXL = pytz.timezone('Europe/Brussels')
dt_ = BXL.localize(dt.datetime(2014,11,23,1,2,3))
epoch = pytz.UTC.localize(dt.datetime(1970,1,1,0,0,0))
epoch_expected = (dt_ - epoch).total_seconds()
pts = fluksoapi._parse_date(dt_)
self.assertEqual(pts.value/1e9, epoch_expected)
def test_parse_date_from_datetime_naive(self):
"""Parsing a naïve datetime into a pandas.Timestamp makes it UTC"""
dt_ = pytz.UTC.localize(dt.datetime(2014,11,23,1,2,3))
epoch = pytz.UTC.localize(dt.datetime(1970,1,1,0,0,0))
epoch_expected = (dt_ - epoch).total_seconds()
pts = fluksoapi._parse_date(dt.datetime(2014,11,23,1,2,3))
self.assertEqual(pts.value/1e9, epoch_expected)
def test_parse_date_from_posix(self):
"""Parsing a float"""
pts = fluksoapi._parse_date(1416778251.460574)
self.assertEqual(1416778251.460574, pts.value/1e9)
def test_parse_date_from_string(self):
"""Parsing some commong types of strings"""
dt_ = pytz.UTC.localize(dt.datetime(2014,11,23,1,2,3))
epoch = pytz.UTC.localize(dt.datetime(1970,1,1,0,0,0))
epoch_expected = (dt_ - epoch).total_seconds()
pts = fluksoapi._parse_date('20141123 01:02:03')
self.assertEqual(pts.value/1e9, epoch_expected)
pts = fluksoapi._parse_date('2014-11-23 01:02:03')
self.assertEqual(pts.value/1e9, epoch_expected)
pts = fluksoapi._parse_date('2014-11-23T010203')
self.assertEqual(pts.value/1e9, epoch_expected)
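# A minimal sketch of the behaviour these tests pin down (illustrative;
# the real implementation lives in opengrid.library.fluksoapi, and
# '_parse_date_sketch' is a hypothetical name):
#
#   def _parse_date_sketch(d):
#       if isinstance(d, float):
#           return pd.Timestamp(d, unit='s', tz='UTC')
#       ts = pd.Timestamp(d)
#       return ts.tz_localize('UTC') if ts.tz is None else ts.tz_convert('UTC')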
if __name__ == '__main__':
#http://stackoverflow.com/questions/4005695/changing-order-of-unit-tests-in-python
ln = lambda f: getattr(FluksoapiTest, f).im_func.func_code.co_firstlineno
lncmp = lambda _, a, b: cmp(ln(a), ln(b))
unittest.TestLoader.sortTestMethodsUsing = lncmp
suite1 = unittest.TestLoader().loadTestsFromTestCase(FluksoapiTest)
alltests = unittest.TestSuite([suite1])
#selection = unittest.TestSuite()
#selection.addTest(HouseprintTest('test_get_sensor'))
unittest.TextTestRunner(verbosity=1, failfast=False).run(alltests)
| apache-2.0 |
pothosware/gnuradio | gr-filter/examples/chirp_channelize.py | 58 | 7169 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = analog.sig_source_f(self._fs, analog.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = blocks.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = blocks.vco_f(self._fs, 225, 1)
self.f2c = blocks.float_to_complex()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
kubeflow/pipelines | components/ibm-components/watson/manage/monitor_fairness/src/monitor_fairness.py | 3 | 3978 | import json
import argparse
import re
from ibm_ai_openscale import APIClient
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
from minio import Minio
import pandas as pd
def get_secret_creds(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, help='Deployed model name', default='AIOS Spark German Risk Model - Final')
parser.add_argument('--fairness_threshold', type=float, help='Amount of threshold for fairness monitoring', default=0.95)
parser.add_argument('--fairness_min_records', type=int, help='Minimum amount of records for performing a fairness monitor', default=5)
parser.add_argument('--aios_manifest_path', type=str, help='Object storage file path for the aios manifest file', default='aios.json')
parser.add_argument('--cos_bucket_name', type=str, help='Object storage bucket name', default='bucket-name')
parser.add_argument('--data_filename', type=str, help='Name of the data binary', default="")
args = parser.parse_args()
model_name = args.model_name
fairness_threshold = args.fairness_threshold
fairness_min_records = args.fairness_min_records
cos_bucket_name = args.cos_bucket_name
aios_manifest_path = args.aios_manifest_path
data_filename = args.data_filename
aios_guid = get_secret_creds("/app/secrets/aios_guid")
cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
cos_endpoint = get_secret_creds("/app/secrets/cos_endpoint")
cos_access_key = get_secret_creds("/app/secrets/cos_access_key")
cos_secret_key = get_secret_creds("/app/secrets/cos_secret_key")
''' Remove possible http scheme for Minio '''
url = re.compile(r"https?://")
cos_endpoint = url.sub('', cos_endpoint)
    ''' Connect to Cloud object storage and download inputs '''
cos = Minio(cos_endpoint,
access_key=cos_access_key,
secret_key=cos_secret_key,
secure=True)
cos.fget_object(cos_bucket_name, aios_manifest_path, 'aios.json')
print('Fairness definition file ' + aios_manifest_path + ' is downloaded')
cos.fget_object(cos_bucket_name, data_filename, data_filename)
pd_data = pd.read_csv(data_filename, sep=",", header=0, engine='python')
print('training data ' + data_filename + ' is downloaded and loaded')
""" Load manifest JSON file """
with open('aios.json') as f:
aios_manifest = json.load(f)
""" Initiate AIOS client """
AIOS_CREDENTIALS = {
"instance_guid": aios_guid,
"apikey": cloud_api_key,
"url": "https://api.aiopenscale.cloud.ibm.com"
}
ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
print('AIOS client version:' + ai_client.version)
''' Setup fairness monitoring '''
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for sub in subscriptions_uids:
if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
subscription = ai_client.data_mart.subscriptions.get(sub)
feature_list = []
for feature in aios_manifest['fairness_features']:
feature_list.append(Feature(feature['feature_name'], majority=feature['majority'], minority=feature['minority'], threshold=feature['threshold']))
subscription.fairness_monitoring.enable(
features=feature_list,
favourable_classes=aios_manifest['fairness_favourable_classes'],
unfavourable_classes=aios_manifest['fairness_unfavourable_classes'],
min_records=fairness_min_records,
training_data=pd_data
)
run_details = subscription.fairness_monitoring.run()
print('Fairness monitoring is enabled.')
| apache-2.0 |
gaohr/SEIMS | postprocess/hydroPlot.py | 2 | 21432 | # -*- coding: utf-8 -*-
import datetime
import os
import matplotlib
if os.name != 'nt':
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy
from config import *
def getDayByDay(timeStart, timeEnd):
oneday = datetime.timedelta(days=1)
timeArr = [timeStart]
while timeArr[len(timeArr) - 1] < timeEnd:
tempday = timeArr[len(timeArr) - 1] + oneday
timeArr.append(tempday)
return timeArr
## DateTime
def GetDateArr(timeStart, timeEnd):
TIME_Start = datetime.datetime.strptime(timeStart, "%Y-%m-%d")
TIME_End = datetime.datetime.strptime(timeEnd, "%Y-%m-%d")
dateArr = getDayByDay(TIME_Start, TIME_End)
# print dateArr
return dateArr
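# Usage sketch (illustrative): GetDateArr("2014-01-01", "2014-01-03")
# returns one datetime per day, inclusive of both endpoints:
#   [datetime(2014, 1, 1), datetime(2014, 1, 2), datetime(2014, 1, 3)]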
## Precipitation
def GetPreciObs(timeStart, timeEnd, ClimateDB, SpatialDB):
TIME_Start = datetime.datetime.strptime(timeStart, "%Y-%m-%d")
TIME_End = datetime.datetime.strptime(timeEnd, "%Y-%m-%d")
preci = []
siteListstr = SpatialDB.SITELIST.find_one()['SITELISTP'].split(',')
siteList = []
allSiteValue = []
for s in range(len(siteListstr)):
siteList.append(int(siteListstr[s]))
allSiteValue.append([])
siteArr = numpy.array(siteList)
for pdata in ClimateDB.DATA_VALUES.find({'LOCALDATETIME': {"$gte": TIME_Start, '$lte': TIME_End}, 'TYPE': 'P'}):
if len(numpy.where(siteArr == pdata['STATIONID'])[0]) > 0:
siteIndex = numpy.where(siteArr == pdata['STATIONID'])[0][0]
allSiteValue[siteIndex].append(pdata['VALUE'])
# print type(pdata['STATIONID'])
    ## Sum values over all sites and take the average
for i in range(len(allSiteValue[0])):
preci.append(sum([x[i] for x in allSiteValue]) / len(siteArr))
# print preci
return preci
## Search observed value
def SearchObs(timeStart, timeEnd, sim, ClimateDB):
TIME_Start = datetime.datetime.strptime(timeStart, "%Y-%m-%d")
TIME_End = datetime.datetime.strptime(timeEnd, "%Y-%m-%d")
simNameArr = sim.split('_')
# print simNameArr
obsValue = []
obsDate = []
fieldList = ClimateDB.collection_names()
if "MEASUREMENT" in fieldList:
for obs in ClimateDB.MEASUREMENT.find({
'LOCALDATETIME': {"$gte": TIME_Start, '$lte': TIME_End}, 'TYPE': simNameArr[len(simNameArr) - 1]}):
# print obs['TYPE']
obsValue.append(obs['VALUE'])
obsDate.append(obs['LOCALDATETIME'])
print (obsValue)
dateArr = GetDateArr(timeStart, timeEnd)
obsValueArr = numpy.zeros(len(dateArr))
if len(obsValue) > 0:
for s in range(len(dateArr)):
for t in range(len(obsDate)):
if dateArr[s] == obsDate[t]:
obsValueArr[s] = obsValue[t]
return (obsValueArr, obsValue)
else:
return [[-1]]
else:
# print "None"
return [[-1]]
def getObservedParameter(name):
if '_' in name:
name = name.split('_')[1]
return name
def getBaseVariableName(name):
name = getObservedParameter(name)
if 'Conc' in name:
name = name.split('Conc')[0]
return name
def SearchObs2(timeStart, timeEnd, paramName, subbasinID, ClimateDB):
'''
Look up the observed data according to time range, parameter name, and subbasinID.
The observed dates and the corresponding values will be returned.
:param timeStart:
:param timeEnd:
:param paramName:
:param subbasinID:
:param ClimateDB:
:return:
'''
TIME_Start = datetime.datetime.strptime(timeStart, "%Y-%m-%d")
TIME_End = datetime.datetime.strptime(timeEnd, "%Y-%m-%d")
# simNameArr = paramName.split('_') # no need to do
# print simNameArr
obsValue = []
obsDate = []
fieldList = ClimateDB.collection_names()
if Tag_ClimateDB_Measurement.upper() not in fieldList:
return None, None
if subbasinID == 0: # for the whole basin
siteItems = ClimateDB[Tag_ClimateDB_Sites.upper()].find_one({
'TYPE': getBaseVariableName(paramName),
'ISOUTLET': 1,
}) # this should be unique
if siteItems is None:
return None, None
siteID = siteItems['STATIONID']
for obs in ClimateDB[Tag_ClimateDB_Measurement.upper()].find({
'LOCALDATETIME': {"$gte": TIME_Start, '$lte': TIME_End},
'TYPE': getObservedParameter(paramName),
'STATIONID': siteID}):
# print obs['TYPE']
if obs['VALUE'] > 0:
obsValue.append(obs['VALUE'])
obsDate.append(obs['LOCALDATETIME'])
else: # TODO, not finised yet!
pass
# dateArr = GetDateArr(timeStart, timeEnd)
# obsValueArr = numpy.zeros(len(dateArr))
if len(obsValue) > 0:
return obsDate, obsValue
# for s in range(len(dateArr)):
# for t in range(len(obsDate)):
# if dateArr[s] == obsDate[t]:
# obsValueArr[s] = obsValue[t]
# print obsValueArr
# return (obsValueArr, obsValue)
else:
return None, None
LFs = ['\r\n', '\n\r', '\r', '\n']
def ReadSimfromTxt(timeStart, timeEnd, dataDir, sim, subbasinID = 0):
TIME_Start = datetime.datetime.strptime(timeStart, "%Y-%m-%d")
TIME_End = datetime.datetime.strptime(timeEnd, "%Y-%m-%d")
## Read simulation txt
simData = "%s/%d_%s.txt" % (dataDir, subbasinID, sim)
    # check whether the text file exists
    if not os.path.isfile(simData):
        raise IOError("%s does not exist, please check the configuration!" % simData)
simulate = []
if os.path.exists(simData):
simfile = open(simData, "r")
while True:
line = simfile.readline()
# print line[0]
if line:
for LF in LFs:
if LF in line:
line = line.split(LF)[0]
break
strList = SplitStr(StripStr(line), spliters=" ")
if len(strList) == 3:
dateStr = strList[0] + " " + strList[1]
simDatetime = datetime.datetime.strptime(dateStr, "%Y-%m-%d %H:%M:%S")
if simDatetime >= TIME_Start and simDatetime <= TIME_End:
if isNumericValue(strList[2]):
simulate.append(float(strList[2]))
else:
simulate.append(0.)
else:
break
simfile.close()
if sim == "SED":
simulate_sed = simulate[1:]
simulate_sed.append(0.)
return simulate_sed
else:
return simulate
else:
raise IOError("%s is not exist" % simData)
## Calculate Nash coefficient
# def NashCoef(qObs, qSimu, obsNum=9999):
# n = len(qObs)
# ave = sum(qObs) / n
# a1 = 0
# a2 = 0
# for i in range(n):
# if qObs[i] != 0:
# a1 = a1 + pow(float(qObs[i]) - float(qSimu[i]), 2)
# a2 = a2 + pow(float(qObs[i]) - ave, 2)
# if a2 == 0:
# a2 = 1.e-6
# if obsNum > 1:
# return "%.3f" % round(1 - a1 / a2, 3)
# else:
# return "NAN"
# Move to preprocess/util.py
# def NashCoef2(qObs, qSimu):
# '''
# Calculate Nash coefficient
# :param qObs:
# :param qSimu:
# :return: NSE, numeric value
# '''
# if len(qObs) != len(qSimu):
# raise ValueError("The size of observed and simulated values must be the same for NSE calculation!")
# n = len(qObs)
# ave = sum(qObs) / n
# a1 = 0.
# a2 = 0.
# for i in range(n):
# a1 += pow(float(qObs[i]) - float(qSimu[i]), 2.)
# a2 += pow(float(qObs[i]) - ave, 2.)
# if a2 == 0.:
# return 1.
# return 1. - a1 / a2
# Move to preprocess/util.py
# def RSquare2(qObs, qSimu):
# '''
# Calculate R-square
# :param qObs:
# :param qSimu:
# :return: R-square numeric value
# '''
# if len(qObs) != len(qSimu):
# raise ValueError("The size of observed and simulated values must be the same for R-square calculation!")
# n = len(qObs)
# obsAvg = sum(qObs) / n
# predAvg = sum(qSimu) / n
# obsMinusAvgSq = 0.
# predMinusAvgSq = 0.
# obsPredMinusAvgs = 0.
# for i in range(n):
# obsMinusAvgSq += pow((qObs[i] - obsAvg), 2.)
# predMinusAvgSq += pow((qSimu[i] - predAvg), 2.)
# obsPredMinusAvgs += (qObs[i] - obsAvg) * (qSimu[i] - predAvg)
# # Calculate R-square
# yy = (pow(obsMinusAvgSq, 0.5) * pow(predMinusAvgSq, 0.5))
# if yy == 0.:
# return 1.
# return pow((obsPredMinusAvgs / yy), 2.)
## Calculate R2
# def RSquare(qObs, qSimu, obsNum=9999):
# n = len(qObs)
# sim = []
# for k in range(n):
# sim.append(float(qSimu[k]))
# obsAvg = sum(qObs) / n
# predAvg = sum(sim) / n
# obsMinusAvgSq = 0
# predMinusAvgSq = 0
# obsPredMinusAvgs = 0
# for i in range(n):
# if qObs[i] != 0:
# obsMinusAvgSq = obsMinusAvgSq + pow((qObs[i] - obsAvg), 2)
# predMinusAvgSq = predMinusAvgSq + pow((sim[i] - predAvg), 2)
# obsPredMinusAvgs = obsPredMinusAvgs + (qObs[i] - obsAvg) * (sim[i] - predAvg)
# ## Calculate RSQUARE
# yy = (pow(obsMinusAvgSq, 0.5) * pow(predMinusAvgSq, 0.5))
# if yy == 0:
# yy = 1.e-6
# RSquare = round(pow((obsPredMinusAvgs / yy), 2), 3)
# if obsNum > 1:
# return "%.3f" % RSquare
# else:
# return "NAN"
# Deprecated code which should be removed in next version.
# def CreatePlot(sim_date, flow, preci, simList, vari_Sim, model_dir, ClimateDB):
# # print datetime.datetime.strftime("%Y-%m-%:%Md %H:%S", sim_date[0])
# # print type(sim_date[0])
# # set ticks direction, in or out
# plt.rcParams['xtick.direction'] = 'out'
# plt.rcParams['ytick.direction'] = 'out'
# timeStart = datetime.date.strftime(sim_date[0], "%Y-%m-%d")
# timeEnd = datetime.date.strftime(sim_date[len(sim_date) - 1], "%Y-%m-%d")
# for i in range(len(vari_Sim)):
# # plt.figure(i)
# fig, ax = plt.subplots(figsize=(12, 4))
# if vari_Sim[i] == "Q":
# ylabelStr = vari_Sim[i]
# if vari_Sim[i] == "Q":
# ylabelStr += " (m$^3$/s)"
# else:
# ylabelStr += " (kg)"
# obs = SearchObs(timeStart, timeEnd, vari_Sim[i], ClimateDB)
# if obs[0][0] != -1:
# # Flow with observed value
# p1 = ax.bar(sim_date, obs[0], label="Observation", color="none", edgecolor='black',
# linewidth=1, align="center", hatch="//")
# p2, = ax.plot(sim_date, simList[i], label="Simulation", color="black",
# marker="o", markersize=2, linewidth=1)
# plt.xlabel('Date')
# # format the ticks date axis
# # autodates = mdates.AutoDateLocator()
# days = mdates.DayLocator(bymonthday=range(1, 32), interval=4)
# months = mdates.MonthLocator()
# dateFmt = mdates.DateFormatter('%m-%d')
# ax.xaxis.set_major_locator(months)
# ax.xaxis.set_major_formatter(dateFmt)
# ax.xaxis.set_minor_locator(days)
# ax.tick_params('both', length=5, width=2, which='major')
# ax.tick_params('both', length=3, width=1, which='minor')
# # fig.autofmt_xdate()
#
# plt.ylabel(ylabelStr)
# # plt.legend(bbox_to_anchor = (0.03, 0.85), loc = 2, shadow = True)
# ax.set_ylim(float(min(simList[i])) * 0.8, float(max(simList[i])) * 1.8)
# ax2 = ax.twinx()
# ax.tick_params(axis='x', which='both', bottom='on', top='off')
# ax2.tick_params('y', length=5, width=2, which='major')
# ax2.set_ylabel(r"Precipitation (mm)")
# p3 = ax2.bar(sim_date, preci, label="Rainfall", color="black", linewidth=0, align="center")
# ax2.set_ylim(float(max(preci)) * 1.8, float(min(preci)) * 0.8)
# ax.legend([p3, p1, p2], ["Rainfall", "Observation", "Simulation"], bbox_to_anchor=(0.03, 0.85), loc=2,
# shadow=True)
# plt.title("Simulation of %s\n" % vari_Sim[i], color="#aa0903")
# plt.title("\nNash: %s, R$^2$: %s" %
# (NashCoef(obs[0], simList[i], len(obs[1])),
# RSquare(obs[0], simList[i], len(obs[1]))),
# color="red", loc='right')
# plt.tight_layout()
# plt.savefig(model_dir + os.sep + vari_Sim[i] + ".png")
# else:
# # Flow without observed value
# p2, = ax.plot(sim_date, simList[i], label="Simulation", color="black",
# marker="o", markersize=2, linewidth=1)
# plt.xlabel('Date')
# # format the ticks date axis
# # autodates = mdates.AutoDateLocator()
# days = mdates.DayLocator(bymonthday=range(1, 32), interval=4)
# months = mdates.MonthLocator()
# dateFmt = mdates.DateFormatter('%m-%d')
# ax.xaxis.set_major_locator(months)
# ax.xaxis.set_major_formatter(dateFmt)
# ax.xaxis.set_minor_locator(days)
# ax.tick_params('both', length=5, width=2, which='major')
# ax.tick_params('both', length=3, width=1, which='minor')
# # fig.autofmt_xdate()
#
# plt.ylabel(ylabelStr)
# # plt.legend(bbox_to_anchor = (0.03, 0.85), loc = 2, shadow = True)
# ax.set_ylim(float(min(simList[i])) * 0.8, float(max(simList[i])) * 1.8)
# ax2 = ax.twinx()
# ax.tick_params(axis='x', which='both', bottom='on', top='off')
# ax2.tick_params('y', length=5, width=2, which='major')
# ax2.set_ylabel(r"Precipitation (mm)")
# p3 = ax2.bar(sim_date, preci, label="Rainfall", color="black", linewidth=0, align="center")
# ax2.set_ylim(float(max(preci)) * 1.8, float(min(preci)) * 0.8)
# ax.legend([p3, p2], ["Rainfall", "Simulation"], bbox_to_anchor=(0.03, 0.85), loc=2,
# shadow=True)
# plt.title("Simulation of %s\n" % vari_Sim[i], color="#aa0903")
# plt.tight_layout()
# plt.savefig(model_dir + os.sep + vari_Sim[i] + ".png")
#
# else:
# obs = SearchObs(timeStart, timeEnd, vari_Sim[i], ClimateDB)
# if obs[0][0] != -1:
# # Simulation with observed value
# plt.bar(sim_date, obs[0], label = "Observation", color = "green", linewidth = 0, align = "center")
# plt.plot(sim_date, simList[i], label = "Simulation", color = "black",
# marker = "o", markersize = 1, linewidth = 1)
# plt.xlabel('Date')
# plt.ylabel(vari_Sim[i])
# plt.legend(bbox_to_anchor = (0.03, 0.85), loc = 2, shadow = True)
# ax.set_ylim(float(min(simList[i])) * 0.8, float(max(simList[i])) * 1.8)
# ax2 = ax.twinx()
# ax2.set_ylabel(r"Flow (m$^3$/s)")
# ax2.plot(sim_date, flow, label = "Flow", color = "blue", linewidth = 1)
# ax2.set_ylim(float(max(flow)) * 1.8, float(min(flow)) * 0.8)
# plt.title("Simulation of %s \n" % vari_Sim[i], color = "#aa0903")
# plt.title("\nNash: %s, R$^2$: %s" %
# (NashCoef(obs[0], simList[i], len(obs[1])),
# str(RSquare(obs[0], simList[i], len(obs[1])))), color = "red", loc = 'right')
# else:
# # Simulation without observed value
# plt.plot(sim_date, simList[i], label = "Simulation", color = "green",
# marker = "o", markersize = 1, linewidth = 1)
# plt.xlabel('Date')
# plt.ylabel(vari_Sim[i])
# plt.legend(bbox_to_anchor = (0.03, 0.85), loc = 2, shadow = True)
# ax.set_ylim(0, float(max(simList[i])) * 1.8 + 1)
# ax2 = ax.twinx()
# ax2.set_ylabel(r"Flow (m$^3$/s)")
# ax2.plot(sim_date, flow, label = "Flow", color = "blue", linewidth = 1)
# ax2.set_ylim(float(max(flow)) * 1.8, float(min(flow)) * 0.8)
# plt.title("Simulation of %s \n" % vari_Sim[i], color = "#aa0905")
# plt.show()
def CreatePlot2(sim_date, preci, simList, vari_Sim, model_dir, ClimateDB):
'''
Create hydrographs of discharge, sediment, nutrient (amount or concentrate), etc.
:param sim_date:
:param preci:
:param simList:
:param vari_Sim:
:param model_dir:
:param ClimateDB:
:return:
'''
# print datetime.datetime.strftime("%Y-%m-%:%Md %H:%S", sim_date[0])
# print type(sim_date[0])
# set ticks direction, in or out
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
timeStart = datetime.date.strftime(sim_date[0], "%Y-%m-%d")
timeEnd = datetime.date.strftime(sim_date[len(sim_date) - 1], "%Y-%m-%d")
for i in range(len(vari_Sim)):
# plt.figure(i)
fig, ax = plt.subplots(figsize = (12, 4))
# if vari_Sim[i] == "Q":
ylabelStr = vari_Sim[i]
if vari_Sim[i] in ["Q", "Qi", "QG", "QS"]:
ylabelStr += " (m$^3$/s)"
elif "CONC" in vari_Sim[i].upper(): # Concentrate
ylabelStr += " (mg/L)"
else: # amount
ylabelStr += " (kg)"
# print ylabelStr
# obs = SearchObs(timeStart, timeEnd, vari_Sim[i], ClimateDB)
obsDates, obsValues = SearchObs2(timeStart, timeEnd, vari_Sim[i], PLOT_SUBBASINID, ClimateDB)
# print obs
# p1 = ax.bar(sim_date, obs[0], label = "Observation", color = "none", edgecolor = 'black',
# linewidth = 1, align = "center", hatch = "//")
if obsValues is not None:
# p1 = ax.bar(obsDates, obsValues, label = "Observation", color = "none", edgecolor = 'black',
# linewidth = 1, align = "center", hatch = "//")
p1 = ax.bar(obsDates, obsValues, label="Observation", color="#55aa33", linewidth=0, align="center")
p2, = ax.plot(sim_date, simList[i], label="Simulation", color="black", marker="o", markersize=2, linewidth=1)
plt.xlabel('Date')
# format the ticks date axis
# autodates = mdates.AutoDateLocator()
days = mdates.DayLocator(bymonthday = range(1, 32), interval = 4)
months = mdates.MonthLocator()
dateFmt = mdates.DateFormatter('%m-%d')
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(dateFmt)
ax.xaxis.set_minor_locator(days)
ax.tick_params('both', length = 5, width = 2, which = 'major')
ax.tick_params('both', length = 3, width = 1, which = 'minor')
# fig.autofmt_xdate()
plt.ylabel(ylabelStr)
# plt.legend(bbox_to_anchor = (0.03, 0.85), loc = 2, shadow = True)
ax.set_ylim(float(min(simList[i])) * 0.2, float(max(simList[i])) * 1.8)
ax2 = ax.twinx()
ax.tick_params(axis = 'x', which = 'both', bottom = 'on', top = 'off')
ax2.tick_params('y', length = 5, width = 2, which = 'major')
ax2.set_ylabel(r"Precipitation (mm)")
p3 = ax2.bar(sim_date, preci, label = "Rainfall", color = "#3366cc", linewidth = 0, align = "center")
ax2.set_ylim(float(max(preci)) * 1.8, float(min(preci)) * 0.8)
if obsValues is None or len(obsValues) < 2:
ax.legend([p3, p2], ["Rainfall", "Simulation"],
bbox_to_anchor = (0.03, 0.85), loc = 2, shadow = True)
else:
ax.legend([p3, p1, p2], ["Rainfall", "Observation", "Simulation"],
bbox_to_anchor = (0.03, 0.85), loc = 2, shadow = True)
simValues = []
obsValuesNew = []
for obsIdx, dd in enumerate(obsDates):
try:
idx = sim_date.index(dd)
simValues.append(simList[i][idx])
obsValuesNew.append(obsValues[obsIdx])
except ValueError:
pass
if len(obsValuesNew) > 1:
# print vari_Sim[i]
# print "obs: ", obsValuesNew
# print "sim: ", simValues
plt.title("\nNash: %.2f, R$^2$: %.2f" % (NashCoef(obsValuesNew, simValues),
RSquare(obsValuesNew, simValues)),
color = "red", loc = 'right')
plt.title("Simulation of %s\n" % vari_Sim[i], color = "#aa0903")
# plt.title("\nNash: %s, R$^2$: %s" %
# (NashCoef(obs[0], simList[i], len(obs[1])),
# RSquare(obs[0], simList[i], len(obs[1]))),
# color = "red", loc = 'right')
plt.tight_layout()
plt.savefig(model_dir + os.sep + vari_Sim[i] + ".png", dpi=200)
plt.show()
| gpl-2.0 |
RayleighChen/Improve | Project/python/PyNN/weights.py | 1 | 1708 | import numpy as np
import matplotlib.pyplot as plt
D = np.random.randn(1000, 500)
hidden_layer_sizes = [500]*10
#***************active function "tanh"
nonlinearities = ['tanh']*len(hidden_layer_sizes)
#***************active function "relu"
#nonlinearities = ['relu']*len(hidden_layer_sizes)
act = {'relu':lambda x:np.maximum(0, x), 'tanh':lambda x:np.tanh(x)}
Hs = {}
for i in xrange(len(hidden_layer_sizes)):
X = D if i == 0 else Hs[i-1] #input layer
fan_in = X.shape[1]
fan_out = hidden_layer_sizes[i]
#-------little random number
#W = np.random.randn(fan_in, fan_out) * 0.01
#-------zero
#W = 0
#-------big number
W = np.random.randn(fan_in, fan_out) * 1
#-------xavier and tanh (xavier and relu)
#W = np.random.randn(fan_in, fan_out) / np.sqrt(fan_in)
#-------Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification by He et al., 2015
#W = np.random.randn(fan_in, fan_out) / np.sqrt(fan_in/2)
H = np.dot(X, W)
H = act[nonlinearities[i]](H)
Hs[i] = H # cache result on this layer
print 'input layer had mean %f and std %f' % (np.mean(D), np.std(D))
layer_means = [np.mean(H) for i,H in Hs.iteritems()]
layer_stds = [np.std(H) for i,H in Hs.iteritems()]
for i,H in Hs.iteritems():
print 'hidden layer %d had mean %f and std %f' % (i+1, layer_means[i], layer_stds[i])
plt.figure()
plt.subplot(121)
plt.plot(Hs.keys(), layer_means, 'ob-')
plt.title('layer mean')
plt.subplot(122)
plt.plot(Hs.keys(), layer_stds, 'or-')
plt.title('layer std')
#plot the raw distributions
plt.figure()
for i,H in Hs.iteritems():
plt.subplot(1,len(Hs),i+1)
plt.hist(H.ravel(), 30, range=(-1,1))
plt.show()
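# Note (illustrative): the He et al. (2015) scaling referenced in the
# commented initializations above can be written either way, since
# randn / sqrt(fan_in / 2) == randn * sqrt(2 / fan_in):
#   W_he = np.random.randn(fan_in, fan_out) * np.sqrt(2. / fan_in)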
| mit |
deepesch/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
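# Illustrative usage (the values follow directly from the definition above):
#
#   >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#   array([[1, 1],
#          [0, 2]])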
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even when permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
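# Illustrative check of the chance adjustment (a sketch): labels drawn
# independently of each other score close to 0, unlike the raw Rand index.
#
#   rng = np.random.RandomState(0)
#   adjusted_rand_score(rng.randint(2, size=100), rng.randint(2, size=100))
#   # -> a value near 0.0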
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
    This metric is not symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the :func:`completeness_score` which will be
    different in general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
    Clusters that include samples from different classes do not make for a
    homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
    This metric is not symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the :func:`homogeneity_score` which will be
    different in general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
        score between 0.0 and 1.0. 1.0 stands for a perfectly homogeneous
        and complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
    Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
    Clusters that include samples from totally different classes destroy the
    homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
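    Examples
    --------
    Two identical binary labelings share ``log(2)`` nats of information
    (a quick illustrative check)::
      >>> from sklearn.metrics.cluster import mutual_info_score
      >>> print("%.6f" % mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]))
      ... # doctest: +ELLIPSIS
      0.69...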
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper-bounded by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.
    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
        score between 0.0 and 1.0. 1.0 stands for a perfect match between
        the two labelings
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
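    # normalize MI by the geometric mean of the two entropies; the 1e-10
    # floor guards against division by zero in degenerate cases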
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 58 | 17158 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
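# Minkowski p-norm distance, used below in test_pyfunc_metric as a
# user-supplied callable metric.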
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
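# e.g. cmp_version('0.13.1', '0.9') == 1 since (0, 13) > (0, 9); only the
# first two numeric components of each version string are compared.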
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
celiacintas/prediction_tests | pre_process.py | 1 | 4674 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
import argparse
import os
import re
import sys
# If you have bed/bim/fam files, first run
# plink --bfile bed_file --recodeA --out raw_file
# this returns the additively-coded raw genotype file
class RawDataPheno(object):
"""
docstring for RawDataPheno
"""
def __init__(self, filename, pheno_name):
super(RawDataPheno, self).__init__()
self.filename = filename
self.pheno_name = pheno_name
#@profile
def get_pheno(self):
"""
        Return a DataFrame with the requested phenotype columns.
"""
df = pd.read_csv(self.filename, usecols=self.pheno_name)
return df
class RawDataGeno(object):
"""
    Reader for genotype files: keeps only the ID and selected SNP columns.
"""
def __init__(self, filenames, snps_to_use):
"""
"""
super(RawDataGeno, self).__init__()
self.filenames = filenames
# Get the header from the first file
self.all_snps = self.get_all_snps(filenames[0])
self.snps_to_use = self.get_filter_snps(snps_to_use)
self.snps_to_use = [snp[0] for snp in self.snps_to_use]
#@profile
def get_all_snps(self, filename):
"""
Return all the columns of the original file.
"""
df = pd.read_csv(filename, sep=r"\s*", nrows=1)
cols = df.columns.values
del df
return cols
#@profile
def get_filter_snps(self, snps_to_use):
"""
Get only the columns for the ID and wanted snps.
"""
        #TODO: get the snps to filter by parameter
union_term = "_\w|"
search_term = "(IID|" + union_term.join(snps_to_use) + "_\w)"
search_re = re.compile(search_term).search
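        # e.g. for snps_to_use = ["rs123", "rs456"] (illustrative ids) the
        # pattern is "(IID|rs123_\w|rs456_\w)": the IID column plus any
        # allele-suffixed SNP column such as "rs123_A".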
return [ ( l, m.group(1) ) for l in self.all_snps for m in (search_re(l),) if m]
#@profile
def get_geno(self, filename):
"""
Return only the values of the selected snps and the
ID of the individual.
"""
df = pd.read_csv(filename, header=None, names=self.all_snps,
usecols=self.snps_to_use, sep=r"\s*")
return df
#@profile
def save_binary_file(self, df, file):
"""
Take geno values, snps and id to npz files
"""
        #TODO: check if this has to be a method
X = df.ix[:, 1:].values
snps = self.snps_to_use
ids = df['IID'].values
np.savez("out_".format(os.path.splitext(os.path.basename(file))[0]),
X=X, snps=snps, ids=ids)
###### CLEAN THIS #######
#@profile
def merge_geno_pheno(df_pheno, ids, X, snps):
"""
"""
# TODO pass by parameters
#df_pheno = get_pheno('data/prediction/EarPhenoNew2NA.txt', ['IID', 'LobeSize'])
df_geno_id = pd.DataFrame(ids, columns=['IID'])
df_geno_data = pd.DataFrame(X, columns=snps[1:])
df_geno = pd.concat([df_geno_id, df_geno_data], axis=1)
df_merged = pd.merge(df_pheno, df_geno, on='IID')
df_merged.fillna(-1, inplace=True)
return df_merged
#@profile
def load_npz(filename):
"""
"""
data = np.load(filename)
ids = data['ids']
X = data['X']
snps = data['snps']
return ids, X, snps
def normalization(X):
"""
"""
imputer = Imputer(missing_values=-1, strategy="most_frequent")
X = imputer.fit_transform(X)
return X
###### MAIN PART #######
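# Example invocations (illustrative paths):
# python pre_process.py --file data/raw_file.raw
# python pre_process.py --folder data/raw_files/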
#@profile
def main(filenames, snps_to_use):
"""
Save the raw data into npz files for faster manipulation
"""
    #TODO: pass the files by parameter
my_data = RawDataGeno(filenames, snps_to_use)
for file in filenames:
df = my_data.get_geno(file)
my_data.save_binary_file(df, file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Pre Processing Data')
group = parser.add_mutually_exclusive_group()
group.add_argument("--file", dest="file", default=None,
help='Pass the path of the file to be process')
group.add_argument("--folder", dest="folder", default=None,
help='Pass the path of the file to be process')
args = parser.parse_args()
if args.file:
filenames = [args.file]
elif args.folder:
filenames = sorted(map(lambda f: os.path.join(args.folder, f), os.listdir(args.folder)))
else:
parser.print_help()
sys.exit(1)
    #TODO: pass this by parameter
snps_to_use = ["rs17023457", "rs3827760", "rs2080401", "rs10212419",
"rs1960918", "rs263156", "rs1619249"]
main(filenames, snps_to_use) | gpl-2.0 |
jstoxrocky/statsmodels | statsmodels/tsa/statespace/tests/test_sarimax.py | 6 | 49508 | """
Tests for SARIMAX models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.tsa.statespace import sarimax, tools
from statsmodels.tsa import arima_model as arima
from .results import results_sarimax
from statsmodels.tools import add_constant
from numpy.testing import assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
coverage_path = 'results' + os.sep + 'results_sarimax_coverage.csv'
coverage_results = pd.read_csv(current_path + os.sep + coverage_path)
class TestSARIMAXStatsmodels(object):
"""
Test ARIMA model using SARIMAX class against statsmodels ARIMA class
"""
def __init__(self):
self.true = results_sarimax.wpi1_stationary
endog = self.true['data']
self.model_a = arima.ARIMA(endog, order=(1,1,1))
self.result_a = self.model_a.fit(disp=-1)
self.model_b = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
simple_differencing=True,
hamilton_representation=True)
self.result_b = self.model_b.fit(disp=-1)
def test_loglike(self):
assert_allclose(self.result_b.llf, self.result_a.llf)
def test_aic(self):
assert_allclose(self.result_b.aic, self.result_a.aic)
def test_bic(self):
assert_allclose(self.result_b.bic, self.result_a.bic)
def test_hqic(self):
assert_allclose(self.result_b.hqic, self.result_a.hqic)
def test_mle(self):
# ARIMA estimates the mean of the process, whereas SARIMAX estimates
# the intercept. Convert the mean to intercept to compare
params_a = self.result_a.params
params_a[0] = (1 - params_a[1]) * params_a[0]
assert_allclose(self.result_b.params[:-1], params_a, atol=5e-5)
def test_bse(self):
assert_allclose(
self.result_b.bse[1:-1],
self.result_a.bse[1:],
atol=1e-2
)
def test_t_test(self):
import statsmodels.tools._testing as smt
#self.result_b.pvalues
#self.result_b._cache['pvalues'] += 1 # use to trigger failure
smt.check_ttest_tvalues(self.result_b)
smt.check_ftest_pvalues(self.result_b)
class SARIMAXStataTests(object):
def test_loglike(self):
assert_almost_equal(
self.result.llf,
self.true['loglike'], 4
)
def test_aic(self):
assert_almost_equal(
self.result.aic,
self.true['aic'], 3
)
def test_bic(self):
assert_almost_equal(
self.result.bic,
self.true['bic'], 3
)
def test_hqic(self):
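        # HQIC = -2 * llf + 2 * k * log(log(nobs)), with k the number of
        # estimated parameters; recomputed by hand for the comparison below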
hqic = (
-2*self.result.llf +
2*np.log(np.log(self.result.nobs)) *
self.result.params.shape[0]
)
assert_almost_equal(
self.result.hqic,
hqic, 3
)
class ARIMA(SARIMAXStataTests):
"""
ARIMA model
Stata arima documentation, Example 1
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = true['data']
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
*args, **kwargs)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
self.model.update(params)
self.result = self.model.filter()
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestARIMAStationary(ARIMA):
def __init__(self):
super(TestARIMAStationary, self).__init__(
results_sarimax.wpi1_stationary
)
self.result = self.model.filter()
def test_bse(self):
assert_allclose(
self.result.bse[1], self.true['se_ar_oim'],
atol=1e-3,
)
assert_allclose(
self.result.bse[2], self.true['se_ma_oim'],
atol=1e-3, rtol=1e-2
)
class TestARIMADiffuse(ARIMA):
def __init__(self):
super(TestARIMADiffuse, self).__init__(results_sarimax.wpi1_diffuse)
self.model.initialize_approximate_diffuse(self.true['initial_variance'])
self.result = self.model.filter()
def test_bse(self):
assert_allclose(
self.result.bse[1], self.true['se_ar_oim'],
atol=1e-1,
)
assert_allclose(
self.result.bse[2], self.true['se_ma_oim'],
atol=1e-1, rtol=1e-1
)
class AdditiveSeasonal(SARIMAXStataTests):
"""
ARIMA model with additive seasonal effects
Stata arima documentation, Example 2
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, order=(1, 1, (1, 0, 0, 1)), trend='c', *args, **kwargs
)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
self.model.update(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestAdditiveSeasonal(AdditiveSeasonal):
def __init__(self):
super(TestAdditiveSeasonal, self).__init__(
results_sarimax.wpi1_seasonal
)
self.result = self.model.filter()
def test_bse(self):
assert_allclose(
self.result.bse[1], self.true['se_ar_oim'],
atol=1e-3, rtol=1e-2
)
assert_allclose(
self.result.bse[2:4], self.true['se_ma_oim'],
atol=1e-3,
)
class Airline(SARIMAXStataTests):
"""
Multiplicative SARIMA model: "Airline" model
Stata arima documentation, Example 3
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, order=(0, 1, 1), seasonal_order=(0, 1, 1, 12),
trend='n', *args, **kwargs
)
params = np.r_[true['params_ma'], true['params_seasonal_ma'],
true['params_variance']]
self.model.update(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-4
)
class TestAirlineHamilton(Airline):
def __init__(self):
super(TestAirlineHamilton, self).__init__(
results_sarimax.air2_stationary
)
self.result = self.model.filter()
def test_bse(self):
assert_allclose(
self.result.bse[0], self.true['se_ma_oim'],
atol=1e-4,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-3,
)
class TestAirlineHarvey(Airline):
def __init__(self):
super(TestAirlineHarvey, self).__init__(
results_sarimax.air2_stationary, hamilton_representation=False
)
self.result = self.model.filter()
def test_bse(self):
assert_allclose(
self.result.bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-3,
)
class TestAirlineStateDifferencing(Airline):
def __init__(self):
super(TestAirlineStateDifferencing, self).__init__(
results_sarimax.air2_stationary, simple_differencing=False,
hamilton_representation=False
)
self.result = self.model.filter()
def test_bic(self):
# Due to diffuse component of the state (which technically changes the
# BIC calculation - see Durbin and Koopman section 7.4), this is the
# best we can do for BIC
assert_almost_equal(
self.result.bic,
self.true['bic'], 0
)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
def test_bse(self):
assert_allclose(
self.result.bse[0], self.true['se_ma_oim'],
atol=1e-3,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-4,
)
class Friedman(SARIMAXStataTests):
"""
ARMAX model: Friedman quantity theory of money
Stata arima documentation, Example 4
"""
def __init__(self, true, exog=None, *args, **kwargs):
self.true = true
endog = np.r_[true['data']['consump']]
if exog is None:
exog = add_constant(true['data']['m2'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, exog=exog, order=(1, 0, 1), *args, **kwargs
)
params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
self.model.update(params)
class TestFriedmanMLERegression(Friedman):
def __init__(self):
super(TestFriedmanMLERegression, self).__init__(
results_sarimax.friedman2_mle
)
self.result = self.model.filter()
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-2, rtol=1e-3
)
def test_bse(self):
assert_allclose(
self.result.bse[0], self.true['se_exog_oim'][0],
rtol=3e-1
)
assert_allclose(
self.result.bse[1], self.true['se_exog_oim'][1],
atol=1e-2,
)
assert_allclose(
self.result.bse[2], self.true['se_ar_oim'],
atol=1e-1,
)
assert_allclose(
self.result.bse[3], self.true['se_ma_oim'],
atol=1e-2,
)
class TestFriedmanStateRegression(Friedman):
def __init__(self):
# Remove the regression coefficients from the parameters, since they
# will be estimated as part of the state vector
true = dict(results_sarimax.friedman2_mle)
exog = add_constant(true['data']['m2']) / 10.
true['mle_params_exog'] = true['params_exog'][:]
true['mle_se_exog'] = true['se_exog_oim'][:]
true['params_exog'] = []
true['se_exog'] = []
super(TestFriedmanStateRegression, self).__init__(
true, exog=exog, mle_regression=False
)
self.result = self.model.filter()
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-1, rtol=1e-1
)
def test_regression_parameters(self):
# The regression effects are integrated into the state vector as
# the last two states (thus the index [-2:]). The filtered
# estimates of the state vector produced by the Kalman filter and
# stored in `filtered_state` for these state elements give the
# recursive least squares estimates of the regression coefficients
# at each time period. To get the estimates conditional on the
# entire dataset, use the filtered states from the last time
# period (thus the index [-1]).
assert_almost_equal(
self.result.filtered_state[-2:, -1] / 10.,
self.true['mle_params_exog'], 1
)
# Loglikelihood (and so aic, bic) is slightly different when states are
# integrated into the state vector
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_bse(self):
assert_allclose(
self.result.bse[0], self.true['se_ar_oim'],
atol=1e-1,
)
assert_allclose(
self.result.bse[1], self.true['se_ma_oim'],
atol=1e-2,
)
class TestFriedmanPredict(Friedman):
"""
ARMAX model: Friedman quantity theory of money, prediction
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This follows the given Stata example, although it is not truly forecasting
because it compares using the actual data (which is available in the
example but just not used in the parameter MLE estimation) against dynamic
prediction of that data. Here `test_predict` matches the first case, and
`test_dynamic_predict` matches the second.
"""
def __init__(self):
super(TestFriedmanPredict, self).__init__(
results_sarimax.friedman2_predict
)
self.result = self.model.filter()
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_predict(self):
assert_almost_equal(
self.result.predict()[0],
self.true['predict'], 3
)
def test_dynamic_predict(self):
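        # dynamic prediction starts 15 periods before the end of the sample,
        # matching the Stata example's 15-observation holdout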
dynamic = len(self.true['data']['consump'])-15-1
assert_almost_equal(
self.result.predict(dynamic=dynamic)[0],
self.true['dynamic_predict'], 3
)
class TestFriedmanForecast(Friedman):
"""
ARMAX model: Friedman quantity theory of money, forecasts
Variation on:
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This is a variation of the Stata example, in which the endogenous data is
actually made to be missing so that the predict command must forecast.
    As another unit test, we also compare against the case in Stata when
    predict is used against missing data (so forecasting) with the dynamic
    option also included. Note, however, that forecasting in state space models
amounts to running the Kalman filter against missing datapoints, so it is
not clear whether "dynamic" forecasting (where instead of missing
datapoints for lags, we plug in previous forecasted endog values) is
meaningful.
"""
def __init__(self):
true = dict(results_sarimax.friedman2_predict)
true['forecast_data'] = {
'consump': true['data']['consump'][-15:],
'm2': true['data']['m2'][-15:]
}
true['data'] = {
'consump': true['data']['consump'][:-15],
'm2': true['data']['m2'][:-15]
}
super(TestFriedmanForecast, self).__init__(true)
self.result = self.model.filter()
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_forecast(self):
end = len(self.true['data']['consump'])+15-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, exog=exog)[0],
self.true['forecast'], 3
)
def test_dynamic_forecast(self):
end = len(self.true['data']['consump'])+15-1
dynamic = len(self.true['data']['consump'])-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, dynamic=dynamic, exog=exog)[0],
self.true['dynamic_forecast'], 3
)
class SARIMAXCoverageTest(object):
def __init__(self, i, decimal=4, endog=None, *args, **kwargs):
# Dataset
if endog is None:
endog = results_sarimax.wpi1_data
# Loglikelihood, parameters
self.true_loglike = coverage_results.loc[i]['llf']
self.true_params = np.array([float(x) for x in coverage_results.loc[i]['parameters'].split(',')])
# Stata reports the standard deviation; make it the variance
self.true_params[-1] = self.true_params[-1]**2
# Test parameters
self.decimal = decimal
# Compare using the Hamilton representation and simple differencing
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(endog, *args, **kwargs)
def test_loglike(self):
self.model.update(self.true_params)
self.result = self.model.filter()
assert_allclose(
self.result.llf,
self.true_loglike,
atol=0.7 * 10**(-self.decimal)
)
def test_start_params(self):
# just a quick test that start_params isn't throwing an exception
# (other than related to invertibility)
self.model.enforce_stationarity = False
self.model.enforce_invertibility = False
self.model.start_params
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
def test_transform_untransform(self):
true_constrained = self.true_params
# Sometimes the parameters given by Stata are not stationary and / or
# invertible, so we need to skip those transformations for those
# parameter sets
self.model.update(self.true_params)
contracted_polynomial_seasonal_ar = self.model.polynomial_seasonal_ar[self.model.polynomial_seasonal_ar.nonzero()]
self.model.enforce_stationarity = (
(self.model.k_ar == 0 or tools.is_invertible(np.r_[1, -self.model.polynomial_ar[1:]])) and
(len(contracted_polynomial_seasonal_ar) <= 1 or tools.is_invertible(np.r_[1, -contracted_polynomial_seasonal_ar[1:]]))
)
contracted_polynomial_seasonal_ma = self.model.polynomial_seasonal_ma[self.model.polynomial_seasonal_ma.nonzero()]
self.model.enforce_invertibility = (
(self.model.k_ma == 0 or tools.is_invertible(np.r_[1, -self.model.polynomial_ma[1:]])) and
(len(contracted_polynomial_seasonal_ma) <= 1 or tools.is_invertible(np.r_[1, -contracted_polynomial_seasonal_ma[1:]]))
)
unconstrained = self.model.untransform_params(true_constrained)
constrained = self.model.transform_params(unconstrained)
assert_almost_equal(constrained, true_constrained, 4)
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
def test_results(self):
self.model.update(self.true_params)
self.result = self.model.filter()
# Just make sure that no exceptions are thrown during summary
self.result.summary()
def test_init_keys_replicate(self):
mod1 = self.model
mod1.update(self.true_params) # test with side effect ?
if mod1.initialization == 'approximate_diffuse':
raise SkipTest('known failure: init_keys incomplete')
kwargs = self.model._get_init_kwds()
# TODO: current limitations #2259 comments
#endog = mod1.endog.squeeze() # test failures, endog may be transformed
# this works
#endog = mod1.orig_endog
#exog = mod1.orig_exog
# in SARIMAX endog, exog will always be in kwargs but not in other models
if 'endog' in kwargs:
endog = kwargs.pop('endog')
if 'exog' in kwargs:
exog = kwargs.pop('exog')
model2 = sarimax.SARIMAX(endog, exog, **kwargs)
model2.update(self.true_params)
res1 = self.model.filter()
res2 = model2.filter()
assert_allclose(res2.llf, res1.llf, rtol=1e-13)
class Test_ar(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
super(Test_ar, self).__init__(0, *args, **kwargs)
class Test_ar_as_polynomial(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = ([1,1,1],0,0)
super(Test_ar_as_polynomial, self).__init__(0, *args, **kwargs)
class Test_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3,0,0) noconstant vce(oim)
# save_results 2
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = 'c'
super(Test_ar_trend_c, self).__init__(1, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[0] = (1 - self.true_params[1:4].sum()) * self.true_params[0]
class Test_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3,0,0) noconstant vce(oim)
# save_results 3
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = 'ct'
super(Test_ar_trend_ct, self).__init__(2, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(3,0,0) noconstant vce(oim)
# save_results 4
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = [1,0,0,1]
super(Test_ar_trend_polynomial, self).__init__(3, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_ar_diff(SARIMAXCoverageTest):
# // AR and I(d): (p,d,0) x (0,0,0,0)
# arima wpi, arima(3,2,0) noconstant vce(oim)
# save_results 5
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,0)
super(Test_ar_diff, self).__init__(4, *args, **kwargs)
class Test_ar_seasonal_diff(SARIMAXCoverageTest):
# // AR and I(D): (p,0,0) x (0,D,0,s)
# arima wpi, arima(3,0,0) sarima(0,2,0,4) noconstant vce(oim)
# save_results 6
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_ar_seasonal_diff, self).__init__(5, *args, **kwargs)
class Test_ar_diffuse(SARIMAXCoverageTest):
# // AR and diffuse initialization
# arima wpi, arima(3,0,0) noconstant vce(oim) diffuse
# save_results 7
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
super(Test_ar_diffuse, self).__init__(6, *args, **kwargs)
self.model.initialize_approximate_diffuse(1e9)
class Test_ar_no_enforce(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['enforce_stationarity'] = False
kwargs['enforce_invertibility'] = False
kwargs['initial_variance'] = 1e9
super(Test_ar_no_enforce, self).__init__(6, *args, **kwargs)
# Reset loglikelihood burn, which gets automatically set to the number
# of states if enforce_stationarity = False
self.model.loglikelihood_burn = 0
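        # (With the large initial_variance above the filter is started
        # approximately diffuse, so by default the first k_states
        # loglikelihood terms would be dropped; keeping them makes the value
        # comparable, up to a small tolerance, with the Stata output.)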
def test_loglike(self):
        # The diffuse-style initialization (with the burn-in terms retained)
        # gives a slightly different loglikelihood, so just check that it's
        # approximately the same
self.model.update(self.true_params)
self.result = self.model.filter()
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
class Test_ar_exogenous(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3,0,0) noconstant vce(oim)
# save_results 8
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ar_exogenous, self).__init__(7, *args, **kwargs)
class Test_ar_exogenous_in_state(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3,0,0) noconstant vce(oim)
# save_results 8
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['mle_regression'] = False
super(Test_ar_exogenous_in_state, self).__init__(7, *args, **kwargs)
self.true_regression_coefficient = self.true_params[0]
self.true_params = self.true_params[1:]
def test_loglike(self):
# Regression in the state vector gives a different loglikelihood, so
# just check that it's approximately the same
self.model.update(self.true_params)
self.result = self.model.filter()
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
def test_regression_coefficient(self):
# Test that the regression coefficient (estimated as the last filtered
# state estimate for the regression state) is the same as the Stata
# MLE state
self.model.update(self.true_params)
self.result = self.model.filter()
assert_allclose(
self.result.filtered_state[3][-1],
self.true_regression_coefficient,
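            # note: ``self.decimal`` is passed positionally here, i.e. as
            # rtol, which makes this a fairly loose comparison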
self.decimal
)
class Test_ma(SARIMAXCoverageTest):
# // MA: (0,0,q) x (0,0,0,0)
# arima wpi, arima(0,0,3) noconstant vce(oim)
# save_results 9
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
super(Test_ma, self).__init__(8, *args, **kwargs)
class Test_ma_as_polynomial(SARIMAXCoverageTest):
# // MA: (0,0,q) x (0,0,0,0)
# arima wpi, arima(0,0,3) noconstant vce(oim)
# save_results 9
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,[1,1,1])
super(Test_ma_as_polynomial, self).__init__(8, *args, **kwargs)
class Test_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(0,0,3) noconstant vce(oim)
# save_results 10
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = 'c'
super(Test_ma_trend_c, self).__init__(9, *args, **kwargs)
class Test_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(0,0,3) noconstant vce(oim)
# save_results 11
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = 'ct'
super(Test_ma_trend_ct, self).__init__(10, *args, **kwargs)
class Test_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(0,0,3) noconstant vce(oim)
# save_results 12
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = [1,0,0,1]
super(Test_ma_trend_polynomial, self).__init__(11, *args, **kwargs)
class Test_ma_diff(SARIMAXCoverageTest):
# // MA and I(d): (0,d,q) x (0,0,0,0)
# arima wpi, arima(0,2,3) noconstant vce(oim)
# save_results 13
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,3)
super(Test_ma_diff, self).__init__(12, *args, **kwargs)
class Test_ma_seasonal_diff(SARIMAXCoverageTest):
# // MA and I(D): (p,0,0) x (0,D,0,s)
# arima wpi, arima(0,0,3) sarima(0,2,0,4) noconstant vce(oim)
# save_results 14
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_ma_seasonal_diff, self).__init__(13, *args, **kwargs)
class Test_ma_diffuse(SARIMAXCoverageTest):
# // MA and diffuse initialization
# arima wpi, arima(0,0,3) noconstant vce(oim) diffuse
# save_results 15
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
super(Test_ma_diffuse, self).__init__(14, *args, **kwargs)
self.model.initialize_approximate_diffuse(1e9)
class Test_ma_exogenous(SARIMAXCoverageTest):
# // MAX
# arima wpi x, arima(0,0,3) noconstant vce(oim)
# save_results 16
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ma_exogenous, self).__init__(15, *args, **kwargs)
class Test_arma(SARIMAXCoverageTest):
# // ARMA: (p,0,q) x (0,0,0,0)
# arima wpi, arima(3,0,3) noconstant vce(oim)
# save_results 17
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,3)
super(Test_arma, self).__init__(16, *args, **kwargs)
class Test_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3,0,2) noconstant vce(oim)
# save_results 18
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = 'c'
super(Test_arma_trend_c, self).__init__(17, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3,0,2) noconstant vce(oim)
# save_results 19
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = 'ct'
super(Test_arma_trend_ct, self).__init__(18, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(3,0,2) noconstant vce(oim)
# save_results 20
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = [1,0,0,1]
super(Test_arma_trend_polynomial, self).__init__(19, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_arma_diff(SARIMAXCoverageTest):
# // ARMA and I(d): (p,d,q) x (0,0,0,0)
# arima wpi, arima(3,2,2) noconstant vce(oim)
# save_results 21
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
super(Test_arma_diff, self).__init__(20, *args, **kwargs)
class Test_arma_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(D): (p,0,q) x (0,D,0,s)
# arima wpi, arima(3,0,2) sarima(0,2,0,4) noconstant vce(oim)
# save_results 22
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_arma_seasonal_diff, self).__init__(21, *args, **kwargs)
class Test_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(d) and I(D): (p,d,q) x (0,D,0,s)
# arima wpi, arima(3,2,2) sarima(0,2,0,4) noconstant vce(oim)
# save_results 23
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_arma_diff_seasonal_diff, self).__init__(22, *args, **kwargs)
class Test_arma_diffuse(SARIMAXCoverageTest):
# // ARMA and diffuse initialization
# arima wpi, arima(3,0,2) noconstant vce(oim) diffuse
# save_results 24
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
# TODO: diffuse initialization not supported through keywords #2259
kwargs['initial_variance'] = 1e9
super(Test_arma_diffuse, self).__init__(23, *args, **kwargs)
self.model.initialize_approximate_diffuse(1e9)
class Test_arma_exogenous(SARIMAXCoverageTest):
# // ARMAX
# arima wpi x, arima(3,0,2) noconstant vce(oim)
# save_results 25
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_arma_exogenous, self).__init__(24, *args, **kwargs)
class Test_seasonal_ar(SARIMAXCoverageTest):
# // SAR: (0,0,0) x (P,0,0,s)
# arima wpi, sarima(3,0,0,4) noconstant vce(oim)
# save_results 26
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
super(Test_seasonal_ar, self).__init__(25, *args, **kwargs)
class Test_seasonal_ar_as_polynomial(SARIMAXCoverageTest):
# // SAR: (0,0,0) x (P,0,0,s)
# arima wpi, sarima(3,0,0,4) noconstant vce(oim)
# save_results 26
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = ([1,1,1],0,0,4)
super(Test_seasonal_ar_as_polynomial, self).__init__(25, *args, **kwargs)
class Test_seasonal_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3,0,0,4) noconstant vce(oim)
# save_results 27
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = 'c'
super(Test_seasonal_ar_trend_c, self).__init__(26, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_seasonal_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3,0,0,4) noconstant vce(oim)
# save_results 28
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ar_trend_ct, self).__init__(27, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(3,0,0,4) noconstant vce(oim)
# save_results 29
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = [1,0,0,1]
super(Test_seasonal_ar_trend_polynomial, self).__init__(28, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_ar_diff(SARIMAXCoverageTest):
# // SAR and I(d): (0,d,0) x (P,0,0,s)
# arima wpi, arima(0,2,0) sarima(3,0,0,4) noconstant vce(oim)
# save_results 30
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,0,0,4)
super(Test_seasonal_ar_diff, self).__init__(29, *args, **kwargs)
class Test_seasonal_ar_seasonal_diff(SARIMAXCoverageTest):
# // SAR and I(D): (0,0,0) x (P,D,0,s)
# arima wpi, sarima(3,2,0,4) noconstant vce(oim)
# save_results 31
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,2,0,4)
super(Test_seasonal_ar_seasonal_diff, self).__init__(30, *args, **kwargs)
class Test_seasonal_ar_diffuse(SARIMAXCoverageTest):
# // SAR and diffuse initialization
# arima wpi, sarima(3,0,0,4) noconstant vce(oim) diffuse
# save_results 32
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
super(Test_seasonal_ar_diffuse, self).__init__(31, *args, **kwargs)
self.model.initialize_approximate_diffuse(1e9)
class Test_seasonal_ar_exogenous(SARIMAXCoverageTest):
# // SARX
# arima wpi x, sarima(3,0,0,4) noconstant vce(oim)
# save_results 33
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ar_exogenous, self).__init__(32, *args, **kwargs)
class Test_seasonal_ma(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0,0,3,4) noconstant vce(oim)
# save_results 34
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
super(Test_seasonal_ma, self).__init__(33, *args, **kwargs)
class Test_seasonal_ma_as_polynomial(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0,0,3,4) noconstant vce(oim)
# save_results 34
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,[1,1,1],4)
super(Test_seasonal_ma_as_polynomial, self).__init__(33, *args, **kwargs)
class Test_seasonal_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(0,0,3,4) noconstant vce(oim)
# save_results 35
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = 'c'
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_c, self).__init__(34, *args, **kwargs)
class Test_seasonal_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(0,0,3,4) noconstant vce(oim)
# save_results 36
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ma_trend_ct, self).__init__(35, *args, **kwargs)
class Test_seasonal_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(0,0,3,4) noconstant vce(oim)
# save_results 37
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = [1,0,0,1]
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_polynomial, self).__init__(36, *args, **kwargs)
class Test_seasonal_ma_diff(SARIMAXCoverageTest):
# // SMA and I(d): (0,d,0) x (0,0,Q,s)
# arima wpi, arima(0,2,0) sarima(0,0,3,4) noconstant vce(oim)
# save_results 38
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (0,0,3,4)
super(Test_seasonal_ma_diff, self).__init__(37, *args, **kwargs)
class Test_seasonal_ma_seasonal_diff(SARIMAXCoverageTest):
# // SMA and I(D): (0,0,0) x (0,D,Q,s)
# arima wpi, sarima(0,2,3,4) noconstant vce(oim)
# save_results 39
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,2,3,4)
super(Test_seasonal_ma_seasonal_diff, self).__init__(38, *args, **kwargs)
class Test_seasonal_ma_diffuse(SARIMAXCoverageTest):
# // SMA and diffuse initialization
# arima wpi, sarima(0,0,3,4) noconstant vce(oim) diffuse
# save_results 40
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
super(Test_seasonal_ma_diffuse, self).__init__(39, *args, **kwargs)
self.model.initialize_approximate_diffuse(1e9)
class Test_seasonal_ma_exogenous(SARIMAXCoverageTest):
# // SMAX
# arima wpi x, sarima(0,0,3,4) noconstant vce(oim)
# save_results 41
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ma_exogenous, self).__init__(40, *args, **kwargs)
class Test_seasonal_arma(SARIMAXCoverageTest):
# // SARMA: (0,0,0) x (P,0,Q,s)
# arima wpi, sarima(3,0,2,4) noconstant vce(oim)
# save_results 42
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
super(Test_seasonal_arma, self).__init__(41, *args, **kwargs)
class Test_seasonal_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3,0,2,4) noconstant vce(oim)
# save_results 43
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = 'c'
super(Test_seasonal_arma_trend_c, self).__init__(42, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_seasonal_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3,0,2,4) noconstant vce(oim)
# save_results 44
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_arma_trend_ct, self).__init__(43, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(3,0,2,4) noconstant vce(oim)
# save_results 45
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = [1,0,0,1]
kwargs['decimal'] = 3
super(Test_seasonal_arma_trend_polynomial, self).__init__(44, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_arma_diff(SARIMAXCoverageTest):
# // SARMA and I(d): (0,d,0) x (P,0,Q,s)
# arima wpi, arima(0,2,0) sarima(3,0,2,4) noconstant vce(oim)
# save_results 46
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,0,2,4)
super(Test_seasonal_arma_diff, self).__init__(45, *args, **kwargs)
class Test_seasonal_arma_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(D): (0,0,0) x (P,D,Q,s)
# arima wpi, sarima(3,2,2,4) noconstant vce(oim)
# save_results 47
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,2,2,4)
super(Test_seasonal_arma_seasonal_diff, self).__init__(46, *args, **kwargs)
class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(d) and I(D): (0,d,0) x (P,D,Q,s)
# arima wpi, arima(0,2,0) sarima(3,2,2,4) noconstant vce(oim)
# save_results 48
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,2,2,4)
super(Test_seasonal_arma_diff_seasonal_diff, self).__init__(47, *args, **kwargs)
class Test_seasonal_arma_diffuse(SARIMAXCoverageTest):
# // SARMA and diffuse initialization
# arima wpi, sarima(3,0,2,4) noconstant vce(oim) diffuse
# save_results 49
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['decimal'] = 3
super(Test_seasonal_arma_diffuse, self).__init__(48, *args, **kwargs)
self.model.initialize_approximate_diffuse(1e9)
class Test_seasonal_arma_exogenous(SARIMAXCoverageTest):
# // SARMAX
# arima wpi x, sarima(3,0,2,4) noconstant vce(oim)
# save_results 50
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_arma_exogenous, self).__init__(49, *args, **kwargs)
class Test_sarimax_exogenous(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
# save_results 51
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_sarimax_exogenous, self).__init__(50, *args, **kwargs)
class Test_sarimax_exogenous_not_hamilton(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
# save_results 51
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['hamilton_representation'] = False
kwargs['simple_differencing'] = False
super(Test_sarimax_exogenous_not_hamilton, self).__init__(50, *args, **kwargs)
class Test_sarimax_exogenous_diffuse(SARIMAXCoverageTest):
# // SARIMAX and exogenous diffuse
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim) diffuse
# save_results 52
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['decimal'] = 2
super(Test_sarimax_exogenous_diffuse, self).__init__(51, *args, **kwargs)
self.model.initialize_approximate_diffuse(1e9)
class Test_arma_exog_trend_polynomial_missing(SARIMAXCoverageTest):
# // ARMA and exogenous and trend polynomial and missing
# gen wpi2 = wpi
# replace wpi2 = . in 10/19
# arima wpi2 x c t3, arima(3,0,2) noconstant vce(oim)
# save_results 53
def __init__(self, *args, **kwargs):
endog = np.r_[results_sarimax.wpi1_data]
# Note we're using the non-missing exog data
kwargs['exog'] = ((endog - np.floor(endog))**2)[1:]
endog[9:19] = np.nan
endog = endog[1:] - endog[:-1]
endog[9] = np.nan
kwargs['order'] = (3,0,2)
kwargs['trend'] = [0,0,0,1]
kwargs['decimal'] = 1
super(Test_arma_exog_trend_polynomial_missing, self).__init__(52, endog=endog, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[0] = (1 - self.true_params[2:5].sum()) * self.true_params[0]
# Miscellaneous coverage tests
def test_simple_time_varying():
    # This tests time-varying parameter regression in the case where the
    # parameters are not actually time-varying and the regression fit is
    # exactly perfect
endog = np.arange(100)*1.0
exog = 2*endog
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
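    # Since endog = 0.5 * exog holds exactly, the time-varying regression
    # coefficient should settle at 0.5 and both error variances should be
    # driven to (essentially) zero.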
# Ignore the warning that MLE doesn't converge
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
# Test that the estimated variances of the errors are essentially zero
assert_almost_equal(res.params, [0,0], 7)
# Test that the time-varying coefficients are all 0.5 (except the first
# one)
assert_almost_equal(res.filtered_state[0][1:], [0.5]*99, 9)
def test_invalid_time_varying():
assert_raises(ValueError, sarimax.SARIMAX, endog=[1,2,3], mle_regression=True, time_varying_regression=True)
def test_manual_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3,0,0))
mod1.update([0.5,0.2,0.1,1])
res1 = mod1.filter()
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3,0,0))
mod2.update([0.5,0.2,0.1,1])
mod2.initialize_known(res1.initial_state, res1.initial_state_cov)
mod2.initialize_state() # a noop in this case (include for coverage)
res2 = mod2.filter()
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filtered_state, res2.filtered_state)
def test_results():
endog = results_sarimax.wpi1_data
mod = sarimax.SARIMAX(endog, order=(1,0,1))
mod.update([0.5,-0.5,1])
res = mod.filter()
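    # With params [0.5, -0.5, 1] the lag polynomials are 1 - 0.5L on both the
    # AR and the MA side (maparams holds -0.5 but enters as 1 + theta*L), so
    # each has a single real root at L = 2 and the associated frequency is
    # arctan2(0, 2) / (2*pi) = 0, as asserted below.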
assert_almost_equal(res.arroots, 2.)
assert_almost_equal(res.maroots, 2.)
assert_almost_equal(res.arfreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.mafreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.arparams, [0.5])
assert_almost_equal(res.maparams, [-0.5])
| bsd-3-clause |
igorcompuff/ns-3.26 | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
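# NormalVariable takes (mean, variance), so variance 225 corresponds to the
# sigma = 15 quoted in the annotation below.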
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
maniteja123/numpy | numpy/core/tests/test_multiarray.py | 6 | 246246 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
import os
import gc
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_warns,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # itemsizes that are not a power of two are accessed byte-wise and
        # thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
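        # numpy integer scalars must hash identically to the equivalent
        # Python ints (so that e.g. np.int8(-4) and -4 collide as dict
        # keys); check this at the signed/unsigned type boundaries +/- 2**i.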
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
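        # Element i of the view sits at byte (offset + i*strides)*itemsize of
        # x's buffer, so make_array(4, 4, -1) walks backwards from x[4] and
        # yields [4, 3, 2, 1]; combinations that leave the buffer must raise.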
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
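        # (46341**2 = 2147488281 just exceeds 2**31 - 1 = 2147483647, and
        # 3037000500**2 similarly exceeds 2**63 - 1.)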
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
            # true for IEEE floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object, by raising a ValueError. Class Map is a
        mapping type indicated by raising a KeyError. At some point we may
        raise a warning instead of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
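            # With zero strides the data all alias the small buffer; what is
            # being checked is only that the byte-level extent
            # shape[0] * itemsize still fits in intp, and that one element
            # more overflows and raises.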
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
# New in 1.12: This behavior changes in 1.13, test for dep warning
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
with assert_warns(FutureWarning):
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
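        # Enumerate all 2**power bit patterns: ``l`` is the pattern of i as
        # booleans over ``length`` positions and ``c`` its popcount, which
        # count_nonzero must keep reproducing even after the uint8 view
        # below rewrites the True bytes to other nonzero values.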
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:  # legacy typecodes; never matches the dtype objects above
self.assertRaises(ArithmeticError, a.prod)
self.assertRaises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
a.sort(kind=kind)
def test_sort_degraded(self):
# a degenerate input that would take minutes to sort with a plain quicksort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median-of-3 killer, where each median-of-3 pivot ends up
# being the second-to-last element of the sorted quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
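# sort on a structured dtype whose integer field has non-native byte order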
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
# Check 1 element
a = np.ones(1)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# search within the unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test that the cached binary-search bounds reset correctly when
# successive keys decrease
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
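# datetimes need an explicit unit, and booleans have only two
# distinct values, so adjust the test arrays per dtype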
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# search within the unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.argpartition(a, 0, axis=axis),
             np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.argpartition(a, 0, axis=None),
             np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median-of-3 killer input, O(n^2) for quickselect with a pure
# median-of-3 pivot; exercises the median-of-medians-of-5 code
# used to keep the running time O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
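# e.g. np.roll(np.arange(8), 4) gives [4, 5, 6, 7, 0, 1, 2, 3]; such
# rotations are the bad case for a pure median-of-3 pivot choice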
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
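# fuzz a few dtypes at two sizes; 9 and 16 are assumed to exercise both
# the small-array and the regular selection code paths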
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
for row in d1:
np.random.shuffle(row)  # a bare map() is lazy on Python 3 and would not shuffle
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# assert_array_less does not broadcast this comparison cleanly, so check manually
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# assert_array_less does not broadcast this comparison cleanly, so check manually
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
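"""Check that d is partitioned at every index in kth: strictly smaller
elements before position k, greater-or-equal elements from k on."""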
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
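# kth contains out-of-range entries (429 and 231 exceed d.size), so
# partitioning must raise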
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
for row in d1:
np.random.shuffle(row)  # a bare map() is lazy on Python 3 and would not shuffle
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6, 7, 3, 2, 9, 0]
p = np.argpartition(d, 1)
self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
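# hand-computed expectations: ddt == d.dot(d.T) (4x4) and
# dtd == d.T.dot(d) (6x6)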
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(np.dot(eaf, eaf), eaf)
assert_equal(np.dot(eaf.T, eaf), eaf)
assert_equal(np.dot(eaf, eaf.T), eaf)
assert_equal(np.dot(eaf.T, eaf.T), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(np.dot(ebf, ebf), eaf)
assert_equal(np.dot(ebf.T, ebf), eaf)
assert_equal(np.dot(ebf, ebf.T), eaf)
assert_equal(np.dot(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
np.dot(edf[::-1, :], edf.T),
np.dot(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf[:, ::-1], edf.T),
np.dot(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf, edf[::-1, :].T),
np.dot(edf, edf[::-1, :].T.copy())
)
assert_equal(
np.dot(edf, edf[:, ::-1].T),
np.dot(edf, edf[:, ::-1].T.copy())
)
assert_equal(
np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(np.dot(edf, edf.T), eddtf)
assert_equal(np.dot(edf.T, edf), edtdf)
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test applies to 1.9 only; the diagonal view becomes
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# np.put must raise a TypeError when the target
# object is not an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# Under NPY_RELAXED_STRIDES_CHECKING the strides of 1-sized axes are
# deliberately scrambled, so only check when the default stride is 8:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
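# idx[k] enumerates the k-th coordinate of every element; fancy-indexing
# with permuted idx entries compares the swapped view against the source
# element by element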
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
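# complex() is only defined for arrays with exactly one element,
# regardless of dimensionality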
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestBinop(object):
def test_inplace(self):
# temporaries with refcount 1 may be elided into in-place operations
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the original that was returned must not have been modified in place
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# Scanning whether the refcount-1 object is on the Python stack, to
# verify that we were called directly from Python, is flawed: the
# object may still sit above the stack pointer, which we cannot inspect
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the original that was returned must not have been modified in place
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
if i < len(inputs):
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
if ('out' in kw) and (kw['out'] is not None):
kw['out'] = np.asarray(kw['out'])
r = func(*inputs, **kw)
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# Obj3 is subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that the index is set appropriately, also if only an output
# is passed on (the latter is another regression test for github bug 4753)
class CheckIndex(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return i
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), 1)
assert_equal(np.sin(dummy, out=a), 1)
assert_equal(np.sin(dummy, out=(a,)), 1)
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), 1)
assert_equal(np.modf(dummy, None, a), 2)
assert_equal(np.modf(dummy, dummy, a), 2)
assert_equal(np.modf(dummy, out=a), 1)
assert_equal(np.modf(dummy, out=(a,)), 1)
assert_equal(np.modf(dummy, out=(a, None)), 1)
assert_equal(np.modf(dummy, out=(a, dummy)), 1)
assert_equal(np.modf(dummy, out=(None, a)), 2)
assert_equal(np.modf(dummy, out=(dummy, a)), 2)
assert_equal(np.modf(a, out=(dummy, a)), 0)
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), 2)
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), 2)
assert_equal(np.add(dummy, dummy, out=(a,)), 2)
assert_equal(np.add(a, dummy, out=a), 0)
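        # Editor's note: the reported index is the position of the object in
        # the flattened (inputs + outputs) argument list; e.g. in
        # np.modf(dummy, None, a) above, a is the second output after one
        # input, hence index 2.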
def test_out_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# regression test for github bug 4753
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
    def test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
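    # Editor's note: ndarray.dumps() returns the array pickled to a byte
    # string, so pickle.loads() round-trips it; _loads below wraps np.loads
    # to paper over the bytes/str differences between Python 2 and 3.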
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
            # Choosing along the moved axis with the argmax indices must
            # reproduce the maxima.
            assert_(np.all(amax == aargmax.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could be relaxed possibly (used to allow even the previous)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
        a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
        a = np.empty(4, dtype='O')
        # Zero the object pointers so every entry is a NULL.
        ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could be relaxed possibly (used to allow even the previous)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
        a = np.empty(4, dtype='O')
        # Zero the object pointers so every entry is a NULL.
        ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(TestCase):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
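# Editor's sketch (hypothetical helper, not part of the original suite):
# without an axis argument np.compress operates on the flattened array,
# which is why test_flatten above selects the single element 1.
def _compress_demo():
    import numpy as np
    arr = np.arange(10).reshape(2, 5)
    # Flattened: condition [0, 1] keeps only flat index 1.
    assert np.compress([0, 1], arr).tolist() == [1]
    # Along axis 0: condition [0, 1] keeps only the second row.
    assert np.compress([0, 1], arr, axis=0).tolist() == [[5, 6, 7, 8, 9]]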
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
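    # np.lexsort sorts by the *last* key first: in the tests below the
    # second element of each key tuple is the primary key and the first
    # only breaks ties (editor's note).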
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
        a = np.array([0, 0, 0], dtype='datetime64[D]')
        b = np.array([2, 1, 0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
        a = np.array([0, 0, 0], dtype='timedelta64[D]')
        b = np.array([2, 1, 0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(ValueError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
        self.x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str imbues less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
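    # Editor's note: repr() emits enough digits to round-trip a float
    # exactly, while str() may truncate -- hence the exact comparison here
    # versus the almost-equal comparison in test_roundtrip_str above.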
def test_unbuffered_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise io.IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
            f.seek(0, 1)  # ANSI C requires a seek (or flush) between read and write
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
        # The exact text depends on float repr precision, so compare the
        # parsed values rather than the raw string:
        # assert_equal(s, '1.51,2.0,3.51,4.0')
        y = np.array([float(p) for p in s.split(',')])
        assert_array_equal(x, y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
        assert_array_equal(np.frombuffer(buffer, **kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
                yield self.tst_basic, buf, x.flat, {'dtype': dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
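        # Editor's note: for the writeable, non-contiguous source (self.b0)
        # the flat iterator cannot expose the original memory directly, so
        # __array__ returns an UPDATEIFCOPY buffer whose contents are
        # written back to the base array when the buffer is deallocated.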
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is True)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
assert_(f.flags.updateifcopy is True)
assert_(f.base is self.b0)
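# Editor's note: ndarray.resize reallocates in place and therefore refuses
# to run when other references to the array may exist; PyPy has no reliable
# reference counts, so the tests below pass refcheck=False there.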
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
        assert_array_equal(x, np.eye(3)[0, :])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
# Error raised when multiple fields have the same name(unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
with suppress_warnings() as sup:
sup.filter(FutureWarning,
"Assignment between structured arrays.*")
sup.filter(FutureWarning,
"Numpy has detected that you .*")
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
(2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
(3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
(2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
# For <=1.12 a is not modified, but it will be in 1.13
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
# make sure views of a multi-field index warn too
c = np.zeros(3, dtype='i8,i8,i8')
assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
[FutureWarning])
# make sure assignment using a different dtype warns
a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
        assert_array_equal(y, [67305985, 134678021])  # 0x04030201, 0x08070605
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
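    # Editor's sketch (hypothetical helper, not part of the original suite)
    # of the identity test_ddof relies on: for an axis of length n, the sum
    # of squared deviations equals var(ddof) * (n - ddof) for any ddof.
    def _demo_ddof_identity(self):
        x = np.arange(5.0)
        n = x.size
        for ddof in range(3):
            assert abs(x.var(ddof=ddof) * (n - ddof) - x.var() * n) < 1e-12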
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
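    # Editor's sketch (hypothetical helper, not part of the original suite):
    # unlike np.dot, np.vdot flattens its arguments and conjugates the first
    # one, so for complex vectors vdot(a, b) == dot(a.conj(), b).
    def _demo_vdot_conjugates(self):
        a = np.array([1 + 2j, 3 + 4j])
        b = np.array([5 + 6j, 7 + 8j])
        assert_equal(np.vdot(a, b), np.dot(a.conj(), b))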
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
        tgt = np.array([[0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
        tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
        # Expected np.dot result shapes for every pair from `dims`, listed
        # in itertools.product order.
        dims = [(), (1,), (1, 1)]
        dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
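    # Editor's note: the 3-argument form of dot writes into the supplied
    # output array and returns it, so the identity checks above verify that
    # no intermediate copy is made.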
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
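    # Editor's note: regression test for the OSX Accelerate framework sgemv
    # bug, where single-precision matrix-vector products on misaligned or
    # strided inputs could crash or return wrong results.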
def test_accelerate_framework_sgemv_fix(self):
        def aligned_array(shape, align, dtype, order='C'):
            # Over-allocate by `align` bytes, then slice the buffer so the
            # data pointer of the returned array is a multiple of `align`.
            d = dtype(0)
            N = np.prod(shape)
            tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
            address = tmp.__array_interface__["data"][0]
            for offset in range(align):
                if (address + offset) % align == 0:
                    break
            tmp = tmp[offset:offset + N * d.nbytes].view(dtype=dtype)
            return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
        testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
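        # __numpy_ufunc__ dispatch is disabled in this numpy version, so bail
        # out early; the body below documents the intended override behavior.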
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
        # skip the following tests for now: cblas does not allow
        # non-contiguous outputs, and consistency with dot would require the
        # same type, dimensions, subtype, and C-contiguity.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
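# A minimal sketch of the supported pattern once "a @= b" is rejected:
# rebind the name to a fresh result instead of mutating in place (np.matmul
# is used here so the snippet also parses on Python 2).
_a, _b = np.eye(3), np.eye(3)
_a = np.matmul(_a, _b)  # same effect as the unsupported "_a @= _b"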
class TestInner(TestCase):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_inner_scalar_and_matrix(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
assert_equal(np.inner(arr, sca), desired)
assert_equal(np.inner(sca, arr), desired)
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself segfaulted or gave a
        # meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestAlen(TestCase):
def test_basic(self):
m = np.array([1, 2, 3])
self.assertEqual(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
self.assertEqual(np.alen(m), 2)
m = [1, 2, 3]
self.assertEqual(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
self.assertEqual(np.alen(m), 2)
def test_singleton(self):
self.assertEqual(np.alen(5), 1)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(TestCase):
def setUp(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
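# A minimal pure-Python sketch of the 1-d zero-padded neighborhood iteration
# exercised below; the real iterator is implemented in C in
# multiarray/multiarray_tests.c.src, and the bounds [lo, hi] are inclusive.
def _neigh_zero_1d(x, lo, hi):
    n = len(x)
    return [[x[j] if 0 <= j < n else 0 for j in range(i + lo, i + hi + 1)]
            for i in range(n)]
# _neigh_zero_1d([1, 2, 3], -1, 1) == [[0, 1, 2], [1, 2, 3], [2, 3, 0]]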
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
    # 4th simple, 1d test: stacking 2 neigh iterators, with the lower
    # iterator strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly
        # inside the array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
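# np.min_scalar_type picks the smallest dtype kind that can hold the value
# without demotion; negative inputs switch to the signed kinds, e.g.
# np.min_scalar_type(-1) == np.dtype('int8').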
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
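        # np.dtype() replaces an empty trailing field name with a default
        # ('f1', ...), so patch the empty name back in afterwards: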
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
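        # np.asarray should return a view into the buffer; np.array copies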
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046: should not segfault, but raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small-memory cache in ctors.c
a = np.zeros(1000)
del a
gc.collect()
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
rainforestapp/destimator | destimator/tests/test.py | 1 | 7733 | from __future__ import print_function, unicode_literals
import os
import shutil
import zipfile
import datetime
import tempfile
import subprocess
from copy import deepcopy
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from sklearn.dummy import DummyClassifier
from destimator import DescribedEstimator, utils
@pytest.fixture
def features():
return np.zeros([10, 3])
@pytest.fixture
def labels():
labels = np.zeros(10)
labels[5:] = 1.0
return labels
@pytest.fixture
def clf(features, labels):
clf = DummyClassifier(strategy='constant', constant=0.0)
clf.fit(features, labels)
return clf
@pytest.fixture
def clf_described(clf, features, labels, feature_names):
return DescribedEstimator(clf, features, labels, features, labels, feature_names)
@pytest.fixture
def feature_names():
return ['one', 'two', 'three']
@pytest.fixture
def metadata_v1():
return {
'metadata_version': 1,
'created_at': '2016-01-01-00-00-00',
'feature_names': ['f0', 'f1', 'f2'],
'vcs_hash': 'deadbeef',
'distribution_info': {
'python': 3.5,
'packages': [],
},
}
@pytest.fixture
def metadata_v2():
return {
'metadata_version': 2,
'created_at': '2016-02-01-00-00-00',
'feature_names': ['f0', 'f1', 'f2'],
'vcs_hash': 'deadbeef',
'distribution_info': {
'python': 3.5,
'packages': [],
},
'performance_scores': {
'precision': [0.7],
'recall': [0.8],
'fscore': [0.9],
'support': [100],
'roc_auc': 0.6,
'log_loss': 0.5,
}
}
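# metadata_v2 differs from metadata_v1 in its version number, timestamp and
# the added 'performance_scores' section; test_eq below relies on this.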
class TestDescribedEstimator(object):
def test_init(self, clf_described):
assert clf_described.n_training_samples_ == 10
assert clf_described.n_features_ == 3
def test_init_error(self, clf, features, labels, feature_names):
with pytest.raises(ValueError):
wrong_labels = np.zeros([9, 1])
DescribedEstimator(clf, features, wrong_labels, features, labels, feature_names)
with pytest.raises(ValueError):
wrong_feature_names = ['']
DescribedEstimator(clf, features, labels, features, labels, wrong_feature_names)
def test_eq(self, clf, features, labels, feature_names, metadata_v1, metadata_v2):
d1 = DescribedEstimator(clf, features, labels, features, labels, compute_metadata=False, metadata=metadata_v1)
d1b = DescribedEstimator(clf, features, labels, features, labels, compute_metadata=False, metadata=metadata_v1)
assert d1 == d1b
d2 = DescribedEstimator(clf, features, labels, features, labels, compute_metadata=False, metadata=metadata_v2)
assert d1 != d2
metadata_v1a = dict(metadata_v1)
metadata_v1a['metadata_version'] = 3
d1a = DescribedEstimator(clf, features, labels, features, labels, compute_metadata=False, metadata=metadata_v1a)
assert d1 != d1a
def test_from_file(self, clf_described):
save_dir = tempfile.mkdtemp()
try:
file_path = clf_described.save(save_dir)
destimator = DescribedEstimator.from_file(file_path)
assert destimator == clf_described
finally:
shutil.rmtree(save_dir)
def test_is_compatible(self, clf, clf_described, features, labels):
compatible = DescribedEstimator(clf, features, labels, features, labels, ['one', 'two', 'three'])
assert clf_described.is_compatible(compatible)
incompatible = DescribedEstimator(clf, features, labels, features, labels, ['one', 'two', 'boom'])
assert not clf_described.is_compatible(incompatible)
def test_metadata(self, clf, features, labels, feature_names):
clf_described = DescribedEstimator(clf, features, labels, features, labels, feature_names)
d = clf_described.metadata
assert d['feature_names'] == feature_names
# assert type(d['metadata_version']) == str
assert type(datetime.datetime.strptime(d['created_at'], '%Y-%m-%d-%H-%M-%S')) == datetime.datetime
# assert type(d['vcs_hash']) == str
assert type(d['distribution_info']) == dict
# assert type(d['distribution_info']['python']) == str
assert type(d['distribution_info']['packages']) == list
assert type(d['performance_scores']['precision']) == list
assert type(d['performance_scores']['precision'][0]) == float
assert type(d['performance_scores']['recall']) == list
assert type(d['performance_scores']['recall'][0]) == float
assert type(d['performance_scores']['fscore']) == list
assert type(d['performance_scores']['fscore'][0]) == float
assert type(d['performance_scores']['support']) == list
assert type(d['performance_scores']['support'][0]) == int
assert type(d['performance_scores']['roc_auc']) == float
assert type(d['performance_scores']['log_loss']) == float
def test_get_metric(self, clf_described):
assert clf_described.recall == [1.0, 0.0]
assert clf_described.roc_auc == 0.5
        # log_loss clips probabilities at eps=1e-15, so -log(1e-15) / 2 is
        # approximately 17.269
assert_almost_equal(clf_described.log_loss, 17.269, decimal=3)
def test_save_classifier(self, clf_described):
save_dir = tempfile.mkdtemp()
try:
saved_name = clf_described.save(save_dir)
assert os.path.dirname(saved_name) == save_dir
assert os.path.isfile(saved_name)
assert saved_name.endswith('.zip')
zf = zipfile.ZipFile(saved_name)
files_present = zf.namelist()
expected_files = [
'model.bin', 'features_train.bin', 'labels_train.bin',
'features_test.bin', 'labels_test.bin', 'metadata.json',
]
# could use a set, but this way errors are easier to read
for f in expected_files:
assert f in files_present
finally:
shutil.rmtree(save_dir)
def test_save_classifier_with_filename(self, clf_described):
save_dir = tempfile.mkdtemp()
try:
saved_name = clf_described.save(save_dir, filename='boom.pkl')
assert os.path.basename(saved_name) == 'boom.pkl.zip'
assert os.path.isfile(saved_name)
finally:
shutil.rmtree(save_dir)
def test_save_classifier_nonexistent_path(self, clf_described):
save_dir = tempfile.mkdtemp()
try:
saved_name = clf_described.save(os.path.join(save_dir, 'nope'))
os.path.dirname(saved_name) == save_dir
assert os.path.isfile(saved_name)
finally:
shutil.rmtree(save_dir)
class TestGetCurrentGitHash(object):
def test_get_current_vcs_hash(self, monkeypatch):
def fake_check_output(*args, **kwargs):
return b'thisisagithash'
monkeypatch.setattr(subprocess, 'check_output', fake_check_output)
assert utils.get_current_vcs_hash() == 'thisisagithash'
def test_get_current_vcs_hash_no_git(self, monkeypatch):
def fake_check_output(*args, **kwargs):
raise OSError()
monkeypatch.setattr(subprocess, 'check_output', fake_check_output)
assert utils.get_current_vcs_hash() == ''
def test_get_current_vcs_hash_git_error(self, monkeypatch):
def fake_check_output(*args, **kwargs):
raise subprocess.CalledProcessError(0, '', '')
monkeypatch.setattr(subprocess, 'check_output', fake_check_output)
assert utils.get_current_vcs_hash() == ''
| mit |
anjalisood/spark-tk | regression-tests/sparktkregtests/testcases/frames/extreme_value_test.py | 11 | 8496 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Exercise NaN and Inf values in various contexts. """
import unittest
import numpy as np
import math
import sys
import os
from sparktkregtests.lib import sparktk_test
class ExtremeValueTest(sparktk_test.SparkTKTestCase):
def setUp(self):
self.data_proj = self.get_file("extreme_value.csv")
self.schema_proj = [("col_A", int),
("col_B", int),
("col_C", float),
("Double", float),
("Text", str)]
def test_extreme_projection(self):
""" Test projection including Inf / NaN data """
master = self.context.frame.import_csv(
self.data_proj, schema=self.schema_proj)
self.assertEqual(master.count(), 7)
# Add a new column; replace some values with +/-Inf or NaN
def add_extremes(row):
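            # map selected col_A values to IEEE special values (Inf/-Inf/NaN);
            # every other row keeps its original col_A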
new_val = {123456: np.inf,
777: -np.inf,
4321: np.nan}
return new_val.get(row["col_A"], row["col_A"])
master.add_columns(add_extremes, ("col_D", float))
proj_3col = master.copy(['col_D', 'Double', 'Text'])
self.assertEqual(proj_3col.count(), master.count())
self.assertEqual(len(proj_3col.column_names), 3)
proj_1col = master.copy({'col_A': 'extremes'})
self.assertEqual(proj_1col.count(), master.count())
self.assertEqual(len(proj_1col.column_names), 1)
        # check that the NaN/Inf values are present
test_extreme = master.to_pandas()
for index, row in test_extreme.iterrows():
if(row['col_A'] == 123456 or row['col_A'] == 777):
self.assertTrue(math.isinf(row['col_D']))
if(row['col_A'] == 4321):
self.assertTrue(math.isnan(row['col_D']))
def test_extreme_copy(self):
""" Test copy with Inf / NaN data """
# Add a new column; replace some values with +/-Inf or NaN
def add_extremes(row):
new_val = {700: np.inf,
701: -np.inf,
702: np.nan}
return new_val.get(row["col_A"], row["col_A"])
frame = self.context.frame.import_csv(
self.data_proj, schema=self.schema_proj)
frame.add_columns(add_extremes, ("col_D", float))
frame_copy = frame.copy()
self.assertEqual(frame.column_names, frame_copy.column_names)
self.assertFramesEqual(frame_copy, frame)
def test_extreme_maxmin32(self):
""" Test extremal 32 bit float values"""
schema_maxmin32 = [("col_A", float),
("col_B", float)]
extreme32 = self.context.frame.import_csv(
self.get_file("BigAndTinyFloat32s.csv"),
schema=schema_maxmin32)
self.assertEqual(extreme32.count(), 16)
extreme32.add_columns(lambda row: [np.sqrt(-9)],
[('neg_root', float)])
extake = extreme32.to_pandas(extreme32.count())
for index, row in extake.iterrows():
self.assertTrue(math.isnan(row['neg_root']))
def test_extreme_maxmin64(self):
""" Test extreme large and small magnitudes on 64-bit floats."""
data_maxmin64 = self.get_file('BigAndTinyFloat64s.csv')
schema_maxmin64 = [("col_A", float),
("col_B", float)]
extreme64 = self.context.frame.import_csv(
data_maxmin64, schema=schema_maxmin64)
self.assertEqual(extreme64.count(), 16)
extreme64.add_columns(lambda row:
[row.col_A*2, np.sqrt(-9)],
[("twice", float),
('neg_root', float)])
extake = extreme64.to_pandas(extreme64.count())
        # check for inf when doubling exceeds the 64-bit range; values in
        # [0, 1) double exactly
for index, row in extake.iterrows():
if row['col_A'] >= 1 or row['col_A'] < 0:
self.assertTrue(math.isinf(row['twice']))
else:
self.assertEqual(row['twice'], row['col_A'] * 2)
self.assertTrue(math.isnan(row['neg_root']))
def test_extreme_colmode(self):
""" Insert NaN and +/-Inf for weights"""
def add_extremes(row):
new_val = {"Charizard": np.inf,
"Squirtle": -np.inf,
"Wartortle": np.nan}
return new_val.get(row["item"], row['weight'])
data_stat = self.get_file("mode_stats.tsv")
schema_stat = [("weight", float), ("item", int)]
stat_frame = self.context.frame.import_csv(
data_stat, schema=schema_stat, delimiter="\t")
stat_frame.add_columns(add_extremes, ("weight2", float))
stats = stat_frame.column_mode(
"item", "weight2", max_modes_returned=50)
self.assertEqual(stats.mode_count, 2)
self.assertEqual(stats.total_weight, 1749)
        self.assertIn(60, stats.modes)
def test_extreme_col_summary(self):
""" Test column_summary_stats with Inf / NaN data """
# Add a new column; replace some values with +/-Inf or NaN
def add_extremes(row):
new_val = {20: np.inf,
30: -np.inf,
40: np.nan}
return new_val.get(row.Item, row.Item)
data_mode = self.get_file("mode_stats2.csv")
schema_mode = [("Item", int), ("Weight", int)]
stat_frame = self.context.frame.import_csv(
data_mode, schema=schema_mode)
        # Create a new column where 20/30/40 become Inf/-Inf/NaN, leaving
        # only the values 10 and 50 valid
stat_frame.add_columns(add_extremes, ("Item2", float))
stat_frame.drop_columns("Item")
stat_frame.rename_columns({"Item2": "Item"})
stats = stat_frame.column_summary_statistics(
"Item",
weights_column="Weight",
            use_population_variance=True)
self.assertEqual(stats.mean, 30)
self.assertEqual(stats.good_row_count, 20)
self.assertAlmostEqual(stats.mean_confidence_upper, 34.3826932359)
def test_extreme_col_summary_corner(self):
""" Test column_summary_stats with very large values and odd cases."""
def add_extremes(row):
bf = float(2) ** 1021
new_val = {600: 30,
1: 0,
2: bf * 2,
3: float(2) ** 1023,
3.1: 5.4}
return new_val.get(row.Factors, row.Factors)
expected_stats = [1624.6, 56, 563700.64]
data_corner = self.get_file("SummaryStats2.csv")
schema_corner = [("Item", float), ("Factors", float)]
stat_frame = self.context.frame.import_csv(
data_corner, schema=schema_corner)
stat_frame.add_columns(add_extremes, ("Weight", float))
stat_frame.drop_columns("Factors")
stats = stat_frame.column_summary_statistics(
"Item", weights_column="Weight")
        for actual, expected in zip([stats.mean,
                                     stats.good_row_count,
                                     stats.variance], expected_stats):
            self.assertAlmostEqual(actual, expected, places=2)
def test_extreme_col_summary_empty_frame(self):
expected_stats = [float('nan'), 0, 1.0]
schema = [("Item", float), ("Weight", float)]
stat_frame = self.context.frame.create([], schema=schema)
stats = stat_frame.column_summary_statistics(
"Item", weights_column="Weight")
        self.assertTrue(math.isnan(stats.maximum))
        self.assertEqual(stats.good_row_count, expected_stats[1])
        self.assertAlmostEqual(stats.geometric_mean, expected_stats[2])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
bssrdf/deeppy | examples/mlp_mnist.py | 9 | 2214 | #!/usr/bin/env python
"""
Digit classification
====================
"""
import numpy as np
import matplotlib.pyplot as plt
import deeppy as dp
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Prepare network inputs
batch_size = 128
train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
test_input = dp.Input(x_test)
# Setup network
weight_gain = 2.0
weight_decay = 0.0005
net = dp.NeuralNetwork(
layers=[
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(weight_gain),
weight_decay=weight_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(weight_gain),
weight_decay=weight_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=dataset.n_classes,
weights=dp.Parameter(dp.AutoFiller()),
),
],
loss=dp.SoftmaxCrossEntropy(),
)
# Train network
n_epochs = [50, 15]
learn_rate = 0.05
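# Two training phases: 50 epochs at learn_rate, then 15 epochs at
# learn_rate/10, a simple step decay implemented by the 10**i divisor below.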
for i, epochs in enumerate(n_epochs):
trainer = dp.StochasticGradientDescent(
max_epochs=epochs,
learn_rule=dp.Momentum(learn_rate=learn_rate/10**i, momentum=0.94),
)
trainer.train(net, train_input)
# Evaluate on test data
error = np.mean(net.predict(test_input) != y_test)
print('Test error rate: %.4f' % error)
# Plot dataset examples
def plot_img(img, title):
plt.figure()
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.axis('off')
plt.title(title)
plt.tight_layout()
imgs = np.reshape(x_train[:63, ...], (-1, 28, 28))
plot_img(dp.misc.img_tile(dp.misc.img_stretch(imgs)),
'Dataset examples')
# Plot learned features in first layer
w = np.array(net.layers[0].weights.array)
w = np.reshape(w.T, (-1,) + dataset.img_shape)
w = w[np.argsort(np.std(w, axis=(1, 2)))[-64:]]
plot_img(dp.misc.img_tile(dp.misc.img_stretch(w)),
'Examples of features learned')
| mit |
ARudiuk/mne-python | examples/decoding/plot_linear_model_patterns.py | 9 | 3099 | """
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Decoding, a.k.a. MVPA or supervised machine learning applied to MEG and EEG
data in sensor space. Fit a linear classifier with the LinearModel object
providing topographical patterns which are more neurophysiologically
interpretable [1] than the classifier filters (weight vectors).
The patterns explain how the MEG and EEG data were generated from the
discriminant neural sources which are extracted by the filters.
Note that patterns/filters in MEG data are more similar to each other than
in EEG data because the noise is less spatially correlated in MEG than in EEG.
[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,
Blankertz, B., & Bießmann, F. (2014). On the interpretation of
weight vectors of linear models in multivariate neuroimaging.
NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067
"""
# Authors: Alexandre Gramfort <[email protected]>
# Romain Trachel <[email protected]>
#
# License: BSD (3-clause)
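# A sketch of the filter-to-pattern relation from Haufe et al. [1]
# (illustrative only; mne's LinearModel performs the equivalent computation
# internally): for filter weights W and data covariance S = cov(X), the
# activation patterns are A = S @ W, up to a normalization by the covariance
# of the extracted sources.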
import mne
from mne import io
from mne.datasets import sample
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=4, baseline=None, preload=True)
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.copy().pick_types(meg=True, eeg=False)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
eeg_epochs = epochs.copy().pick_types(meg=False, eeg=True)
eeg_data = eeg_epochs.get_data().reshape(len(labels), -1)
###############################################################################
# Decoding in sensor space using a LogisticRegression classifier
clf = LogisticRegression()
sc = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = sc.fit_transform(meg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(meg_epochs.info, title='MEG Patterns')
model.plot_filters(meg_epochs.info, title='MEG Filters')
# fit the classifier on EEG data
X = sc.fit_transform(eeg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(eeg_epochs.info, title='EEG Patterns')
model.plot_filters(eeg_epochs.info, title='EEG Filters')
| bsd-3-clause |
deepakrana47/DT-RAE | roc.py | 1 | 4860 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import roc_curve, auc
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
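# Micro-averaging in a nutshell (illustrative; ROC_plot below does exactly
# this for its "micro" curve): flatten the binary label-indicator matrix and
# the score matrix, then treat every element as one binary prediction:
#     fpr, tpr, _ = roc_curve(y_test.ravel(), y_score.ravel())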
def ROC_plot(ddir, stp, num_feat, nhlayer, v_size):
# ddir = '/media/zero/41FF48D81730BD9B/Final_Thesies/results/SVM_results/deep21_v_200__0__15_1_min/'
trainset = np.loadtxt(ddir+'train_vector_dataset.csv', delimiter=',')
X_train = trainset[:, 1:]
ty_train = trainset[:,0]
y_train = np.zeros((trainset.shape[0],2))
for i in range(trainset.shape[0]):
if ty_train[i] == 1:
y_train[i,0] = 1
else:
y_train[i,1] = 1
testset = np.loadtxt(ddir+'test_vector_dataset.csv', delimiter=',')
X_test = testset[:, 1:]
ty_test = testset[:,0]
y_test = np.zeros((testset.shape[0],2))
for i in range(testset.shape[0]):
if ty_test[i] == 1:
y_test[i,0] = 1
else:
y_test[i,1] = 1
n_classes = 2
    classifier = OneVsRestClassifier(svm.LinearSVC(
        penalty='l2', tol=0.001, C=1.0, loss='hinge', fit_intercept=True,
        intercept_scaling=1.0, dual=True, verbose=0, random_state=None,
        max_iter=100000))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr[0], tpr[0], color='aqua', lw=lw,
label='ROC curve of paraphrase {0} (area = {1:0.2f})'
''.format(0, roc_auc[0]))
plt.plot(fpr[1], tpr[1], color='darkorange', lw=lw,
label='ROC curve of nonparaphrase {0} (area = {1:0.2f})'
''.format(1, roc_auc[1]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC for (hidden layer='+str(nhlayer)+'; stopword='+str(stp)+'; number_feature='+str(num_feat)+'; vect_size='+str(v_size)+')')
plt.legend(loc="lower right")
plt.savefig(ddir+'ROC.png',dpi=1000)
plt.close()
if __name__ == '__main__':
    ddir = '/media/zero/41FF48D81730BD9B/Final_Thesies/results/SVM_results/deep21_v_200__0__15_1_min/'
    # ROC_plot requires all five arguments; the values below are placeholders
    # read off the result-directory name and may need adjusting.
    ROC_plot(ddir, stp=0, num_feat=15, nhlayer=1, v_size=200)
| apache-2.0 |
raghavrv/scikit-learn | sklearn/gaussian_process/kernels.py | 31 | 67169 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower und upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
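# A minimal sketch of the kernel algebra described in the module docstring
# (illustrative only; uses the ConstantKernel, RBF and WhiteKernel classes
# defined below):
#     k = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
#     # The scalar 1.0 is promoted to a ConstantKernel; "*" builds a Product
#     # kernel, "+" a Sum kernel, and k.theta exposes the log-transformed
#     # free hyperparameters of the whole expression.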
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
raise ValueError("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (length_scale.shape[0], X.shape[1]))
return length_scale
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
.. versionadded:: 0.18
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
        changed during hyperparameter tuning. If None is passed, "fixed" is
        derived from the given bounds.
"""
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular,
    # it does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __init__ method we
    # would also reintroduce the __dict__ on the instance; we avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore, we don't need any additional
    # slot in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (self.name == other.name and
self.value_type == other.value_type and
np.all(self.bounds == other.bounds) and
self.n_elements == other.n_elements and
self.fixed == other.fixed)
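# Illustrative sketches of Hyperparameter specifications (scalar,
# vector-valued, and fixed):
#     Hyperparameter("length_scale", "numeric", (1e-5, 1e5))
#     Hyperparameter("length_scale", "numeric", (1e-5, 1e5), n_elements=3)
#     Hyperparameter("noise_level", "numeric", "fixed")  # excluded from tuning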
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr in dir(self):
if attr.startswith("hyperparameter_"):
r.append(getattr(self, attr))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i:i + hyperparameter.n_elements])
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1.
.. versionadded:: 0.18
"""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
.. versionadded:: 0.18
"""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        # All sub-kernels are assumed to expose the same number of free
        # hyperparameter dimensions (CompoundKernel has no k1 attribute).
        k_dims = self.kernels[0].n_dims
        for i, kernel in enumerate(self.kernels):
            kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple
        kernels stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators.
.. versionadded:: 0.18
"""
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
.. versionadded:: 0.18
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
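# Illustrative use of Exponentiation (a sketch): "**" on any kernel builds
# this wrapper, e.g.
#     k = RBF(length_scale=1.0) ** 2
#     # k(X, Y) == RBF(length_scale=1.0)(X, Y) ** 2, element-wise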
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter(
"constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter(
"noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
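# Illustrative sketch of WhiteKernel's behaviour:
#     k = WhiteKernel(noise_level=0.5)
#     # k(X) == 0.5 * np.eye(len(X)), while k(X, Y) with an explicit Y is all
#     # zeros: the noise term applies to the training targets only.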
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
.. versionadded:: 0.18
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(self.length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
                # Hyperparameter length_scale kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
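# Illustrative sketch: isotropic vs. anisotropic RBF (assuming 2-d inputs):
#     k_iso = RBF(length_scale=1.0)          # one shared length-scale
#     k_ani = RBF(length_scale=[1.0, 10.0])  # one length-scale per feature
#     # Both are normalized kernels, so k.diag(X) is an array of ones.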
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
.. versionadded:: 0.18
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
        [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approx. 10 times higher) since they require evaluating the modified
        Bessel function. Furthermore, in contrast to length_scale, nu is kept
        fixed to its initial value and is not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
                # Hyperparameter length_scale kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else:
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0],
self.nu)
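# Illustrative sketch of the special-cased nu values above:
#     Matern(nu=0.5)  # absolute exponential kernel, K = exp(-d)
#     Matern(nu=1.5)  # once-differentiable sample paths
#     Matern(nu=2.5)  # twice-differentiable sample paths
# Any other nu falls through to the (roughly 10x slower) modified Bessel
# branch; as nu -> inf the kernel approaches the RBF kernel.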
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_alpha(self):
return Hyperparameter("alpha", "numeric", self.alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
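# Illustrative sketch: RationalQuadratic approaches RBF as alpha -> inf,
# since (1 + d^2 / (2 * alpha * l^2)) ** -alpha -> exp(-d^2 / (2 * l^2)):
#     RationalQuadratic(length_scale=1.0, alpha=1e8)  # numerically ~ RBF(1.0)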
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
    parameter periodicity>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 * (sin(pi / periodicity * d(x_i, x_j)) / length_scale)^2)
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter(
"periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
            # gradient with respect to periodicity
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
            else:  # periodicity is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
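# Illustrative sketch of the periodicity parameter: inputs one period apart
# are perfectly correlated, since sin(pi * d / periodicity) vanishes there:
#     k = ExpSineSquared(length_scale=1.0, periodicity=3.0)
#     # k evaluated on x and x + 3.0 is 1, up to floating-point error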
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting N(0, 1) priors on the coefficients of x_d
    (d = 1, ..., D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct
    kernel is invariant to a rotation of the coordinates about the origin, but
    not translations. It is parameterized by a parameter sigma_0^2. For
    sigma_0^2 = 0, the kernel is called the homogeneous linear kernel,
    otherwise it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
.. versionadded:: 0.18
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
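# Illustrative sketch (not part of the original source): DotProduct composed
# with exponentiation, assuming the Exponentiation kernel defined earlier in
# this module via Kernel.__pow__. The input values are made up.
# >>> k = DotProduct(sigma_0=1.0) ** 2
# >>> k.diag(np.array([[1., 2.]]))  # (sigma_0**2 + 1**2 + 2**2)**2 == 36
# array([ 36.])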
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
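# Quick numerical check (illustrative, not part of the original source):
# _example_quadratic is a hypothetical helper with 2-D output, so
# _approx_fprime stacks one finite-difference slice per parameter along the
# last axis.
def _example_quadratic(theta):
    # f(theta) = theta_0^2 replicated over a 2x2 array
    return theta[0] ** 2 * np.ones((2, 2))
# _approx_fprime(np.array([3.0]), _example_quadratic, 1e-6) is then close to
# 6.0 * np.ones((2, 2, 1)), the derivative of theta^2 at theta = 3.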
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
.. versionadded:: 0.18
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X).ravel()
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
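# Illustrative usage sketch (not part of the original source): wrapping the
# RBF pairwise kernel; gamma is the only hyperparameter exposed to the
# optimizer, and the toy data below is made up.
# >>> k = PairwiseKernel(gamma=0.5, metric='rbf')
# >>> k(np.array([[0.], [1.]])).shape
# (2, 2)
# >>> k.is_stationary()
# True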
| bsd-3-clause |
dwettstein/pattern-recognition-2016 | mlp/neural_network/multilayer_perceptron.py | 1 | 50285 | """Multi-layer Perceptron
"""
# Authors: Issam H. Laradji <[email protected]>
# Andreas Mueller
# Jiyuan Qian
# Licence: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
from scipy.optimize import fmin_l_bfgs_b
import warnings
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from ._base import logistic, softmax
from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from ._stochastic_optimizers import SGDOptimizer, AdamOptimizer
from ..model_selection import train_test_split
from sklearn.externals import six
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import gen_batches, check_random_state
from sklearn.utils import shuffle
from sklearn.utils import check_array, check_X_y, column_or_1d
from .exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.multiclass import _check_partial_fit_first_call
_STOCHASTIC_ALGOS = ['sgd', 'adam']
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
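# Quick illustration (not part of the original source): _pack flattens weight
# matrices and intercept vectors into the single 1-D vector that l-bfgs and
# _unpack operate on.
# >>> _pack([np.ones((2, 3))], [np.zeros(3)]).shape
# (9,)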
class BaseMultilayerPerceptron(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, hidden_layer_sizes, activation, algorithm,
alpha, batch_size, learning_rate, learning_rate_init, power_t,
max_iter, loss, shuffle, random_state, tol, verbose,
warm_start, momentum, nesterovs_momentum, early_stopping,
validation_fraction, beta_1, beta_2, epsilon):
self.activation = activation
self.algorithm = algorithm
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations, with_output_activation=True):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
with_output_activation : bool, default True
If True, the output passes through the output activation
function, which is either the softmax function or the
logistic function
"""
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i],
self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
activations[i + 1] = hidden_activation(activations[i + 1])
# For the last layer
if with_output_activation:
output_activation = ACTIVATIONS[self.out_activation_]
activations[i + 1] = output_activation(activations[i + 1])
return activations
def _compute_loss_grad(self, layer, n_samples, activations, deltas,
coef_grads, intercept_grads):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
        This function does backpropagation for the specified layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T,
deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
return coef_grads, intercept_grads
def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas,
coef_grads, intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
        Returned gradients are packed in a single vector so they can be used
        in l-bfgs.
Parameters
----------
        packed_coef_inter : array-like
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
        coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
grad : array-like, shape (number of nodes of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, activations, deltas, coef_grads, intercept_grads)
self.n_iter_ += 1
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(self, X, y, activations, deltas, coef_grads,
intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
        coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1
"""
n_samples = X.shape[0]
# Forward propagate
activations = self._forward_pass(activations)
# Get loss
loss = LOSS_FUNCTIONS[self.loss](y, activations[-1])
# Add L2 regularization term to loss
values = np.sum(
np.array([np.dot(s.ravel(), s.ravel()) for s in self.coefs_]))
loss += (0.5 * self.alpha) * values / n_samples
# Backward propagate
last = self.n_layers_ - 2
# The calculation of delta[last] here works with following
# combinations of output activation and loss function:
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
diff = y - activations[-1]
deltas[last] = -diff
# Compute gradient for the last layer
coef_grads, intercept_grads = self._compute_loss_grad(
last, n_samples, activations, deltas, coef_grads, intercept_grads)
# Iterate over the hidden layers
for i in range(self.n_layers_ - 2, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
derivative = DERIVATIVES[self.activation]
deltas[i - 1] *= derivative(activations[i])
coef_grads, intercept_grads = self._compute_loss_grad(
i - 1, n_samples, activations, deltas, coef_grads,
intercept_grads)
return loss, coef_grads, intercept_grads
def _initialize(self, y, layer_units):
# set all attributes, allocate weights etc for first call
# Initialize parameters
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
# Compute the number of layers
self.n_layers_ = len(layer_units)
# Output for regression
if not isinstance(self, ClassifierMixin):
self.out_activation_ = 'identity'
# Output for multi class
elif self.label_binarizer_.y_type_ == 'multiclass':
self.out_activation_ = 'softmax'
# Output for binary class and multi-label
else:
self.out_activation_ = 'logistic'
if self.loss == 'log_loss':
self.loss = 'binary_log_loss'
# Initialize coefficient and intercept layers
self.coefs_ = []
self.intercepts_ = []
for i in range(self.n_layers_ - 1):
rng = check_random_state(self.random_state)
coef_init, intercept_init = self._init_coef(layer_units[i],
layer_units[i + 1],
rng)
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
if self.algorithm in _STOCHASTIC_ALGOS:
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = -np.inf
else:
self.best_loss_ = np.inf
def _init_coef(self, fan_in, fan_out, rng):
if self.activation == 'logistic':
# Use the initialization method recommended by
# Glorot et al.
init_bound = np.sqrt(2. / (fan_in + fan_out))
elif self.activation == 'tanh':
init_bound = np.sqrt(6. / (fan_in + fan_out))
elif self.activation == 'relu':
init_bound = np.sqrt(6. / (fan_in + fan_out))
else:
# this was caught earlier, just to make sure
raise ValueError("Unknown activation function %s" %
self.activation)
coef_init = rng.uniform(-init_bound, init_bound, (fan_in, fan_out))
intercept_init = rng.uniform(-init_bound, init_bound, fan_out)
return coef_init, intercept_init
def _fit(self, X, y, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
# Validate input parameters.
self._validate_hyperparameters()
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError("hidden_layer_sizes must be > 0, got %s." %
hidden_layer_sizes)
X, y = self._validate_input(X, y, incremental)
n_samples, n_features = X.shape
# Ensure y is 2D
if y.ndim == 1:
y = y.reshape((-1, 1))
self.n_outputs_ = y.shape[1]
layer_units = ([n_features] + hidden_layer_sizes +
[self.n_outputs_])
if not hasattr(self, 'coefs_') or (not self.warm_start and not
incremental):
# First time training the model
self._initialize(y, layer_units)
# l-bfgs does not support mini-batches
if self.algorithm == 'l-bfgs':
batch_size = n_samples
elif self.batch_size == 'auto':
batch_size = min(200, n_samples)
else:
if self.batch_size < 1 or self.batch_size > n_samples:
warnings.warn("Got `batch_size` less than 1 or larger than "
"sample size. It is going to be clipped")
batch_size = np.clip(self.batch_size, 1, n_samples)
# Initialize lists
activations = [X]
activations.extend(np.empty((batch_size, n_fan_out))
for n_fan_out in layer_units[1:])
deltas = [np.empty_like(a_layer) for a_layer in activations]
coef_grads = [np.empty((n_fan_in_, n_fan_out_)) for n_fan_in_,
n_fan_out_ in zip(layer_units[:-1],
layer_units[1:])]
intercept_grads = [np.empty(n_fan_out_) for n_fan_out_ in
layer_units[1:]]
# Run the Stochastic optimization algorithm
if self.algorithm in _STOCHASTIC_ALGOS:
self._fit_stochastic(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental)
# Run the LBFGS algorithm
elif self.algorithm == 'l-bfgs':
self._fit_lbfgs(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units)
return self
def _validate_hyperparameters(self):
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False, got %s." %
self.shuffle)
if self.max_iter <= 0:
raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0, got %s." % self.alpha)
if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
self.learning_rate_init <= 0.0):
raise ValueError("learning_rate_init must be > 0, got %s." %
self.learning_rate)
if self.momentum > 1 or self.momentum < 0:
raise ValueError("momentum must be >= 0 and <= 1, got %s" %
self.momentum)
if not isinstance(self.nesterovs_momentum, bool):
raise ValueError("nesterovs_momentum must be either True or False,"
" got %s." % self.nesterovs_momentum)
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False,"
" got %s." % self.early_stopping)
if self.validation_fraction < 0 or self.validation_fraction >= 1:
raise ValueError("validation_fraction must be >= 0 and < 1, "
"got %s" % self.validation_fraction)
if self.beta_1 < 0 or self.beta_1 >= 1:
raise ValueError("beta_1 must be >= 0 and < 1, got %s" %
self.beta_1)
if self.beta_2 < 0 or self.beta_2 >= 1:
raise ValueError("beta_2 must be >= 0 and < 1, got %s" %
self.beta_2)
if self.epsilon <= 0.0:
raise ValueError("epsilon must be > 0, got %s." % self.epsilon)
# raise ValueError if not registered
supported_activations = ['logistic', 'tanh', 'relu']
if self.activation not in supported_activations:
raise ValueError("The activation '%s' is not supported. Supported "
"activations are %s." % (self.activation,
supported_activations))
if self.learning_rate not in ["constant", "invscaling", "adaptive"]:
raise ValueError("learning rate %s is not supported. " %
self.learning_rate)
if self.algorithm not in _STOCHASTIC_ALGOS + ["l-bfgs"]:
raise ValueError("The algorithm %s is not supported. " %
self.algorithm)
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units):
# Store meta information for the parameters
self._coef_indptr = []
self._intercept_indptr = []
start = 0
# Save sizes and indices of coefficients for faster unpacking
for i in range(self.n_layers_ - 1):
n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
end = start + (n_fan_in * n_fan_out)
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
# Save sizes and indices of intercepts for faster unpacking
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
# Run LBFGS
packed_coef_inter = _pack(self.coefs_,
self.intercepts_)
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
optimal_parameters, self.loss_, d = fmin_l_bfgs_b(
x0=packed_coef_inter,
func=self._loss_grad_lbfgs,
maxfun=self.max_iter,
iprint=iprint,
pgtol=self.tol,
args=(X, y, activations, deltas, coef_grads, intercept_grads))
self._unpack(optimal_parameters)
def _fit_stochastic(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental):
rng = check_random_state(self.random_state)
if not incremental or not hasattr(self, '_optimizer'):
params = self.coefs_ + self.intercepts_
if self.algorithm == 'sgd':
self._optimizer = SGDOptimizer(
params, self.learning_rate_init, self.learning_rate,
self.momentum, self.nesterovs_momentum, self.power_t)
elif self.algorithm == 'adam':
self._optimizer = AdamOptimizer(
params, self.learning_rate_init, self.beta_1, self.beta_2,
self.epsilon)
# early_stopping in partial_fit doesn't make sense
early_stopping = self.early_stopping and not incremental
if early_stopping:
X, X_val, y, y_val = train_test_split(
X, y, random_state=self.random_state,
test_size=self.validation_fraction)
if isinstance(self, ClassifierMixin):
y_val = self.label_binarizer_.inverse_transform(y_val)
else:
X_val = None
y_val = None
n_samples = X.shape[0]
if self.batch_size == 'auto':
batch_size = min(200, n_samples)
else:
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
for it in range(self.max_iter):
X, y = shuffle(X, y, random_state=rng)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
activations[0] = X[batch_slice]
batch_loss, coef_grads, intercept_grads = self._backprop(
X[batch_slice], y[batch_slice], activations, deltas,
coef_grads, intercept_grads)
accumulated_loss += batch_loss * (batch_slice.stop -
batch_slice.start)
# update weights
grads = coef_grads + intercept_grads
self._optimizer.update_params(grads)
self.n_iter_ += 1
self.loss_ = accumulated_loss / X.shape[0]
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print("Iteration %d, loss = %.8f" % (self.n_iter_,
self.loss_))
# update no_improvement_count based on training loss or
# validation score according to early_stopping
self._update_no_improvement_count(early_stopping, X_val, y_val)
# for learning rate that needs to be updated at iteration end
self._optimizer.iteration_ends(self.t_)
if self._no_improvement_count > 2:
# not better than last two iterations by tol.
# stop or decrease learning rate
if early_stopping:
msg = ("Validation score did not improve more than "
"tol=%f for two consecutive epochs." % self.tol)
else:
msg = ("Training loss did not improve more than tol=%f"
" for two consecutive epochs." % self.tol)
is_stopping = self._optimizer.trigger_stopping(
msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if self.n_iter_ == self.max_iter:
                warnings.warn("Stochastic Optimizer: Maximum iterations "
                              "reached and the optimization hasn't "
                              "converged yet.", ConvergenceWarning)
except KeyboardInterrupt:
pass
if early_stopping:
# restore best weights
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X_val, y_val):
if early_stopping:
# compute validation score, use that for stopping
self.validation_scores_.append(self.score(X_val, y_val))
if self.verbose:
print("Validation score: %f" % self.validation_scores_[-1])
# update best parameters
# use validation_scores_, not loss_curve_
# let's hope no-one overloads .score with mse
last_valid_score = self.validation_scores_[-1]
if last_valid_score < (self.best_validation_score_ +
self.tol):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if last_valid_score > self.best_validation_score_:
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy()
for i in self.intercepts_]
else:
if self.loss_curve_[-1] > self.best_loss_ - self.tol:
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if self.loss_curve_[-1] < self.best_loss_:
self.best_loss_ = self.loss_curve_[-1]
def fit(self, X, y):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
return self._fit(X, y, incremental=False)
@property
def partial_fit(self):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
if self.algorithm not in _STOCHASTIC_ALGOS:
raise AttributeError("partial_fit is only available for stochastic"
"optimization algorithms. %s is not"
" stochastic" % self.algorithm)
return self._partial_fit
def _partial_fit(self, X, y, classes=None):
return self._fit(X, y, incremental=True)
def _decision_scores(self, X):
"""Predict using the trained model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y_pred : array-like, shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
layer_units = [X.shape[1]] + hidden_layer_sizes + \
[self.n_outputs_]
# Initialize layers
activations = [X]
for i in range(self.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
# forward propagate
self._forward_pass(activations, with_output_activation=False)
y_pred = activations[-1]
return y_pred
class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):
"""Multi-layer Perceptron classifier.
This algorithm optimizes the log-loss function using l-bfgs or gradient
descent.
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'logistic', 'tanh', 'relu'}, default 'relu'
Activation function for the hidden layer.
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
algorithm : {'l-bfgs', 'sgd', 'adam'}, default 'adam'
The algorithm for weight optimization.
- 'l-bfgs' is an optimization algorithm in the family of
quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimization algorithm
proposed by Kingma, Diederik, and Jimmy Ba
Note: The default algorithm 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'l-bfgs' can converge faster and perform
better.
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
batch_size : int, optional, default 'auto'
Size of minibatches for stochastic optimizers.
If the algorithm is 'l-bfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
Learning rate schedule for weight updates.
-'constant', is a constant learning rate given by
'learning_rate_init'.
-'invscaling' gradually decreases the learning rate ``learning_rate_`` at
each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
-'adaptive', keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when algorithm='sgd'.
max_iter : int, optional, default 200
Maximum number of iterations. The algorithm iterates until convergence
(determined by 'tol') or this number of iterations.
random_state : int or RandomState, optional, default None
State or seed for random number generator.
shuffle : bool, optional, default True
Whether to shuffle samples in each iteration. Only used when
algorithm='sgd' or 'adam'.
tol : float, optional, default 1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least tol for two consecutive iterations, unless `learning_rate`
is set to 'adaptive', convergence is considered to be reached and
training stops.
learning_rate_init : double, optional, default 0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when algorithm='sgd' or 'adam'.
power_t : double, optional, default 0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when algorithm='sgd'.
verbose : bool, optional, default False
Whether to print progress messages to stdout.
warm_start : bool, optional, default False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution.
momentum : float, default 0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when algorithm='sgd'.
nesterovs_momentum : boolean, default True
Whether to use Nesterov's momentum. Only used when algorithm='sgd' and
momentum > 0.
early_stopping : bool, default False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for two consecutive
epochs.
Only effective when algorithm='sgd' or 'adam'
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
epsilon : float, optional, default 1e-8
Value for numerical stability in adam. Only used when algorithm='adam'
Attributes
----------
`classes_` : array or list of array of shape (n_classes,)
Class labels for each output.
`loss_` : float
The current loss computed with the loss function.
`label_binarizer_` : LabelBinarizer
A LabelBinarizer object trained on the training set.
`coefs_` : list, length n_layers - 1
The ith element in the list represents the weight matrix corresponding
to layer i.
`intercepts_` : list, length n_layers - 1
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int,
        The number of iterations the algorithm has run.
n_layers_ : int
Number of layers.
`n_outputs_` : int
Number of outputs.
`out_activation_` : string
Name of the output activation function.
Notes
-----
MLPClassifier trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(self, hidden_layer_sizes=(100,), activation="relu",
algorithm='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001, power_t=0.5, max_iter=200,
shuffle=True, random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8):
sup = super(MLPClassifier, self)
sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
activation=activation, algorithm=algorithm, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='log_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
self.label_binarizer_ = LabelBinarizer()
def _validate_input(self, X, y, incremental):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
self.label_binarizer_.fit(y)
if not hasattr(self, 'classes_') or not incremental:
self.classes_ = self.label_binarizer_.classes_
else:
classes = self.label_binarizer_.classes_
if not np.all(np.in1d(classes, self.classes_)):
raise ValueError("`y` has classes not in `self.classes_`."
" `self.classes_` has %s. 'y' has %s." %
(self.classes_, classes))
y = self.label_binarizer_.transform(y)
return X, y
def decision_function(self, X):
"""Decision function of the mlp model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples,) or (n_samples, n_classes)
The values of decision function for each class in the model.
"""
check_is_fitted(self, "coefs_")
y_scores = self._decision_scores(X)
if self.n_outputs_ == 1:
return y_scores.ravel()
else:
return y_scores
def predict(self, X):
"""Predict using the multi-layer perceptron classifier
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples,) or (n_samples, n_classes)
The predicted classes.
"""
check_is_fitted(self, "coefs_")
y_scores = self.decision_function(X)
y_scores = ACTIVATIONS[self.out_activation_](y_scores)
return self.label_binarizer_.inverse_transform(y_scores)
@property
def partial_fit(self):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
classes : array, shape (n_classes)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns a trained MLP model.
"""
if self.algorithm not in _STOCHASTIC_ALGOS:
raise AttributeError("partial_fit is only available for stochastic"
"optimization algorithms. %s is not"
" stochastic" % self.algorithm)
return self._partial_fit
def _partial_fit(self, X, y, classes=None):
_check_partial_fit_first_call(self, classes)
super(MLPClassifier, self)._partial_fit(X, y)
return self
def predict_log_proba(self, X):
"""Return the log of probability estimates.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input data.
Returns
-------
log_y_prob : array-like, shape (n_samples, n_classes)
The predicted log-probability of the sample for each class
in the model, where classes are ordered as they are in
`self.classes_`. Equivalent to log(predict_proba(X))
"""
y_prob = self.predict_proba(X)
return np.log(y_prob, out=y_prob)
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : array-like, shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
y_scores = self.decision_function(X)
if y_scores.ndim == 1:
y_scores = logistic(y_scores)
return np.vstack([1 - y_scores, y_scores]).T
else:
return softmax(y_scores)
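# Minimal usage sketch (illustrative, not part of the original source); the
# toy data and the expected prediction below are for demonstration only.
# >>> X = [[0., 0.], [1., 1.]]
# >>> y = [0, 1]
# >>> clf = MLPClassifier(hidden_layer_sizes=(5,), algorithm='l-bfgs',
# ...                     random_state=1)
# >>> clf.fit(X, y)
# >>> clf.predict([[2., 2.], [-1., -2.]])  # typically array([1, 0])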
class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):
"""Multi-layer Perceptron regressor.
This algorithm optimizes the squared-loss using l-bfgs or gradient descent.
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'logistic', 'tanh', 'relu'}, default 'relu'
Activation function for the hidden layer.
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
algorithm : {'l-bfgs', 'sgd', 'adam'}, default 'adam'
The algorithm for weight optimization.
- 'l-bfgs' is an optimization algorithm in the family of
quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimization algorithm
proposed by Kingma, Diederik, and Jimmy Ba
Note: The default algorithm 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'l-bfgs' can converge faster and perform
better.
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
batch_size : int, optional, default 'auto'
Size of minibatches for stochastic optimizers.
If the algorithm is 'l-bfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
Learning rate schedule for weight updates.
-'constant', is a constant learning rate given by
'learning_rate_init'.
-'invscaling' gradually decreases the learning rate ``learning_rate_`` at
each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
-'adaptive', keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when algorithm='sgd'.
max_iter : int, optional, default 200
Maximum number of iterations. The algorithm iterates until convergence
(determined by 'tol') or this number of iterations.
random_state : int or RandomState, optional, default None
State or seed for random number generator.
shuffle : bool, optional, default True
Whether to shuffle samples in each iteration. Only used when
algorithm='sgd' or 'adam'.
tol : float, optional, default 1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least tol for two consecutive iterations, unless `learning_rate`
is set to 'adaptive', convergence is considered to be reached and
training stops.
learning_rate_init : double, optional, default 0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when algorithm='sgd' or 'adam'.
power_t : double, optional, default 0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when algorithm='sgd'.
verbose : bool, optional, default False
Whether to print progress messages to stdout.
warm_start : bool, optional, default False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution.
momentum : float, default 0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when algorithm='sgd'.
nesterovs_momentum : boolean, default True
Whether to use Nesterov's momentum. Only used when algorithm='sgd' and
momentum > 0.
early_stopping : bool, default False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for two consecutive
epochs.
Only effective when algorithm='sgd' or 'adam'
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
epsilon : float, optional, default 1e-8
Value for numerical stability in adam. Only used when algorithm='adam'
Attributes
----------
`loss_` : float
The current loss computed with the loss function.
`coefs_` : list, length n_layers - 1
The ith element in the list represents the weight matrix corresponding
to layer i.
`intercepts_` : list, length n_layers - 1
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int,
        The number of iterations the algorithm has run.
n_layers_ : int
Number of layers.
`n_outputs_` : int
Number of outputs.
`out_activation_` : string
Name of the output activation function.
Notes
-----
MLPRegressor trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense and sparse numpy
arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(self, hidden_layer_sizes=(100,), activation="relu",
algorithm='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True,
random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8):
sup = super(MLPRegressor, self)
sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
activation=activation, algorithm=algorithm, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='squared_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
def predict(self, X):
"""Predict using the multi-layer perceptron model.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self, "coefs_")
y_pred = self._decision_scores(X)
if y_pred.shape[1] == 1:
return y_pred.ravel()
return y_pred
def _validate_input(self, X, y, incremental):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True, y_numeric=True)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
return X, y
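# Minimal usage sketch (illustrative, not part of the original source); the
# data below is made up and the prediction is only expected to be near 1.5.
# >>> X = [[0., 0.], [1., 1.], [2., 2.]]
# >>> y = [0., 1., 2.]
# >>> reg = MLPRegressor(hidden_layer_sizes=(10,), algorithm='l-bfgs',
# ...                    random_state=1)
# >>> reg.fit(X, y).predict([[1.5, 1.5]])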
| mit |
louispotok/pandas | pandas/util/_validators.py | 4 | 13041 | """
Module that contains many useful utilities
for validating data or function arguments
"""
import warnings
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = 'argument' if max_arg_count == 1 else 'arguments'
raise TypeError(
"{fname}() takes at most {max_arg} {argument} "
"({given_arg} given)".format(
fname=fname, max_arg=max_arg_count,
argument=argument, given_arg=actual_arg_count))
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
        # try checking equality directly with the '==' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or \
(v1 is None and v2 is not None):
match = False
else:
match = (v1 == v2)
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
        except Exception:
match = (arg_val_dict[key] is compat_args[key])
if not match:
raise ValueError(("the '{arg}' parameter is not "
"supported in the pandas "
"implementation of {fname}()".
format(fname=fname, arg=key)))
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
    Parameters
    ----------
    fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
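# Illustrative sketch (not part of the original source): a numpy-compat
# wrapper would call validate_args to reject arguments that are not left at
# their defaults; 'sum' and the compat dict are hypothetical.
# >>> compat = dict(dtype=None)
# >>> validate_args('sum', (None,), 1, compat)       # passes silently
# >>> validate_args('sum', ('float64',), 1, compat)  # raises ValueError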
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
kwargs: dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
def validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
The minimum number of arguments that the function `fname`
requires, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
is only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
    ValueError if `args` contains values not at the default value (`None`)
        OR `kwargs` contains keys in `compat_args` that do not map to the
        default value as specified in `compat_args`
See Also
--------
validate_args : purely args validation
validate_kwargs : purely kwargs validation
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(fname, args + tuple(kwargs.values()),
max_fname_arg_count, compat_args)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError("{fname}() got multiple values for keyword "
"argument '{arg}'".format(fname=fname, arg=key))
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
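# Illustrative sketch (not part of the original source): passing the same
# parameter both positionally and by keyword is rejected, mirroring Python's
# own calling rules; 'take' and the compat dict are hypothetical.
# >>> from collections import OrderedDict
# >>> compat = OrderedDict([('axis', None), ('out', None)])
# >>> validate_args_and_kwargs('take', (None,), {'axis': None}, 1, compat)
# TypeError: take() got multiple values for keyword argument 'axis'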
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value
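# Illustrative (not part of the original source): bools and None pass
# through unchanged, anything else raises.
# >>> validate_bool_kwarg(True, 'inplace')
# True
# >>> validate_bool_kwarg('yes', 'inplace')  # raises ValueError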
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame or Panel
    args : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO(PY3): Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = ("{} got multiple values for argument "
"'{}'".format(method_name, arg_name))
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = args[0]
elif len(args) == 2:
if 'axis' in kwargs:
# Unambiguously wrong
msg = ("Cannot specify both 'axis' and any of 'index' "
"or 'columns'")
raise TypeError(msg)
msg = ("Interpreting call\n\t'.{method_name}(a, b)' as "
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
               "positional arguments for 'index' or 'columns' will raise "
               "a 'TypeError'.")
warnings.warn(msg.format(method_name=method_name,), FutureWarning,
stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
out[data._AXIS_NAMES[1]] = args[1]
else:
msg = "Cannot specify all of '{}', 'index', 'columns'."
raise TypeError(msg.format(arg_name))
return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
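# Illustrative sketch (not part of the original source): exactly one of
# 'value' and 'method' must be supplied, and method aliases are normalized
# (assuming clean_fill_method maps 'ffill' to 'pad').
# >>> validate_fillna_kwargs(0, None)        # -> (0, None)
# >>> validate_fillna_kwargs(None, 'ffill')  # -> (None, 'pad')
# >>> validate_fillna_kwargs(0, 'ffill')     # raises ValueError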
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/api/custom_scale_example.py | 9 | 6401 | from __future__ import unicode_literals
import numpy as np
from numpy import ma
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from matplotlib.ticker import Formatter, FixedLocator
class MercatorLatitudeScale(mscale.ScaleBase):
"""
Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using
the system used to scale latitudes in a Mercator projection.
The scale function:
ln(tan(y) + sec(y))
The inverse scale function:
atan(sinh(y))
Since the Mercator scale tends to infinity at +/- 90 degrees,
there is a user-defined threshold, above and below which nothing
will be plotted. This defaults to +/- 85 degrees.
source:
http://en.wikipedia.org/wiki/Mercator_projection
"""
# The scale class must have a member ``name`` that defines the
# string used to select the scale. For example,
# ``gca().set_yscale("mercator")`` would be used to select this
# scale.
name = 'mercator'
def __init__(self, axis, **kwargs):
"""
Any keyword arguments passed to ``set_xscale`` and
``set_yscale`` will be passed along to the scale's
constructor.
thresh: The degree above which to crop the data.
"""
mscale.ScaleBase.__init__(self)
thresh = kwargs.pop("thresh", (85 / 180.0) * np.pi)
if thresh >= np.pi / 2.0:
raise ValueError("thresh must be less than pi/2")
self.thresh = thresh
def get_transform(self):
"""
Override this method to return a new instance that does the
actual transformation of the data.
The MercatorLatitudeTransform class is defined below as a
nested class of this one.
"""
return self.MercatorLatitudeTransform(self.thresh)
def set_default_locators_and_formatters(self, axis):
"""
Override to set up the locators and formatters to use with the
scale. This is only required if the scale requires custom
locators and formatters. Writing custom locators and
formatters is rather outside the scope of this example, but
there are many helpful examples in ``ticker.py``.
In our case, the Mercator example uses a fixed locator from
-90 to 90 degrees and a custom formatter class to convert the
radians to degrees and put a degree symbol after the value:
"""
class DegreeFormatter(Formatter):
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
return "%d\u00b0" % ((x / np.pi) * 180.0)
deg2rad = np.pi / 180.0
axis.set_major_locator(FixedLocator(
np.arange(-90, 90, 10) * deg2rad))
axis.set_major_formatter(DegreeFormatter())
axis.set_minor_formatter(DegreeFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Override to limit the bounds of the axis to the domain of the
transform. In the case of Mercator, the bounds should be
limited to the threshold that was passed in. Unlike the
autoscaling provided by the tick locators, this range limiting
will always be adhered to, whether the axis range is set
manually, determined automatically or changed through panning
and zooming.
"""
return max(vmin, -self.thresh), min(vmax, self.thresh)
class MercatorLatitudeTransform(mtransforms.Transform):
# There are two value members that must be defined.
# ``input_dims`` and ``output_dims`` specify number of input
# dimensions and output dimensions to the transformation.
# These are used by the transformation framework to do some
# error checking and prevent incompatible transformations from
# being connected together. When defining transforms for a
# scale, which are, by definition, separable and have only one
# dimension, these members should always be set to 1.
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
"""
This transform takes an Nx1 ``numpy`` array and returns a
transformed copy. Since the range of the Mercator scale
is limited by the user-specified threshold, the input
array must be masked to contain only valid values.
``matplotlib`` will handle masked arrays and remove the
out-of-range data from the plot. Importantly, the
``transform`` method *must* return an array that is the
same shape as the input array, since these values need to
remain synchronized with values in the other dimension.
"""
masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
if masked.mask.any():
return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked)))
else:
return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a)))
def inverted(self):
"""
Override this method so matplotlib knows how to get the
inverse transform for this transform.
"""
return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh)
class InvertedMercatorLatitudeTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
return np.arctan(np.sinh(a))
def inverted(self):
return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh)
# Now that the Scale class has been defined, it must be registered so
# that ``matplotlib`` can find it.
mscale.register_scale(MercatorLatitudeScale)
if __name__ == '__main__':
import matplotlib.pyplot as plt
t = np.arange(-180.0, 180.0, 0.1)
s = t / 360.0 * np.pi
plt.plot(t, s, '-', lw=2)
plt.gca().set_yscale('mercator')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Mercator: Projection of the Oppressor')
plt.grid(True)
plt.show()
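# A quick numeric sanity check of the scale functions above (illustrative,
# not part of the original example): the forward map ln(tan(y) + sec(y)) and
# the inverse map atan(sinh(y)) round-trip on (-pi/2, pi/2):
#
#   y = 1.0
#   fwd = np.log(np.tan(y) + 1.0 / np.cos(y))   # ~= 1.2261
#   np.arctan(np.sinh(fwd))                     # ~= 1.0 again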
| mit |
dsjoerg/esdb | scripts/army_income.py | 1 | 1448 | #!/Users/david/local/bin/python
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy.stats import norm
import numpy.lib.recfunctions
import math
resdir = '/Users/David/Dropbox/Daves_Docs/ggtracker/research/s2gs_20130215'
leaguenames = ['Bronze', 'Silver', 'Gold', 'Platinum', 'Diamond', 'Master']
snaps = np.load("{}/snaps.npy".format(resdir))
subsnap = dict()
for second in np.arange(5,21) * 60:
subsnap[second] = snaps[snaps['seconds'] == second]
print "Subsnaps arranged"
plt.rc('font', size=5)
for second in np.arange(5,21) * 60:
print "Doing {}".format(second)
plt.figure(figsize=(8,6), dpi=100)
fignum=1
for league in range(0,6):
plt.subplot(2,3,fignum)
fignum=fignum+1
leaguesnap = subsnap[second]['trueleague'] == league
# scipy.stats.describe returns (nobs, minmax, mean, variance, skewness,
# kurtosis), so [2] is the mean and [3] the variance; cap each axis at
# mean + 3 standard deviations
xdesc = sp.stats.describe(subsnap[second][leaguesnap]['army1'])
ydesc = sp.stats.describe(subsnap[second][leaguesnap]['income1'])
xmin = 0
ymin = 0
xmax = xdesc[2] + 3.0 * math.sqrt(xdesc[3])
ymax = ydesc[2] + 3.0 * math.sqrt(ydesc[3])
plt.hexbin(subsnap[second][leaguesnap]['army1'], subsnap[second][leaguesnap]['income1'], cmap=plt.cm.YlOrRd_r, gridsize=30, extent=(0,xmax,0,ymax))
plt.title(leaguenames[league])
plt.savefig("{}/army_income_{}.pdf".format(resdir, second))
| gpl-3.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/tseries/holiday.py | 1 | 14270 | from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
from pandas.tseries.offsets import Easter, Day
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
For the second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
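# Behavior sketch for the observance helpers above (dates illustrative):
# for Saturday 2012-07-14,
#   next_monday(dt)     -> Monday 2012-07-16
#   previous_friday(dt) -> Friday 2012-07-13
#   nearest_workday(dt) -> Friday 2012-07-13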
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
Name of the holiday, defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
days_of_week:
provide a tuple of days, e.g. (0,1,2,3,) for Monday through Thursday
Monday=0,..,Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from pandas import DateOffset
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
offset=DateOffset(weekday=MO(1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
observance=nearest_workday)
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = start_date
self.end_date = end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year=%s, ' % self.year
info += 'month=%s, day=%s, ' % (self.month, self.day)
if self.offset is not None:
info += 'offset=%s' % self.offset
if self.observance is not None:
info += 'observance=%s' % self.observance
repr = 'Holiday: %s (%s)' % (self.name, info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
if self.start_date is not None:
start_date = self.start_date
if self.end_date is not None:
end_date = self.end_date
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
year_offset = DateOffset(years=1)
base_date = Timestamp(
datetime(start_date.year, self.month, self.day),
tz=start_date.tz,
)
dates = DatetimeIndex(start=base_date, end=end_date, freq=year_offset)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = list(filter(lambda x: x is not None and
x.dayofweek in self.days_of_week,
holiday_dates))
else:
holiday_dates = list(filter(lambda x: x is not None, holiday_dates))
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
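# Sketch of dates() (illustrative; USMemorialDay is defined later in this
# module, and the 2012 value was computed by hand):
#   USMemorialDay.dates('2012-01-01', '2012-12-31')
#   -> [Timestamp('2012-05-28 00:00:00')]   # Monday on/after May 24, 2012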
def _apply_rule(self, dates):
"""
Apply the given offset/observance to an
iterable of dates.
Parameters
----------
dates : array-like
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return map(lambda d: self.observance(d), dates)
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
dates = list(map(lambda d: d + offset, dates))
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super(HolidayCalendarMetaClass, cls).__new__(cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
"""
Abstract interface to create holidays following certain rules.
"""
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
Initializes holiday object with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super(AbstractHolidayCalendar, self).__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def holidays(self, start=None, end=None, return_name=False):
"""
Returns the holidays between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar %s does not have any '
'rules specified' % self.name)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we get them again
if self._cache is None or start < self._cache[0] or end > self._cache[1]:
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar instance/subclass or array of Holiday objects
"""
try:
other = other.rules
except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = dict((holiday.name, holiday) for holiday in other)
try:
base = base.rules
except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = dict([(holiday.name, holiday) for holiday in base])
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=24,
offset=DateOffset(weekday=MO(1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.', month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday('President''s Day', month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified
by: https://www.opm.gov/policy-data-oversight/snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
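# Usage sketch for the calendar above (dates illustrative):
#   cal = USFederalHolidayCalendar()
#   cal.holidays(start='2012-01-01', end='2012-12-31')
#   # -> DatetimeIndex of the ten observed 2012 federal holidays,
#   #    e.g. 2012-01-02 (New Years Day observed, since Jan 1 was a Sunday)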
def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
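# Factory sketch (class name illustrative): build a new calendar type that
# adds Good Friday to the federal rules:
#   TradingCal = HolidayCalendarFactory('TradingCal',
#                                       USFederalHolidayCalendar, GoodFriday)
#   TradingCal().holidays('2012-01-01', '2012-12-31')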
| mit |
RalphBariz/RalphsDotNet | Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/numpy/lib/npyio.py | 53 | 59490 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
if sys.platform != 'cli':
from _compiled_base import packbits, unpackbits
else:
def packbits(*args, **kw):
raise NotImplementedError()
def unpackbits(*args, **kw):
raise NotImplementedError()
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
g = GzipFile(fileobj=f.fileobj)
g.name = f.name
g.mode = f.mode
f = g
return f
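# Sketch of the forward-only seek emulation above (file name illustrative):
#   g = seek_gzip_factory('data.gz')
#   g.seek(100)   # decompresses and discards the first 100 bytes
#   g.seek(50)    # rewinds to offset 0, then re-reads 50 bytes forward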
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
# Hand ownership of fid over to NpzFile, so the finally
# clause below does not close it out from under the archive.
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
See Also
--------
numpy.savez_compressed : Save several arrays into a compressed .npz file format
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : string
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
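# Sketch (path illustrative): np.savez_compressed('/tmp/ab.npz', a=a, b=b)
# writes a.npy and b.npy into ab.npz using ZIP_DEFLATED compression;
# np.load('/tmp/ab.npz')['a'] then reads one array back lazily.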
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
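# Converter sketch (doctest-style, illustrative):
#   >>> _getconv(np.dtype(bool))('0')
#   False
#   >>> _getconv(np.dtype(int))('3.0')   # goes through float first
#   3
#   >>> _getconv(np.dtype(float))('1e-3')
#   0.001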
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a record
data-type, the resulting array will be 1-dimensional, and each row
will be interpreted as an element of the array. In this case, the
number of columns used must match the number of fields in the
data-type.
comments : str, optional
The character used to indicate the start of a comment; default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. The default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
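# Sketch of flatten_dtype (illustrative; the integer width is platform
# dependent):
#   flatten_dtype(np.dtype([('x', int), ('y', [('s', int), ('t', float)])]))
#   -> [dtype('int64'), dtype('int64'), dtype('float64')]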
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if own_fh:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
.. versionadded:: 1.5.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces the result to be preceded by + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
# list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespace acts as the delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. An underscore is appended to excluded names:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
---------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname, 'U')
own_fhd = True
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if not first_line:
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = dtype.names
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
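# Sketch of the missing_values handling above (column names illustrative):
#   missing_values={'a': 'N/A'}   -> 'N/A' marks missing data in column 'a' only
#   missing_values=['N/A', '???'] -> one marker per column, in order
#   missing_values="N/A,???"      -> both strings mark missing data everywhere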
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
            errmsg.insert(0, "Some errors were detected!")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
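# A small usage sketch (illustrative only; the in-memory bytes below are
# hypothetical input). With dtype=None the field types are inferred per
# column, and with the default usemask=False a plain recarray is returned:
def _recfromtxt_example():
    from io import BytesIO
    data = BytesIO(asbytes("1 2.5 abc\n4 5.0 def"))
    rec = recfromtxt(data, dtype=None)
    # fields default to 'f0', 'f1', 'f2' since no names were given
    return rec.f0, rec.f1, rec.f2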
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
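# A companion sketch for recfromcsv (again with hypothetical in-memory data;
# any filename or file-like object accepted by genfromtxt works as well).
# Field names are taken from the header line and lower-cased by default:
def _recfromcsv_example():
    from io import BytesIO
    data = BytesIO(asbytes("A,B\n1,2.5\n3,4.0"))
    rec = recfromcsv(data)
    return rec.a, rec.b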
| gpl-3.0 |
vortex-ape/scikit-learn | examples/cluster/plot_color_quantization.py | 39 | 3356 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
trichter/yam | yam/tests/test_main.py | 1 | 12478 | # Copyright 2017-2019 Tom Eulenfeld, MIT license
import unittest
from pkg_resources import load_entry_point
from contextlib import redirect_stderr, redirect_stdout
import glob
import io
import logging
import os
import shutil
import sys
import time
import tempfile
import tqdm
import matplotlib
matplotlib.use('Agg')
def _replace_in_file(fname_src, fname_dest, str_src, str_dest):
with open(fname_src) as f:
text = f.read()
text = text.replace(str_src, str_dest)
with open(fname_dest, 'w') as f:
f.write(text)
class TestCase(unittest.TestCase):
def setUp(self):
args = sys.argv[1:]
self.verbose = '-v' in args
self.permanent_tempdir = '-p' in args
self.less_data = '--full' not in args
self.njobs = args[args.index('-n') + 1] if '-n' in args else None
if self.permanent_tempdir:
tempdir = os.path.join(tempfile.gettempdir(), 'yam_test')
if os.path.exists(tempdir) and '-d' in args:
shutil.rmtree(tempdir)
if not os.path.exists(tempdir):
os.mkdir(tempdir)
else:
tempdir = tempfile.mkdtemp(prefix='yam_test_')
self.cwd = os.getcwd()
os.chdir(tempdir)
self.tempdir = tempdir
# for coverage put .coveragerc config file into tempdir
# and append correct data_file parameter to config file
covfn = os.path.join(self.cwd, '.coverage')
if not os.path.exists('.coveragerc') and os.path.exists(covfn + 'rc'):
_replace_in_file(covfn + 'rc', '.coveragerc', '[run]',
'[run]\ndata_file = ' + covfn)
self.plotdir = os.path.join(self.tempdir, 'plots')
self.script = load_entry_point('yam', 'console_scripts', 'yam')
total = 74 - 2 * self.permanent_tempdir
self.pbar = tqdm.tqdm(total=total, desc='CLI tests passed')
def out(self, cmd, text=None):
"""Test if text is in output of command"""
        # on TRAVIS use at most two cores
if (self.njobs and '--njobs' not in cmd and
cmd.split()[0] in ('correlate', 'stretch')):
cmd = cmd + ' --njobs ' + self.njobs
# disabling the logger is necessary, because the logging
# configuration cannot be changed easily on subsequent calls
# of yam in this test suite
if self.verbose and cmd.split()[0] in ('correlate', 'stack', 'stretch'):
if '-v' not in cmd:
cmd = cmd + ' -vvv'
logging.getLogger('yam').disabled = False
elif self.verbose:
logging.getLogger('yam').disabled = True
if self.verbose:
tqdm.tqdm.write('> yam ' + cmd)
# catching all output, print only if tests are run with -v
try:
with io.StringIO() as f:
with redirect_stdout(f), redirect_stderr(f):
try:
self.script(cmd.split())
except SystemExit:
pass
output = f.getvalue()
if self.verbose:
tqdm.tqdm.write(output)
finally:
self.pbar.update(1)
if text is not None:
self.assertIn(text, output)
return output
def checkplot(self, bname):
"""Test if plot file exists"""
fname = os.path.join(self.plotdir, bname)
self.assertTrue(os.path.exists(fname), msg='%s missing' % fname)
def test_cli(self):
# create tutorial
self.out('-h', 'print information about')
self.out('--version', '.')
self.out('bla', 'invalid choice')
if not self.permanent_tempdir:
self.out('info', "No such file or directory: 'conf.json'")
self.out('create', '')
if not self.permanent_tempdir:
self.out('info', 'Not found')
cmd = 'create --tutorial' + ' --less-data' * self.less_data
self.out(cmd)
self.out(cmd)
self.out('scan', 'obspy-scan')
# check basics
_replace_in_file('conf.json', 'conf2.json', '"io"', '"io",')
self.out('-c conf2.json info', 'parsing the conf')
self.out('info', '3 stations')
self.out('info stations', 'CX.PATCX..BHZ')
self.out('info data', 'BHZ__20100204T000000Z__20100205T000000Z.mseed')
self.out('print stations', 'CX.PATCX..BHZ')
pr = self.out('print data CX.PB06..BHZ 2010-02-05', '2010-02-05')
self.out('print data CX.PATCX..BHZ 2010-036', '2010-02-05')
self.out('print prepdata CX.PATCX..BHZ 2010-02-05 1', 'samples')
self.out('print data', 'seedid')
self.out('print prepdata CX.PATCX..BHZ 2010-02-05', 'corrid')
# check remove_response on one day
_replace_in_file(
'conf.json', 'conf2.json', '{"clip_factor": 2},',
'{"clip_factor":2, "clip_set_zero":true},"remove_response":true,')
self.out('-c conf2.json print prepdata CX.PATCX..BHZ 2010-02-05 1a')
# comment until ObsPy issue 2097 is fixed
# try:
# self.out('plot stations') # takes long
# except ImportError:
# pass
# else:
# self.checkplot('stations.png')
self.out('plot data CX.PATCX..BHZ 2010-02-05')
self.checkplot('data_CX.PATCX..BHZ_2010-02-05.png')
self.out('plot prepdata CX.PATCX..BHZ 2010-02-05 1')
self.checkplot('prepdata_CX.PATCX..BHZ_2010-02-05_c1.png')
# check data_plugin
data_plugin_file = """
from obspy import read
EXPR = ("example_data/{network}.{station}.{location}.{channel}__"
"{t.year}{t.month:02d}{t.day:02d}*.mseed")
# def get_data(starttime, endtime, network, station, location, channel):
def get_data(starttime, endtime, **smeta):
fname = EXPR.format(t=starttime, **smeta)
stream = read(fname, 'MSEED')
    # also load the previous file, because starttime in the day file is after 0:00
fname = EXPR.format(t=starttime-5, **smeta)
stream += read(fname, 'MSEED')
return stream
"""
with open('data.py', 'w') as f:
f.write(data_plugin_file)
_replace_in_file('conf.json', 'conf2.json', '"data_plugin": null',
'"data_plugin": "data : get_data"')
self.out('-c conf2.json info', 'data : get_data')
self.out('-c conf2.json info data', 'data : get_data')
pr2 = self.out('-c conf2.json print data CX.PB06..BHZ 2010-02-05')
self.assertEqual(pr, pr2)
# check correlation
t1 = time.time()
self.out('correlate 1') # takes long
t2 = time.time()
self.out('correlate 1 -v')
t3 = time.time()
if not self.permanent_tempdir:
self.assertLess(t3 - t2, 0.5 * (t2 - t1))
self.out('correlate auto') # takes long
self.out('correlate 1a') # takes long
self.out('info', 'c1_s1d: 7 combs')
self.out('info', 'c1a_s1d: 3 combs')
self.out('info', 'cauto: 2 combs')
cauto_info = self.out('info cauto',
'CX.PATCX/.BHZ-.BHZ/2010-02/2010-02-05')
self.out('info c1_s1d/CX.PB06-CX.PB06/.BHZ-.BHZ', 'CX.PB06-CX.PB06')
self.out('print cauto', '1201 samples')
expected = '%d Trace' % (2 if self.less_data else 11)
self.out('print c1_s1d/CX.PB06-CX.PB06/.BHZ-.BHZ', expected)
# check if correlation without parallel processing gives the same
# result keys
with self.assertWarnsRegex(UserWarning, 'only top level keys'):
self.out('remove cauto/CX.PATCX-CX.PATCX')
self.out('remove cauto')
self.out('correlate auto --parallel-inner-loop') # takes long
self.maxDiff = None
cauto_info_seq = self.out('info cauto')
self.assertEqual(cauto_info, cauto_info_seq)
# check plots of correlation
po = ('--plot-options {"xlim":[0,10],"figsize":[10,10],'
'"ylim":[null,"2010-02-05"]}')
self.out('plot c1_s1d --plottype vs_dist')
self.checkplot('corr_vs_dist_c1_s1d_ZZ.png')
self.out('plot c1_s1d/CX.PATCX-CX.PB06 --plottype wiggle')
self.out('plot cauto --plottype wiggle %s' % po)
bname = 'corr_vs_time_wiggle_c1_s1d_CX.PATCX-CX.PB06_'
self.checkplot(bname + '.BHZ-.BHZ.png')
self.checkplot(bname + '.BHN-.BHZ.png')
bname = 'corr_vs_time_wiggle_cauto_CX.PATCX-CX.PATCX_'
self.checkplot(bname + '.BHZ-.BHZ.png')
self.checkplot(bname + '.BHN-.BHZ.png')
self.out('plot c1_s1d/CX.PATCX-CX.PB06')
self.out('plot cauto %s' % po)
bname = 'corr_vs_time_c1_s1d_CX.PATCX-CX.PB06_'
self.checkplot(bname + '.BHZ-.BHZ.png')
self.checkplot(bname + '.BHN-.BHZ.png')
bname = 'corr_vs_time_cauto_CX.PATCX-CX.PATCX_'
self.checkplot(bname + '.BHZ-.BHZ.png')
self.checkplot(bname + '.BHN-.BHZ.png')
# check stacking
self.out('stack c1_s1d 2d')
self.out('info', 'c1_s1d_s2d: 7 combs')
self.out('stack c1_s1d 2dm1d')
self.out('info', 'c1_s1d_s2dm1d: 7 combs')
self.out('stack c1_s1d 1')
self.out('stack cauto 2') # takes long
self.out('info', 'cauto_s2')
self.out('remove c1_s1d_s2dm1d c1_s1d_s1')
# check stretching
po = ('--plot-options {"show_line":true,'
'"xlim":[null,"2010-02-05"]}')
self.out('stretch c1_s1d/CX.PATCX-CX.PATCX 1')
self.out('stretch c1_s1d/CX.PATCX-CX.PATCX 1b')
self.out("stack c1_s1d None")
self.out('stretch c1_s1d 1 --reftrid c1_s1d_s')
self.out('stretch cauto/CX.PATCX-CX.PATCX/.BHZ-.BHZ 2')
self.out('stretch cauto 2')
self.out('remove cauto_t2/CX.PATCX-CX.PATCX/.BHZ-.BHZ')
self.out('stretch cauto 2 --njobs 1')
self.out('remove cauto_t2/CX.PATCX-CX.PATCX/.BHZ-.BHZ')
_replace_in_file('conf.json', 'conf2.json', '"sides": "right"',
'"time_period": [null, "2010-02-05"], "max_lag": 40, '
'"sides": "right"')
self.out('-c conf2.json stretch cauto/CX.PATCX-CX.PATCX/.BHZ-.BHZ 2')
self.out('-c conf2.json stretch cauto/CX.PATCX-CX.PATCX/.BHZ-.BHZ 2b')
self.out('plot c1_s1d_t1/CX.PATCX-CX.PB06 %s' % po)
self.out('plot c1_s1d_t1b/CX.PATCX-CX.PB06 %s' % po)
self.out('plot cauto_t2 --plottype wiggle', 'not supported')
self.out('plot cauto_t2')
self.out('plot cauto_t2b')
globexpr = os.path.join(self.plotdir, 'sim_mat_cauto_t2*.png')
self.assertEqual(len(glob.glob(globexpr)), 3)
globexpr = os.path.join(self.plotdir, 'sim_mat_c1_s1d_t1*.png')
if not self.permanent_tempdir:
self.assertEqual(len(glob.glob(globexpr)), 3)
self.out('plot c1_s1d_t1')
num_plots = 5 if self.less_data else 7
self.assertEqual(len(glob.glob(globexpr)), num_plots)
num_combs = 5 if self.less_data else 7
self.out('info', 'c1_s1d_t1: %d combs' % num_combs)
self.out('info cauto_t2', 'CX.PATCX-CX.PATCX/.BHZ-.BHZ')
info_t = self.out('print cauto_t2', 'num_stretch')
self.assertGreater(len(info_t.splitlines()), 100) # lots of lines
# check export
fname = 'dayplot.mseed'
self.out('export data %s CX.PB06..BHZ 2010-02-05' % fname)
self.assertTrue(os.path.exists(fname), msg='%s missing' % fname)
fname = 'some_auto_corrs.h5'
self.out('export cauto/CX.PATCX-CX.PATCX %s --format H5' % fname)
self.assertTrue(os.path.exists(fname), msg='%s missing' % fname)
# check load (IPython mocked)
        import unittest.mock  # make sure the mock submodule is loaded
        sys.modules['IPython'] = unittest.mock.MagicMock()
self.out('load cauto', 'Good Bye')
self.out('load c1_s1d', 'Good Bye')
self.out('load c1_s1d_t1/CX.PATCX-CX.PB01', 'Good Bye')
def tearDown(self):
os.chdir(self.cwd)
if not self.permanent_tempdir:
try:
shutil.rmtree(self.tempdir)
except PermissionError as ex:
print('Cannot remove temporary directory: %s' % ex)
def suite():
return unittest.makeSuite(TestCase, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mit |
yarden-livnat/regulus | regulus/models/inv_reg.py | 1 | 3790 | import math
import numpy as np
import pandas as pd
SIGMA = 0.1
N = 40
def radial_kernel(x0, X, sigma):
return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * sigma * sigma))
def gaussian(sigma):
f = 1 / (math.sqrt(2*math.pi) * sigma)
def kernel(x, X):
return f * np.exp(np.sum((X - x) ** 2, axis=1) / (-2 * sigma * sigma))
return kernel
GAUSSIAN = gaussian(SIGMA)
def lowess(X, Y, kernel=GAUSSIAN):
# add bias term
X = np.c_[np.ones(len(X)), X]
def f(x):
x = np.r_[1, x]
# fit model: normal equations with kernel
xw = X.T * kernel(x, X)
beta = np.linalg.pinv(xw @ X) @ xw @ Y
# predict value
return x @ beta
return f
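# A short usage sketch for lowess on synthetic data: fit a locally weighted
# linear model to a noisy sine and evaluate it at a query point (the shapes
# assumed here are X of (n_samples, n_features) and Y of (n_samples,)).
def _lowess_example():
    rng = np.random.RandomState(0)
    X = rng.uniform(0., 1., size=(50, 1))
    Y = np.sin(2.*np.pi*X[:, 0]) + 0.1*rng.randn(50)
    f = lowess(X, Y, kernel=gaussian(0.1))
    # smoothed estimate of y near x = 0.5
    return f(np.array([0.5]))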
def sample_lowess(S, X, Y, kernel=GAUSSIAN):
f = lowess(X, Y, kernel)
return np.array([f(s) for s in S])
def inverse_lowess(X, Y, S=None, kernel=GAUSSIAN, n=N):
if S is None:
S = np.linspace(np.amin(Y), np.amax(Y), n)
return sample_lowess(S, Y, X, kernel) # note swap of X and Y
def inverse_lowess_std(X, Y, S=None, kernel=GAUSSIAN, n=N):
if S is None:
S = np.linspace(np.amin(Y), np.amax(Y), n)
Y1 = np.c_[np.ones(len(Y)), Y]
S1 = np.c_[np.ones(len(S)), S]
W = np.array([kernel(s, Y1) for s in S1])
denom = W.sum(axis=1)
rho = (inverse_lowess(X, Y, S=Y, kernel=kernel) - X) ** 2
wr = W @ rho
std = np.sqrt(np.c_[[wr[c]/denom for c in list(wr)]])
return std.T
def inverse(X, Y, kernel=GAUSSIAN, S=None, scaler=None):
if S is None:
S = np.linspace(np.amin(Y), np.amax(Y), N)
if len(S) < 2:
return pd.DataFrame(columns=X.columns), pd.DataFrame(columns=X.columns)
line = inverse_lowess(X, Y, S, kernel)
std = inverse_lowess_std(X, Y, S, kernel=kernel)
if scaler is not None:
line = scaler.inverse_transform(line)
std = std * scaler.scale_
# return S, line, std
index = pd.Index(S, name=Y.name)
curve = pd.DataFrame(line, index=index, columns=X.columns)
curve_std = pd.DataFrame(std, index=index, columns=X.columns)
return curve, curve_std
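# A hedged usage sketch for inverse on synthetic pandas inputs (the function
# assumes X is a DataFrame and Y a named Series so labels can be propagated):
def _inverse_example():
    rng = np.random.RandomState(0)
    X = pd.DataFrame({'x0': rng.uniform(0., 1., 50)})
    Y = pd.Series(np.sin(2.*np.pi*X['x0']), name='y')
    curve, curve_std = inverse(X, Y)
    # curve holds the inverse-regression line, curve_std its spread
    return curve, curve_std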
# def inverse_regression_generator(kernel=GAUSSIAN, bandwidth=0.3, scale=True):
# def f(context, node):
# partition = node.data
# if partition.y.size < 2:
# return []
#
# sigma = bandwidth * (partition.max() - partition.min())
# kernel = gaussian(sigma)
# scaler = node.regulus.pts.scaler if scale else None
# S, line, std = inverse(partition.x, partition.y, kernel, scaler)
# return [dict(x=line[:, c], y=S, std=std[:, c]) for c in range(partition.x.shape[1])]
# return f
def def_inverse(bandwidth_factor=0.2):
def f(context, node):
if hasattr(node, 'data'):
partition = node.data
else:
partition = node
if partition.y.size < 2:
return []
sigma = bandwidth_factor * (partition.max() - partition.min())
kernel = gaussian(sigma)
scaler = node.regulus.pts.scaler
data_range = context['data_range']
S = np.linspace(*data_range, N)
S1 = S[(S >= np.amin(partition.y)) & (S <= np.amax(partition.y))]
return inverse(partition.x, partition.y, kernel, S1, scaler)
return f
def inverse_regression(context, node):
if hasattr(node, 'data'):
partition = node.data
else:
partition = node
if partition.y.size < 2:
return []
sigma = 0.2 * (partition.max() - partition.min())
kernel = gaussian(sigma)
scaler = node.regulus.pts.scaler
data_range = context['data_range']
S = np.linspace(*data_range, N)
S1 = S[(S >= np.amin(partition.y)) & (S <= np.amax(partition.y))]
    return inverse(partition.x, partition.y, kernel, S1, scaler)
| bsd-3-clause |
aricooperman/Jzipline | zipline/pipeline/loaders/frame.py | 3 | 6378 | """
PipelineLoader accepting a DataFrame as input.
"""
from functools import partial
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import make_adjustment_from_labels
from zipline.utils.pandas_utils import sort_values
from .base import PipelineLoader
ADJUSTMENT_COLUMNS = Index([
'sid',
'value',
'kind',
'start_date',
'end_date',
'apply_date',
])
class DataFrameLoader(PipelineLoader):
"""
A PipelineLoader that reads its input from DataFrames.
Mostly useful for testing, but can also be used for real work if your data
fits in memory.
Parameters
----------
column : zipline.pipeline.data.BoundColumn
The column whose data is loadable by this loader.
baseline : pandas.DataFrame
A DataFrame with index of type DatetimeIndex and columns of type
Int64Index. Dates should be labelled with the first date on which a
value would be **available** to an algorithm. This means that OHLCV
data should generally be shifted back by a trading day before being
supplied to this class.
adjustments : pandas.DataFrame, default=None
A DataFrame with the following columns:
sid : int
value : any
kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES)
start_date : datetime64 (can be NaT)
end_date : datetime64 (must be set)
apply_date : datetime64 (must be set)
The default of None is interpreted as "no adjustments to the baseline".
"""
def __init__(self, column, baseline, adjustments=None):
self.column = column
self.baseline = baseline.values.astype(self.column.dtype)
self.dates = baseline.index
self.assets = baseline.columns
if adjustments is None:
adjustments = DataFrame(
index=DatetimeIndex([]),
columns=ADJUSTMENT_COLUMNS,
)
else:
# Ensure that columns are in the correct order.
adjustments = adjustments.reindex_axis(ADJUSTMENT_COLUMNS, axis=1)
sort_values(adjustments, ['apply_date', 'sid'], inplace=True)
self.adjustments = adjustments
self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
self.adjustment_sids = Int64Index(adjustments.sid)
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
        # This logic relies on the adjustments having been sorted by
        # apply_date (and sid) in __init__.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
            # Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load data from our stored baseline.
"""
column = self.column
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
elif columns[0] != column:
raise ValueError("Can't load unknown column %s" % columns[0])
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(assets)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=self.baseline[ix_(date_indexer, assets_indexer)],
                # Mask out requested columns/rows that didn't match.
mask=(good_assets & good_dates[:, None]) & mask,
adjustments=self.format_adjustments(dates, assets),
missing_value=column.missing_value,
),
}
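# A minimal construction sketch with a synthetic baseline. In a real pipeline
# `column` would be a BoundColumn (e.g. USEquityPricing.close -- that name is
# an assumption and is not imported here):
def _dataframe_loader_example(column):
    dates = DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06'])
    sids = Int64Index([1, 2, 3])
    baseline = DataFrame(zeros((len(dates), len(sids))),
                         index=dates, columns=sids)
    return DataFrameLoader(column=column, baseline=baseline)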
| apache-2.0 |
PatrickOReilly/scikit-learn | sklearn/mixture/tests/test_bayesian_mixture.py | 2 | 15056 | # Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.special import gammaln
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm
from sklearn.mixture.bayesian_mixture import _log_wishart_norm
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture.tests.test_gaussian_mixture import RandomData
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_greater_equal, ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def test_log_dirichlet_norm():
rng = np.random.RandomState(0)
dirichlet_concentration = rng.rand(2)
expected_norm = (gammaln(np.sum(dirichlet_concentration)) -
np.sum(gammaln(dirichlet_concentration)))
    predicted_norm = _log_dirichlet_norm(dirichlet_concentration)
    assert_almost_equal(expected_norm, predicted_norm)
def test_log_wishart_norm():
rng = np.random.RandomState(0)
n_components, n_features = 5, 2
degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
expected_norm = np.empty(5)
for k, (degrees_of_freedom_k, log_det_k) in enumerate(
zip(degrees_of_freedom, log_det_precisions_chol)):
expected_norm[k] = -(
degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
np.sum(gammaln(.5 * (degrees_of_freedom_k -
np.arange(0, n_features)[:, np.newaxis])), 0))
    predicted_norm = _log_wishart_norm(degrees_of_freedom,
                                       log_det_precisions_chol, n_features)
    assert_almost_equal(expected_norm, predicted_norm)
def test_bayesian_mixture_covariance_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
covariance_type = 'bad_covariance_type'
bgmm = BayesianGaussianMixture(covariance_type=covariance_type,
random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type,
bgmm.fit, X)
def test_bayesian_mixture_weights_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 5, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of dirichlet_concentration_prior
bad_dirichlet_concentration_prior_ = 0.
bgmm = BayesianGaussianMixture(
dirichlet_concentration_prior=bad_dirichlet_concentration_prior_,
random_state=0)
assert_raise_message(ValueError,
"The parameter 'dirichlet_concentration_prior' "
"should be greater than 0., but got %.3f."
% bad_dirichlet_concentration_prior_,
bgmm.fit, X)
# Check correct init for a given value of dirichlet_concentration_prior
dirichlet_concentration_prior = rng.rand()
bgmm = BayesianGaussianMixture(
dirichlet_concentration_prior=dirichlet_concentration_prior,
random_state=rng).fit(X)
assert_almost_equal(dirichlet_concentration_prior,
bgmm.dirichlet_concentration_prior_)
# Check correct init for the default value of dirichlet_concentration_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(1. / n_components, bgmm.dirichlet_concentration_prior_)
def test_bayesian_mixture_means_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 3, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of mean_precision_prior
bad_mean_precision_prior_ = 0.
bgmm = BayesianGaussianMixture(
mean_precision_prior=bad_mean_precision_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% bad_mean_precision_prior_,
bgmm.fit, X)
# Check correct init for a given value of mean_precision_prior
mean_precision_prior = rng.rand()
bgmm = BayesianGaussianMixture(
mean_precision_prior=mean_precision_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
# Check correct init for the default value of mean_precision_prior
bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
assert_almost_equal(1., bgmm.mean_precision_prior_)
# Check raise message for a bad shape of mean_prior
mean_prior = rng.rand(n_features + 1)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
bgmm.fit, X)
# Check correct init for a given value of mean_prior
mean_prior = rng.rand(n_features)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_prior, bgmm.mean_prior_)
    # Check correct init for the default value of mean_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
def test_bayesian_mixture_precisions_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of degrees_of_freedom_prior
bad_degrees_of_freedom_prior_ = n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=bad_degrees_of_freedom_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'degrees_of_freedom_prior' should be "
"greater than %d, but got %.3f."
% (n_features - 1, bad_degrees_of_freedom_prior_),
bgmm.fit, X)
# Check correct init for a given value of degrees_of_freedom_prior
degrees_of_freedom_prior = rng.rand() + n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior,
bgmm.degrees_of_freedom_prior_)
# Check correct init for the default value of degrees_of_freedom_prior
degrees_of_freedom_prior_default = n_features
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior_default,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior_default,
bgmm.degrees_of_freedom_prior_)
# Check correct init for a given value of covariance_prior
covariance_prior = {
'full': np.cov(X.T, bias=1) + 10,
'tied': np.cov(X.T, bias=1) + 5,
'diag': np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
'spherical': rng.rand()}
bgmm = BayesianGaussianMixture(random_state=rng)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.covariance_prior = covariance_prior[cov_type]
bgmm.fit(X)
assert_almost_equal(covariance_prior[cov_type],
bgmm.covariance_prior_)
# Check raise message for a bad spherical value of covariance_prior
bad_covariance_prior_ = -1.
bgmm = BayesianGaussianMixture(covariance_type='spherical',
covariance_prior=bad_covariance_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% bad_covariance_prior_,
bgmm.fit, X)
# Check correct init for the default value of covariance_prior
covariance_prior_default = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()}
bgmm = BayesianGaussianMixture(random_state=0)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.fit(X)
assert_almost_equal(covariance_prior_default[cov_type],
bgmm.covariance_prior_)
def test_bayesian_mixture_check_is_fitted():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
# Check raise message
bgmm = BayesianGaussianMixture(random_state=rng)
X = rng.rand(n_samples, n_features)
assert_raise_message(ValueError,
'This BayesianGaussianMixture instance is not '
'fitted yet.', bgmm.score, X)
def test_bayesian_mixture_weights():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
# Check the weights values
expected_weights = (bgmm.dirichlet_concentration_ /
np.sum(bgmm.dirichlet_concentration_))
    predicted_weights = bgmm.weights_
    assert_almost_equal(expected_weights, predicted_weights)
# Check the weights sum = 1
assert_almost_equal(np.sum(bgmm.weights_), 1.0)
@ignore_warnings(category=ConvergenceWarning)
def test_monotonic_likelihood():
    # We check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm = BayesianGaussianMixture(n_components=2 * n_components,
covariance_type=covar_type,
warm_start=True, max_iter=1,
random_state=rng, tol=1e-4)
current_lower_bound = -np.infty
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(500):
prev_lower_bound = current_lower_bound
current_lower_bound = bgmm.fit(X).lower_bound_
assert_greater_equal(current_lower_bound, prev_lower_bound)
if bgmm.converged_:
break
assert(bgmm.converged_)
def test_compare_covar_type():
# We can compare the 'full' precision with the other cov_type if we apply
# 1 iter of the M-step (done during _initialize_parameters).
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
X = rand_data.X['full']
n_components = rand_data.n_components
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(n_components=2 * n_components,
covariance_type='full',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X)
full_covariances = (bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis])
# Check tied_covariance = mean(full_covariances, 0)
bgmm = BayesianGaussianMixture(n_components=2 * n_components,
covariance_type='tied',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X)
tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
# Check diag_covariance = diag(full_covariances)
bgmm = BayesianGaussianMixture(n_components=2 * n_components,
covariance_type='diag',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X)
diag_covariances = (bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis])
assert_almost_equal(diag_covariances,
np.array([np.diag(cov) for cov in full_covariances]))
# Check spherical_covariance = np.mean(diag_covariances, 0)
bgmm = BayesianGaussianMixture(n_components=2 * n_components,
covariance_type='spherical',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X)
spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(spherical_covariances, np.mean(diag_covariances, 1))
@ignore_warnings(category=ConvergenceWarning)
def test_check_covariance_precision():
# We check that the dot product of the covariance and the precision
# matrices is identity.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components, n_features = 2 * rand_data.n_components, 2
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(n_components=n_components,
max_iter=100, random_state=rng, tol=1e-3,
reg_covar=0)
for covar_type in COVARIANCE_TYPE:
bgmm.covariance_type = covar_type
bgmm.fit(rand_data.X[covar_type])
if covar_type == 'full':
for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
assert_almost_equal(np.dot(covar, precision),
np.eye(n_features))
elif covar_type == 'tied':
assert_almost_equal(np.dot(bgmm.covariances_, bgmm.precisions_),
np.eye(n_features))
elif covar_type == 'diag':
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones((n_components, n_features)))
else:
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones(n_components))
| bsd-3-clause |
bachiraoun/fullrmc | Core/Collection.py | 1 | 96743 | """
It contains a collection of methods and classes that are useful for the package.
"""
# standard libraries imports
from __future__ import print_function
import os
import sys
import time
import uuid
from random import random as generate_random_float # generates a random float number between 0 and 1
from random import randint as generate_random_integer # generates a random integer number between given lower and upper limits
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
def get_caller_frames(engine, frame, subframeToAll, caller):
"""
Get list of frames for a function caller.
:Parameters:
#. engine (None, Engine): The stochastic engine in consideration.
#. frame (None, string): The frame name. If engine is given as
None, only None will be accepted as frame value.
#. subframeToAll (boolean): If frame is a subframe then all multiframe
subframes must be considered.
#. caller (string): Caller name for logging and debugging purposes.
:Returns:
#. usedIncluded (boolean): Whether engine used frame is included in
the built frames list. If frame is given as None, True will always
be returned.
#. frame (string): The given frame in parameters. If subframe is given
and subframeToAll is True, then multiframe is returned.
#. allFrames (list): List of all frames.
.. code-block:: python
import inspect
from fullrmc.Core.Collection import get_caller_frames
# Assuming self is a constraint and get_caller_frames is called from within a method ...
usedIncluded, frame, allFrames = get_caller_frames(engine=self.engine,
frame='frame_name',
subframeToAll=True,
caller="%s.%s"%(self.__class__.__name__,inspect.stack()[0][3]) )
"""
usedFrame = None
if engine is not None:
usedFrame = engine.usedFrame
if frame is None:
frame = usedFrame
usedIncluded = True
if frame is not None:
isNormalFrame, isMultiframe, isSubframe = engine.get_frame_category(frame=frame)
assert not isMultiframe, LOGGER.error("This should have never happened @%s._get_method_frames_from_frame_argument. Please report issue ..."%(caller,))
if isSubframe and subframeToAll:
_frame = frame.split(os.sep)[0]
LOGGER.usage("Given frame is None while engine used frame '%s' is a subframe. %s will be applied to all subframes of '%s'"%(frame, caller, _frame))
frame = _frame
allFrames = [os.path.join(frame, frm) for frm in engine.frames[frame]['frames_name']]
else:
allFrames = [frame]
else:
allFrames = []
else:
        assert engine is not None, LOGGER.error("Engine is not given, frame must then be None but '%s' was given"%(frame,))
isNormalFrame, isMultiframe, isSubframe = engine.get_frame_category(frame=frame)
assert usedFrame is not None, LOGGER.error("This should have never happened @%s._get_method_frames_from_frame_argument. Please report issue ..."%(caller,))
if isMultiframe or (subframeToAll and isSubframe):
if isMultiframe:
LOGGER.usage("Given frame '%s' is a multiframe. %s will be applied to all subframes"%(frame, caller))
else:
_frame = frame.split(os.sep)[0]
LOGGER.usage("Given frame '%s' is a subframe. %s will be applied to all subframes of '%s'"%(frame, caller, _frame))
frame = _frame
allFrames = [os.path.join(frame, frm) for frm in engine.frames[frame]['frames_name']]
else:
allFrames = [frame]
usedIncluded = usedFrame==frame or usedFrame.split(os.sep)[0] == frame
# return
return usedIncluded, frame, allFrames
def get_real_elements_weight(elements, weightsDict, weighting):
"""
Get elements weights given a dictionary of weights and a weighting scheme.
If element weight is not defined in weightsDict then weight is fetched
from pdbparser elements database using weighting scheme.
:Parameters:
#. elements (list): List of elements.
#. weightsDict (dict): Dictionary of fixed weights.
#. weighting (str): Weighting scheme.
:Returns:
#. elementsWeight (dict): Elements weights got from weightsDict
and completed using weighting scheme,
"""
elementsWeight = {}
for el in elements:
        # look up the database only when no fixed weight is given for el
        w = weightsDict[el] if el in weightsDict else get_element_property(el,weighting)
if w is None:
raise LOGGER.error("element '%s' weight is found to be None. Numerical elements weight is required."%(el) )
if isinstance(w, complex):
LOGGER.fixed("element '%s' weight is found to be complex '%s'. Value is cast to its real part."%(el,w) )
w = w.real
try:
w = FLOAT_TYPE(w)
except:
            raise LOGGER.error("element '%s' weight '%s' is found to be non-numerical."%(el,w) )
elementsWeight[el] = w
# return dict
return elementsWeight
def raise_if_collected(func):
""" Constraints method decorator that raises an error whenever the
method is called and the system has atoms that were removed.
"""
def wrapper(self, *args, **kwargs):
assert not len(self._atomsCollector), LOGGER.error("Calling '%s.%s' is not allowed when system has collected atoms."%(self.__class__.__name__,func.__name__))
return func(self, *args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
def reset_if_collected_out_of_date(func):
""" Constraints method decorator that resets the constraint whenever
the method is called and the system has atoms that were removed.
"""
def wrapper(self, *args, **kwargs):
if self.engine is not None:
if set(self.engine._atomsCollector.indexes) != set(self._atomsCollector.indexes):
# reset constraints
self.reset_constraint()
# collect atoms
for realIndex in self.engine._atomsCollector.indexes:
self._on_collector_collect_atom(realIndex=realIndex)
return func(self, *args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
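# A minimal sketch of how the decorators above are used. The `_Stub` class is
# hypothetical; real constraints expose an `_atomsCollector` attribute:
def _raise_if_collected_example():
    class _Stub(object):
        _atomsCollector = []  # no atoms were removed from the system
        @raise_if_collected
        def set_data(self):
            return True
    # passes because the collector is empty; raises an AssertionError otherwise
    return _Stub().set_data()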
def is_number(number):
"""
Check if number is convertible to float.
:Parameters:
#. number (str, number): Input number.
:Returns:
#. result (bool): True if convertible, False otherwise
"""
if isinstance(number, (int, long, float, complex)):
return True
try:
float(number)
except:
return False
else:
return True
def is_integer(number, precision=10e-10):
"""
Check if number is convertible to integer.
:Parameters:
#. number (str, number): Input number.
#. precision (number): To avoid floating errors,
a precision should be given.
:Returns:
#. result (bool): True if convertible, False otherwise.
"""
if isinstance(number, (int, long)):
return True
try:
number = float(number)
except:
return False
else:
if np.abs(number-int(number)) < precision:
return True
else:
return False
def get_elapsed_time(start, format="%d days, %d hours, %d minutes, %d seconds"):
"""
Get formatted time elapsed.
:Parameters:
#. start (time.time): A time instance.
#. format (string): The format string. must contain exactly four '%d'.
:Returns:
#. time (string): The formatted elapsed time.
"""
# get all time info
days = divmod(time.time()-start,86400)
hours = divmod(days[1],3600)
minutes = divmod(hours[1],60)
seconds = minutes[1]
return format % (days[0],hours[0],minutes[0],seconds)
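# A quick usage sketch: format the wall time elapsed since a recorded instant.
def _get_elapsed_time_example():
    start = time.time()
    # ... a long running computation would go here ...
    return get_elapsed_time(start)  # e.g. '0 days, 0 hours, 0 minutes, 0 seconds'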
def get_memory_usage():
"""
Get current process memory usage. This is method requires
psutils to be installed.
:Returns:
#. memory (float, None): The memory usage in Megabytes.
When psutils is not installed, None is returned.
"""
try:
import psutil
process = psutil.Process(os.getpid())
memory = float( process.memory_info()[0] ) / float(2 ** 20)
except:
LOGGER.warn("memory usage cannot be profiled. psutil is not installed. pip install psutil")
memory = None
return memory
def get_path(key=None):
"""
Get all paths information needed about the running script and python
executable path.
:Parameters:
#. key (None, string): the path to return. If not None is given,
it can take any of the following:\n
#. cwd: current working directory
#. script: the script's total path
#. exe: python executable path
#. script_name: the script name
#. relative_script_dir: the script's relative directory path
#. script_dir: the script's absolute directory path
#. fullrmc: fullrmc package path
:Returns:
#. path (dictionary, value): If key is not None it returns the value of paths
dictionary key. Otherwise all the dictionary is returned.
"""
import fullrmc
# check key type
if key is not None:
assert isinstance(key, basestring), LOGGER.error("key must be a string of None")
key=str(key).lower().strip()
# create paths
paths = {}
paths["cwd"] = os.getcwd()
paths["script"] = sys.argv[0]
paths["exe"] = os.path.dirname(sys.executable)
pathname, scriptName = os.path.split(sys.argv[0])
paths["script_name"] = scriptName
paths["relative_script_dir"] = pathname
paths["script_dir"] = os.path.abspath(pathname)
paths["fullrmc"] = os.path.split(fullrmc.__file__)[0]
# return paths
if key is None:
return paths
else:
assert key in paths, LOGGER.error("key is not defined")
return paths[key]
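# A quick usage sketch: fetch a single path entry or the whole dictionary.
def _get_path_example():
    cwd = get_path('cwd')    # current working directory
    allPaths = get_path()    # the full paths dictionary
    return cwd, allPaths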
def rebin(data, bin=0.05, check=False):
"""
Re-bin 2D data of shape (N,2). In general, fullrmc requires equivalently
spaced experimental data bins. This function can be used to recompute
any type of experimental data according to a set bin size.
:Parameters:
#. data (numpy.ndarray): The (N,2) shape data where first column is
considered experimental data space values (e.g. r, q) and
second column experimental data values.
#. bin (number): New desired bin size.
#. check (boolean): whether to check arguments before re-binning.
:Returns:
#. X (numpy.ndarray): First column re-binned.
#. Y (numpy.ndarray): Second column re-binned.
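**Usage example** (a minimal sketch using synthetic, unevenly spaced data):
.. code-block:: python

    import numpy as np
    # first column: unevenly spaced r values, second column: intensities
    r    = np.cumsum( np.random.random(100)*0.1 )
    data = np.transpose( [r, np.sin(r)] )
    # re-bin to a constant 0.05 bin size
    X, Y = rebin(data, bin=0.05, check=True)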
"""
if check:
assert isinstance(data, np.ndarray), LOGGER.error("data must be numpy.ndarray instance")
assert len(data.shape)==2, LOGGER.error("data must be of 2 dimensions")
assert data.shape[1] ==2, LOGGER.error("data must have 2 columns")
assert is_number(bin), LOGGER.error("bin must be a number")
bin = float(bin)
assert bin>0, LOGGER.error("bin must be positive")
# rebin
x = data[:,0].astype(float)
y = data[:,1].astype(float)
rx = []
ry = []
x0 = int(x[0]/bin)*bin-bin/2.
xn = int(x[-1]/bin)*bin+bin/2.
bins = np.arange(x0,xn, bin)
if bins[-1] != xn:
bins = np.append(bins, xn)
# get weights histogram
W,E = np.histogram(x, bins=bins)
W[np.where(W==0)[0]] = 1
# get data histogram
S,E = np.histogram(x, bins=bins, weights=y)
# return
return (E[1:]+E[:-1])/2., S/W
def smooth(data, winLen=11, window='hanning', check=False):
"""
Smooth 1D data using window function and length.
:Parameters:
#. data (numpy.ndarray): the 1D numpy data.
#. winLen (integer): the smoothing window length.
#. window (str): The smoothing window type. Can be anything among
'flat', 'hanning', 'hamming', 'bartlett' and 'blackman'.
#. check (boolean): whether to check arguments before smoothing data.
:Returns:
#. smoothed (numpy.ndarray): the smoothed 1D data array.
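**Usage example** (a minimal sketch on noisy synthetic data):
.. code-block:: python

    import numpy as np
    noisy    = np.sin( np.linspace(0, 2*np.pi, 200) ) + 0.1*np.random.randn(200)
    smoothed = smooth(noisy, winLen=11, window='hanning', check=True)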
"""
if check:
assert isinstance(data, np.ndarray), LOGGER.error("data must be numpy.ndarray instance")
assert len(data.shape)==1, LOGGER.error("data must be of 1 dimension")
assert is_integer(winLen), LOGGER.error("winLen must be an integer")
winLen = int(winLen)
assert winLen>=3, LOGGER.error("winLen must be at least 3")
assert data.size > winLen, LOGGER.error("data needs to be bigger than window size.")
assert window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman'], LOGGER.error("window must be any of ('flat', 'hanning', 'hamming', 'bartlett', 'blackman')")
# compute smoothed data
s=np.r_[data[winLen-1:0:-1],data,data[-1:-winLen:-1]]
if window == 'flat': #moving average
w=np.ones(winLen,'d')
else:
w=eval('np.'+window+'(winLen)')
S=np.convolve(w/w.sum(),s, mode='valid')
# get data and return
f = winLen/2
t = f-winLen+1
return S[f:t]
def get_random_perpendicular_vector(vector):
"""
Get random normalized perpendicular vector to a given vector.
:Parameters:
#. vector (numpy.ndarray, list, set, tuple): Given vector to compute
a random perpendicular vector to it.
:Returns:
#. perpVector (numpy.ndarray): Perpendicular vector of type
fullrmc.Globals.FLOAT_TYPE
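**Usage example** (a minimal sketch; the dot product should be ~0):
.. code-block:: python

    import numpy as np
    vector     = np.array([1., 1., 1.])
    perpVector = get_random_perpendicular_vector(vector)
    print( np.dot(vector, perpVector) )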
"""
vectorNorm = np.linalg.norm(vector)
assert vectorNorm, LOGGER.error("vector returned 0 norm")
# easy cases
if np.abs(vector[0])<PRECISION:
return np.array([1,0,0], dtype=FLOAT_TYPE)
elif np.abs(vector[1])<PRECISION:
return np.array([0,1,0], dtype=FLOAT_TYPE)
elif np.abs(vector[2])<PRECISION:
return np.array([0,0,1], dtype=FLOAT_TYPE)
# generate random vector
randVect = 1-2*np.random.random(3)
randvect = np.array([vector[idx]*randVect[idx] for idx in range(3)])
# get perpendicular vector
perpVector = np.cross(randvect,vector)
# normalize, coerce and return
return np.array(perpVector/np.linalg.norm(perpVector), dtype=FLOAT_TYPE)
def get_principal_axis(coordinates, weights=None):
"""
Calculate principal axis of a set of atoms coordinates.
:Parameters:
#. coordinates (np.ndarray): Atoms (N,3) coordinates array.
#. weights (numpy.ndarray, None): List of weights to compute the
weighted Center Of Mass (COM) calculation. Must be a numpy.ndarray
of numbers of the same length as coordinates. None is accepted for
equivalent weighting.
:Returns:
#. center (numpy.ndarray): the weighted COM of the atoms.
#. eval1 (fullrmc.Globals.FLOAT_TYPE): Biggest eigen value.
#. eval2 (fullrmc.Globals.FLOAT_TYPE): Second biggest eigen value.
#. eval3 (fullrmc.Globals.FLOAT_TYPE): Smallest eigen value.
#. axis1 (numpy.ndarray): Principal axis corresponding to the
biggest eigen value.
#. axis2 (numpy.ndarray): Principal axis corresponding to the
second biggest eigen value.
#. axis3 (numpy.ndarray): Principal axis corresponding to the
smallest eigen value.
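**Usage example** (a minimal sketch on random coordinates with equal weighting):
.. code-block:: python

    import numpy as np
    coords = np.random.random((100,3))
    center, eval1, eval2, eval3, axis1, axis2, axis3 = get_principal_axis(coords)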
"""
# multiply by weights
if weights is not None:
coordinates[:,0] *= weights
coordinates[:,1] *= weights
coordinates[:,2] *= weights
norm = np.sum(weights)
else:
norm = coordinates.shape[0]
# compute center
center = np.array(np.sum(coordinates, 0)/norm, dtype=FLOAT_TYPE)
# coordinates in center
centerCoords = coordinates - center
# compute principal axis matrix
inertia = np.dot(centerCoords.transpose(), centerCoords)
# compute eigen values and eigen vectors
# warning eigen values are not necessary ordered!
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html
e_values, e_vectors = np.linalg.eig(inertia)
e_values = list(e_values)
e_vectors = list(e_vectors.transpose())
# get eval1 and axis1
eval1 = max(e_values)
vect1 = np.array(e_vectors.pop(e_values.index(eval1)), dtype=FLOAT_TYPE)
e_values.remove(eval1)
# get eval2 and axis2
eval2 = max(e_values)
vect2 = np.array(e_vectors.pop(e_values.index(eval2)), dtype=FLOAT_TYPE)
e_values.remove(eval2)
# get eval3 and axis3
eval3 = e_values[0]
vect3 = np.array(e_vectors[0], dtype=FLOAT_TYPE)
return center, FLOAT_TYPE(eval1), FLOAT_TYPE(eval2), FLOAT_TYPE(eval3), vect1, vect2, vect3
def get_rotation_matrix(rotationVector, angle):
"""
Calculate the rotation (3X3) matrix about an axis (rotationVector)
by a rotation angle.
:Parameters:
#. rotationVector (list, tuple, numpy.ndarray): Rotation axis
coordinates.
#. angle (float): Rotation angle in rad.
:Returns:
#. rotationMatrix (numpy.ndarray): Computed (3X3) rotation matrix
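**Usage example** (a minimal sketch; rotating the X unit vector about the Z axis):
.. code-block:: python

    import numpy as np
    matrix  = get_rotation_matrix([0,0,1], np.pi/2.)
    rotated = np.dot( matrix, np.array([1.,0.,0.]) )
    # with this quaternion convention, rotated is approximately [0, -1, 0]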
"""
angle = float(angle)
axis = rotationVector/np.sqrt(np.dot(rotationVector , rotationVector))
a = np.cos(angle/2)
b,c,d = -axis*np.sin(angle/2.)
return np.array( [ [a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c] ] , dtype = FLOAT_TYPE)
def rotate(xyzArray , rotationMatrix):
"""
Rotate (N,3) numpy.array using a rotation matrix.
The array itself will be rotated and not a copy of it.
:Parameters:
#. xyzArray (numpy.ndarray): the xyz (N,3) array to rotate.
#. rotationMatrix (numpy.ndarray): the (3X3) rotation matrix.
"""
arrayType = xyzArray.dtype
for idx in range(xyzArray.shape[0]):
xyzArray[idx,:] = np.dot( rotationMatrix, xyzArray[idx,:]).astype(arrayType)
return xyzArray
def get_orientation_matrix(arrayAxis, alignToAxis):
"""
Get the rotation matrix that aligns arrayAxis to alignToAxis
:Parameters:
#. arrayAxis (list, tuple, numpy.ndarray): xyzArray axis.
#. alignToAxis (list, tuple, numpy.ndarray): The axis to align to.
:Returns:
#. rotationMatrix (numpy.ndarray): The computed (3X3) rotation matrix.
"""
# normalize alignToAxis
alignToAxisNorm = np.linalg.norm(alignToAxis)
assert alignToAxisNorm>0, LOGGER.error("alignToAxis returned 0 norm")
alignToAxis = np.array(alignToAxis, dtype=FLOAT_TYPE)/alignToAxisNorm
# normalize arrayAxis
arrayAxisNorm = np.linalg.norm(arrayAxis)
assert arrayAxisNorm>0, LOGGER.error("arrayAxis returned 0 norm")
arrayAxis = np.array(arrayAxis, dtype=FLOAT_TYPE)/arrayAxisNorm
# calculate rotationAngle
dotProduct = np.dot(arrayAxis, alignToAxis)
if np.abs(dotProduct-1) <= PRECISION :
rotationAngle = 0
elif np.abs(dotProduct+1) <= PRECISION :
rotationAngle = PI
else:
rotationAngle = np.arccos( dotProduct )
if np.isnan(rotationAngle) or np.abs(rotationAngle) <= PRECISION :
return np.array([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]).astype(FLOAT_TYPE)
# calculate rotation axis.
if np.abs(rotationAngle-PI) <= PRECISION:
rotationAxis = get_random_perpendicular_vector(arrayAxis)
else:
rotationAxis = np.cross(alignToAxis, arrayAxis)
#rotationAxis /= np.linalg.norm(rotationAxis)
# calculate rotation matrix
return get_rotation_matrix(rotationAxis, rotationAngle)
def orient(xyzArray, arrayAxis, alignToAxis):
"""
Rotate xyzArray using the rotation matrix that aligns
arrayAxis to alignToAxis.
:Parameters:
#. xyzArray (numpy.ndarray): The xyz (N,3) array to rotate.
#. arrayAxis (list, tuple, numpy.ndarray): xyzArray axis.
#. alignToAxis (list, tuple, numpy.ndarray): The axis to align to.
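**Usage example** (a minimal sketch; aligning an array's X axis onto the Z axis):
.. code-block:: python

    import numpy as np
    xyzArray = np.array( [[1.,0.,0.], [2.,0.,0.]] )
    # xyzArray is rotated in place; its first row becomes approximately [0,0,1]
    oriented = orient(xyzArray, arrayAxis=[1,0,0], alignToAxis=[0,0,1])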
"""
# normalize alignToAxis
alignToAxisNorm = np.linalg.norm(alignToAxis)
assert alignToAxisNorm>0, LOGGER.error("alignToAxis returned 0 norm")
alignToAxis = np.array(alignToAxis, dtype=FLOAT_TYPE)/alignToAxisNorm
# normalize arrayAxis
arrayAxisNorm = np.linalg.norm(arrayAxis)
assert arrayAxisNorm>0, LOGGER.error("arrayAxis returned 0 norm")
arrayAxis = np.array(arrayAxis, dtype=FLOAT_TYPE)/arrayAxisNorm
# calculate rotationAngle
dotProduct = np.dot(arrayAxis, alignToAxis)
if np.abs(dotProduct-1) <= PRECISION :
rotationAngle = 0
elif np.abs(dotProduct+1) <= PRECISION :
rotationAngle = PI
else:
rotationAngle = np.arccos( dotProduct )
if np.isnan(rotationAngle) or np.abs(rotationAngle) <= PRECISION :
return xyzArray
# calculate rotation axis.
if np.abs(rotationAngle-PI) <= PRECISION:
rotationAxis = get_random_perpendicular_vector(arrayAxis)
else:
rotationAxis = np.cross(alignToAxis, arrayAxis)
#rotationAxis /= np.linalg.norm(rotationAxis)
# calculate rotation matrix
rotationMatrix = get_rotation_matrix(rotationAxis, rotationAngle)
# rotate and return
return rotate(xyzArray , rotationMatrix)
def get_superposition_transformation(refArray, array, check=False):
"""
Calculate the rotation tensor and the translations that minimizes
the root mean square deviation between an array of vectors
and a reference array.
:Parameters:
#. refArray (numpy.ndarray): The NX3 reference array to
superpose to.
#. array (numpy.ndarray): The NX3 array to calculate the
transformation of.
#. check (boolean): Whether to check arguments before
generating points.
:Returns:
#. rotationMatrix (numpy.ndarray): The 3X3 rotation tensor.
#. refArrayCOM (numpy.ndarray): The 1X3 vector center of mass of
refArray.
#. arrayCOM (numpy.ndarray): The 1X3 vector center of mass of array.
#. rms (number): The minimum root mean square deviation value.
"""
if check:
# check array
assert isinstance(array, np.ndarray), LOGGER.error("array must be numpy.ndarray instance")
assert len(array.shape)<=2, LOGGER.error("array must be a vector or a matrix")
if len(array.shape)==2:
assert array.shape[1]==3, LOGGER.error("array number of columns must be 3")
else:
assert array.shape[0]==3, LOGGER.error("vector array number of items must be 3")
# check refArray
assert isinstance(refArray, np.ndarray), LOGGER.error("refArray must be numpy.ndarray instance")
assert len(refArray.shape)<=2, LOGGER.error("refArray must be a vector or a matrix")
if len(refArray.shape)==2:
assert refArray.shape[1]==3, LOGGER.error("refArray number of columns must be 3")
else:
assert refArray.shape[0]==3, LOGGER.error("vector refArray number of items must be 3")
# check shapes are identical
assert array.shape == refArray.shape, LOGGER.error("refArray and array must have the same number of vectors")
# calculate center of mass of array
arrayCOM = np.sum(array, axis=0)/array.shape[0]
# calculate cross matrix and reference config center of mass
r_ref = array-arrayCOM
refArrayCOM = np.sum(refArray, axis=0)/refArray.shape[0]
cross = np.dot(refArray.transpose(),r_ref)
possq = np.add.reduce(refArray**2,1)+np.add.reduce(r_ref**2,1)
possq = np.sum(possq)
# calculate kross
kross = np.zeros((4, 4), dtype=FLOAT_TYPE)
kross[0, 0] = -cross[0, 0]-cross[1, 1]-cross[2, 2]
kross[0, 1] = cross[1, 2]-cross[2, 1]
kross[0, 2] = cross[2, 0]-cross[0, 2]
kross[0, 3] = cross[0, 1]-cross[1, 0]
kross[1, 1] = -cross[0, 0]+cross[1, 1]+cross[2, 2]
kross[1, 2] = -cross[0, 1]-cross[1, 0]
kross[1, 3] = -cross[0, 2]-cross[2, 0]
kross[2, 2] = cross[0, 0]-cross[1, 1]+cross[2, 2]
kross[2, 3] = -cross[1, 2]-cross[2, 1]
kross[3, 3] = cross[0, 0]+cross[1, 1]-cross[2, 2]
for i in range(1, 4):
for j in range(i):
kross[i, j] = kross[j, i]
kross = 2.*kross
offset = possq - np.add.reduce(refArrayCOM**2)
for i in range(4):
kross[i, i] = kross[i, i] + offset
# get eigen values
e, v = np.linalg.eig(kross)
i = np.argmin(e)
v = np.array(v[:,i], dtype=FLOAT_TYPE)
if v[0] < 0: v = -v
if e[i] <= 0.:
rms = FLOAT_TYPE(0.)
else:
rms = np.sqrt(e[i])
# calculate the rotation matrix
rot = np.zeros((3,3,4,4), dtype=FLOAT_TYPE)
rot[0,0, 0,0] = FLOAT_TYPE( 1.0)
rot[0,0, 1,1] = FLOAT_TYPE( 1.0)
rot[0,0, 2,2] = FLOAT_TYPE(-1.0)
rot[0,0, 3,3] = FLOAT_TYPE(-1.0)
rot[1,1, 0,0] = FLOAT_TYPE( 1.0)
rot[1,1, 1,1] = FLOAT_TYPE(-1.0)
rot[1,1, 2,2] = FLOAT_TYPE( 1.0)
rot[1,1, 3,3] = FLOAT_TYPE(-1.0)
rot[2,2, 0,0] = FLOAT_TYPE( 1.0)
rot[2,2, 1,1] = FLOAT_TYPE(-1.0)
rot[2,2, 2,2] = FLOAT_TYPE(-1.0)
rot[2,2, 3,3] = FLOAT_TYPE( 1.0)
rot[0,1, 1,2] = FLOAT_TYPE( 2.0)
rot[0,1, 0,3] = FLOAT_TYPE(-2.0)
rot[0,2, 0,2] = FLOAT_TYPE( 2.0)
rot[0,2, 1,3] = FLOAT_TYPE( 2.0)
rot[1,0, 0,3] = FLOAT_TYPE( 2.0)
rot[1,0, 1,2] = FLOAT_TYPE( 2.0)
rot[1,2, 0,1] = FLOAT_TYPE(-2.0)
rot[1,2, 2,3] = FLOAT_TYPE( 2.0)
rot[2,0, 0,2] = FLOAT_TYPE(-2.0)
rot[2,0, 1,3] = FLOAT_TYPE( 2.0)
rot[2,1, 0,1] = FLOAT_TYPE( 2.0)
rot[2,1, 2,3] = FLOAT_TYPE( 2.0)
rotationMatrix = np.dot(np.dot(rot, v), v)
return rotationMatrix, refArrayCOM, arrayCOM, rms
def superpose_array(refArray, array, check=False):
"""
Superpose arrays by calculating the rotation matrix and the
translations that minimize the root mean square deviation between an
array of vectors and a reference array.
:Parameters:
#. refArray (numpy.ndarray): the NX3 reference array to superpose to.
#. array (numpy.ndarray): the NX3 array to calculate the
transformation of.
#. check (boolean): whether to check arguments before generating
points.
:Returns:
#. superposedArray (numpy.ndarray): the NX3 array to superposed array.
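**Usage example** (a minimal call sketch; superposing a translated copy onto its reference):
.. code-block:: python

    import numpy as np
    refArray   = np.random.random((10,3))
    array      = refArray + np.array([1., 2., 3.])
    superposed = superpose_array(refArray, array)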
"""
rotationMatrix, _,_,_ = get_superposition_transformation(refArray=refArray, array=array, check=check)
return np.dot( rotationMatrix, np.transpose(array).\
reshape(1,3,-1)).transpose().reshape(-1,3)
def generate_random_vector(minAmp, maxAmp):
"""
Generate random vector in 3D.
:Parameters:
#. minAmp (number): Vector minimum amplitude.
#. maxAmp (number): Vector maximum amplitude.
:Returns:
#. vector (numpy.ndarray): the vector [X,Y,Z] array
"""
# generate random vector and ensure it is not zero
vector = np.array(1-2*np.random.random(3), dtype=FLOAT_TYPE)
norm = np.linalg.norm(vector)
if norm == 0:
while norm == 0:
vector = np.array(1-2*np.random.random(3), dtype=FLOAT_TYPE)
norm = np.linalg.norm(vector)
# normalize vector
vector /= FLOAT_TYPE( norm )
# compute baseVector
baseVector = FLOAT_TYPE(vector*minAmp)
# amplify vector
maxAmp = FLOAT_TYPE(maxAmp-minAmp)
vector *= FLOAT_TYPE(generate_random_float()*maxAmp)
vector += baseVector
# return vector
return vector
def generate_points_on_sphere(thetaFrom, thetaTo,
phiFrom, phiTo,
npoints=1,
check=False):
"""
Generate random points on a sphere of radius 1. Points are generated
using spherical coordinates arguments as in figure below. Theta [0,Pi]
is the angle between the generated point and Z axis. Phi [0,2Pi] is the
azimuthal angle measured in the XY plane from the X axis.
.. image:: sphericalCoordsSystem.png
:align: left
:height: 200px
:width: 200px
:Parameters:
#. thetaFrom (number): The minimum theta value.
#. thetaTo (number): The maximum theta value.
#. phiFrom (number): The minimum phi value.
#. phiTo (number): The maximum phi value.
#. npoints (integer): The number of points to generate
#. check (boolean): whether to check arguments before generating points.
:Returns:
#. x (numpy.ndarray): The (npoints,1) numpy array of all generated
points x coordinates.
#. y (numpy.ndarray): The (npoints,1) numpy array of all generated
points y coordinates.
#. z (numpy.ndarray): The (npoints,1) numpy array of all generated
points z coordinates.
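**Usage example** (a minimal sketch; 100 points on the upper hemisphere):
.. code-block:: python

    import numpy as np
    x, y, z = generate_points_on_sphere(thetaFrom=0, thetaTo=np.pi/2.,
                                        phiFrom=0, phiTo=2*np.pi,
                                        npoints=100)
    # note that theta is sampled uniformly, points are therefore
    # denser towards the poles.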
"""
if check:
assert is_integer(npoints) , LOGGER.error("npoints must be an integer")
assert is_number(thetaFrom) , LOGGER.error("thetaFrom must be a number")
assert is_number(thetaTo) , LOGGER.error("thetaTo must be a number")
assert is_number(phiFrom) , LOGGER.error("phiFrom must be a number")
assert is_number(phiTo) , LOGGER.error("phiTo must be a number")
npoints = INT_TYPE(npoints)
thetaFrom = FLOAT_TYPE(thetaFrom)
thetaTo = FLOAT_TYPE(thetaTo)
phiFrom = FLOAT_TYPE(phiFrom)
phiTo = FLOAT_TYPE(phiTo)
assert npoints>=1 , LOGGER.error("npoints must be bigger than 0")
assert thetaFrom>=0 , LOGGER.error("thetaFrom must be positive")
assert thetaTo<=PI , LOGGER.error("thetaTo must be smaller than %.6f"%PI)
assert thetaFrom<thetaTo , LOGGER.error("thetaFrom must be smaller than thetaTo")
assert phiFrom>=0 , LOGGER.error("phiFrom must be positive")
assert phiTo<=2*PI , LOGGER.error("phiTo must be smaller than %.6f"%(2*PI))
assert phiFrom<phiTo , LOGGER.error("phiFrom must be smaller than phiTo")
else:
# cast variables
npoints = INT_TYPE(npoints)
thetaFrom = FLOAT_TYPE(thetaFrom)
thetaTo = FLOAT_TYPE(thetaTo)
phiFrom = FLOAT_TYPE(phiFrom)
phiTo = FLOAT_TYPE(phiTo)
# calculate differences
deltaTheta = thetaTo-thetaFrom
deltaPhi = phiTo-phiFrom
# theta
theta = thetaFrom+np.random.random(npoints).astype(FLOAT_TYPE)*deltaTheta
# phi
phi = phiFrom+np.random.random(npoints).astype(FLOAT_TYPE)*deltaPhi
# compute sin and cos
sinTheta = np.sin(theta)
sinPhi = np.sin(phi)
cosTheta = np.cos(theta)
cosPhi = np.cos(phi)
# compute x,y,z
x = sinTheta * cosPhi
y = sinTheta * sinPhi
z = cosTheta
# return
return x,y,z
def find_extrema(x, max = True, min = True, strict = False, withend = False):
"""
Get a vector extrema indexes and values.
:Parameters:
#. x (numpy.ndarray): The 1D data vector.
#. max (boolean): Whether to index the maxima.
#. min (boolean): Whether to index the minima.
#. strict (boolean): Whether not to index changes to zero gradient.
#. withend (boolean): Whether to always include x[0] and x[-1].
:Returns:
#. indexes (numpy.ndarray): Extrema indexes.
#. values (numpy.ndarray): Extrema values.
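**Usage example** (a minimal sketch; indexing the maxima of a sine wave):
.. code-block:: python

    import numpy as np
    x = np.sin( np.linspace(0, 4*np.pi, 200) )
    indexes, values = find_extrema(x, max=True, min=False)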
"""
# This is the gradient
dx = np.empty(len(x))
dx[1:] = np.diff(x)
dx[0] = dx[1]
# Clean up the gradient in order to pick out any change of sign
dx = np.sign(dx)
# define the threshold for whether to pick out changes to zero gradient
threshold = 0
if strict:
threshold = 1
# Second order diff to pick out the spikes
d2x = np.diff(dx)
if max and min:
d2x = abs(d2x)
elif max:
d2x = -d2x
# Take care of the two ends
if withend:
d2x[0] = 2
d2x[-1] = 2
# Sift out the list of extremas
ind = np.nonzero(d2x > threshold)[0]
return ind, x[ind]
def convert_Gr_to_gr(Gr, minIndex):
"""
Converts G(r) to g(r) by computing the following
:math:`g(r)=1+(\\frac{G(r)}{4 \\pi \\rho_{0} r})`
:Parameters:
#. Gr (numpy.ndarray): The G(r) numpy array of shape
(number of points, 2)
#. minIndex (int, tuple): The minima indexes to compute the
number density rho0. It can be a single peak or a list of peaks to
compute the mean slope instead.
:Returns:
#. minimas (numpy.ndarray): The minimas array found using minIndex
and used to compute the slope and therefore :math:`\\rho_{0}`.
#. slope (float): The computed slope from the minimas.
#. rho0 (float): The number density of the material.
#. g(r) (numpy.ndarray): the computed g(r).
**To visualize conversion**
.. code-block:: python
# peak indexes can be different, adjust according to your data
minPeaksIndex = [1,3,4]
minimas, slope, rho0, gr = convert_Gr_to_gr(Gr, minIndex=minPeaksIndex)
print('slope: %s --> rho0: %s'%(slope,rho0))
import matplotlib.pyplot as plt
line = np.transpose( [[0, Gr[-1,0]], [0, slope*Gr[-1,0]]] )
plt.plot(Gr[:,0],Gr[:,1], label='G(r)')
plt.plot(minimas[:,0], minimas[:,1], 'o', label='minimas')
plt.plot(line[:,0], line[:,1], label='density')
plt.plot(gr[:,0],gr[:,1], label='g(r)')
plt.legend()
plt.show()
"""
# check G(r)
assert isinstance(Gr, np.ndarray), "Gr must be a numpy.ndarray"
assert len(Gr.shape)==2, "Gr must have 2 dimensions"
assert Gr.shape[1] == 2, "Gr must be of shape (n,2)"
# check minIndex
if not isinstance(minIndex, (list, set, tuple)):
minIndex = [minIndex]
else:
minIndex = list(minIndex)
for idx, mi in enumerate(minIndex):
assert is_integer(mi), "minIndex items must be integers"
mi = int(mi)
assert mi>=0, "minIndex items must be >=0"
minIndex[idx] = mi
# get minsIndexes
minsIndexes = find_extrema(x=Gr[:,1], max=False)[0]
# get minimas points
minIndex = [minsIndexes[mi] for mi in minIndex]
minimas = np.transpose([Gr[minIndex,0], Gr[minIndex,1]])
# compute slope
x = float( np.mean(minimas[:,0]) )
y = float( np.mean(minimas[:,1]) )
slope = y/x
# compute rho
rho0 = -slope/4./np.pi
# compute g(r)
gr = 1+Gr[:,1]/(-slope*Gr[:,0])
gr = np.transpose( [Gr[:,0], gr] )
# return
return minimas, slope, rho0, gr
def generate_vectors_in_solid_angle(direction,
maxAngle,
numberOfVectors=1,
check=False):
"""
Generate random vectors that satisfy angle condition with a
direction vector. Angle between any generated vector and direction must
be smaller than given maxAngle.
+----------------------------------------+----------------------------------------+----------------------------------------+
|.. figure:: 100randomVectors30deg.png |.. figure:: 200randomVectors45deg.png |.. figure:: 500randomVectors100deg.png |
| :width: 400px | :width: 400px | :width: 400px |
| :height: 300px | :height: 300px | :height: 300px |
| :align: left | :align: left | :align: left |
| | | |
| a) 100 vectors generated around        | b) 200 vectors generated around        | c) 500 vectors generated around        |
| OX axis within a maximum angle | [1,-1,1] axis within a maximum angle | [2,5,1] axis within a maximum angle |
| separation of 30 degrees. | separation of 45 degrees. | separation of 100 degrees. |
+----------------------------------------+----------------------------------------+----------------------------------------+
:Parameters:
#. direction (list, tuple, numpy.ndarray): The direction around which to create the vectors.
#. maxAngle (number): The maximum angle allowed.
#. numberOfVectors (integer): The number of vectors to generate.
#. check (boolean): whether to check arguments before generating
vectors.
:Returns:
#. vectors (numpy.ndarray): The (numberOfVectors,3) numpy array of
generated vectors.
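**Usage example** (a minimal sketch; 50 vectors within 30 degrees of the Z axis):
.. code-block:: python

    import numpy as np
    vectors = generate_vectors_in_solid_angle(direction=[0,0,1],
                                              maxAngle=30.*np.pi/180.,
                                              numberOfVectors=50,
                                              check=True)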
"""
if check:
assert isinstance(direction, (list,set,tuple,np.ndarray)), LOGGER.error("direction must be a vector like list or array of length 3")
direction = list(direction)
assert len(direction)==3, LOGGER.error("direction vector must be of length 3")
dir = []
for d in direction:
assert is_number(d), LOGGER.error("direction items must be numbers")
dir.append(FLOAT_TYPE(d))
direction = np.array(dir)
assert np.abs(direction[0])>PRECISION or np.abs(direction[1])>PRECISION or np.abs(direction[2])>PRECISION, LOGGER.error("direction must be a non zero vector")
assert is_number(maxAngle), LOGGER.error("maxAngle must be a number")
maxAngle = FLOAT_TYPE(maxAngle)
assert maxAngle>0, LOGGER.error("maxAngle must be bigger than zero")
assert maxAngle<=PI, LOGGER.error("maxAngle must be smaller than %.6f"%PI)
assert is_integer(numberOfVectors), LOGGER.error("numberOfVectors must be integer")
numberOfVectors = INT_TYPE(numberOfVectors)
assert numberOfVectors>0, LOGGER.error("numberOfVectors must be bigger than 0")
# create axis
axis = np.array([1,0,0])
if np.abs(direction[1])<=PRECISION and np.abs(direction[2])<=PRECISION:
axis *= np.sign(direction[0])
# create vectors
vectors = np.zeros((numberOfVectors,3))
vectors[:,1] = 1-2*np.random.random(numberOfVectors)
vectors[:,2] = 1-2*np.random.random(numberOfVectors)#np.sign(np.random.random(numberOfVectors)-0.5)*np.sqrt(1-vectors[:,1]**2)
norm = np.sqrt(np.add.reduce(vectors**2, axis=1))
vectors[:,1] /= norm
vectors[:,2] /= norm
# create rotation angles
rotationAngles = PI/2.-np.random.random(numberOfVectors)*maxAngle
# create rotation axis
rotationAxes = np.cross(axis, vectors)
# rotate vectors to axis
for idx in range(numberOfVectors):
rotationMatrix = get_rotation_matrix(rotationAxes[idx,:], rotationAngles[idx])
vectors[idx,:] = np.dot( rotationMatrix, vectors[idx,:])
# normalize direction
direction /= np.linalg.norm(direction)
# get rotation matrix of axis to direction
rotationAngle = np.arccos( np.dot(direction, axis) )
if rotationAngle > PRECISION:
rotationAxis = np.cross(direction, axis)
rotationMatrix = get_rotation_matrix(rotationAxis, rotationAngle)
# rotate vectors to direction
for idx in range(numberOfVectors):
vectors[idx,:] = np.dot( rotationMatrix, vectors[idx,:])
return vectors.astype(FLOAT_TYPE)
def gaussian(x, center=0, FWHM=1, normalize=True, check=True):
"""
Compute the normal distribution or gaussian distribution of a given vector.
The probability density of the gaussian distribution is:
:math:`f(x,\\mu,\\sigma) = \\frac{1}{\\sigma\\sqrt{2\\pi}} e^{\\frac{-(x-\\mu)^{2}}{2\\sigma^2}}`
Where:\n
* :math:`\\mu` is the center of the gaussian. It is the mean or
expectation of the distribution, as well as the distribution's
median and mode.
* :math:`\\sigma` is its standard deviation.
* :math:`FWHM=2\\sqrt{2 ln 2} \\sigma` is the Full Width at Half
Maximum of the gaussian.
:Parameters:
#. x (numpy.ndarray): The vector to compute the gaussian
#. center (number): The center of the gaussian.
#. FWHM (number): The Full Width at Half Maximum of the gaussian.
#. normalize(boolean): Whether to normalize the generated gaussian
by :math:`\\frac{1}{\\sigma\\sqrt{2\\pi}}` so the integral
is equal to 1.
#. check (boolean): whether to check arguments before computing.
:Returns:
#. gaussian (numpy.ndarray): The computed gaussian array.
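**Usage example** (a minimal sketch; a normalized gaussian centered at 5):
.. code-block:: python

    import numpy as np
    x = np.linspace(0., 10., 1000)
    g = gaussian(x, center=5, FWHM=1, normalize=True)
    # np.sum(g)*(x[1]-x[0]) is approximately 1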
"""
if check:
assert is_number(center), LOGGER.error("center must be a number")
center = FLOAT_TYPE(center)
assert is_number(FWHM), LOGGER.error("FWHM must be a number")
FWHM = FLOAT_TYPE(FWHM)
assert FWHM>0, LOGGER.error("FWHM must be bigger than 0")
assert isinstance(normalize, bool), LOGGER.error("normalize must be boolean")
sigma = FWHM/(2.*np.sqrt(2*np.log(2)))
expKernel = ((x-center)**2) / (-2*sigma**2)
exp = np.exp(expKernel)
scaleFactor = 1.
if normalize:
scaleFactor /= sigma*np.sqrt(2*np.pi)
return (scaleFactor * exp).astype(FLOAT_TYPE)
def step_function(x, center=0, FWHM=0.1, height=1, check=True):
"""
Compute a step function as the cumulative summation of a gaussian
distribution of a given vector.
:Parameters:
#. x (numpy.ndarray): The vector to compute the gaussian. gaussian
is computed as a function of x.
#. center (number): The center of the step function which is the
the center of the gaussian.
#. FWHM (number): The Full Width at Half Maximum of the gaussian.
#. height (number): The height of the step function.
#. check (boolean): whether to check arguments before computing.
:Returns:
#. stepFunction (numpy.ndarray): The computed step function array.
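**Usage example** (a minimal sketch; a smooth step of height 2 centered at 5):
.. code-block:: python

    import numpy as np
    x  = np.linspace(0., 10., 1000)
    sf = step_function(x, center=5, FWHM=0.1, height=2)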
"""
if check:
assert is_number(height), LOGGER.error("height must be a number")
height = FLOAT_TYPE(height)
g = gaussian(x, center=center, FWHM=FWHM, normalize=False, check=check)
sf = np.cumsum(g)
sf /= sf[-1]
return (sf*height).astype(FLOAT_TYPE)
class ListenerBase(object):
"""All listeners base class."""
def __init__(self):
self.__listenerId = str(uuid.uuid1())
@property
def listenerId(self):
""" Listener unique id set at initialization"""
return self.__listenerId
def listen(self, message, argument=None):
"""
Listens to any message sent from the Broadcaster.
:Parameters:
#. message (object): Any python object to send to the listener.
#. argument (object): Any python object.
"""
pass
class _Container(object):
"""
This is a general objects container that is used by the engine to
hold a unique instance for all objects that are prone to redundancy,
such as swapLists in SwapGenerators.
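**Usage example** (a minimal sketch; _Container is a singleton):
.. code-block:: python

    container = _Container()
    if not container.is_container('mydata'):
        container.add_container('mydata')
    location = container.add_to_container('mydata', value=[1,2,3])
    assert container.get_value(location) == [1,2,3]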
"""
# This is for internal usage only
__FIRST_INIT = True
def __new__(cls, *args, **kwargs):
#Singleton interface
thisSingleton = cls.__dict__.get("_thisSingleton__")
if thisSingleton is not None:
return thisSingleton
cls._thisSingleton__ = thisSingleton = object.__new__(cls, *args, **kwargs)
return thisSingleton
def __init__(self):
# this if statement ensures _Container is initialized only once.
if _Container.__FIRST_INIT:
self.__containers = {}
self.__hints = {}
_Container.__FIRST_INIT = False
def __getstate__(self):
"""no need to store hints dict"""
self.__hints = {}
return self.__dict__
@property
def containersName(self):
"""The containers name list."""
return list(self.__containers)
@property
def containers(self):
"""The containers dictionary"""
return self.__containers
@property
def hints(self):
"""The containers dictionary"""
return self.__hints
def get_location_by_hint(self, hint):
"""
Get stored object in container using a hint.
:Parameters:
#. hint (object): This is used to fetch for stored object.
Sometimes objects are parsed and modified before being set.
In this particular case, those object will be passed as hint
and then later used to find stored object after modification.
:Returns:
#. location (None, tuple): The object location given a hint.
If the hint is not found, None is returned.
"""
for location in self.__hints:
h = self.__hints[location]
if h is hint:
return location
# return None when nothing is found
return None
def is_container(self, name):
"""
Check whether container exists given its name.
:Parameters:
#. name (string): The container's name.
:Returns:
#. result (boolean): Whether container exists.
"""
return name in self.__containers
def assert_location(self, location):
"""
Check that location exists. If it doesn't, an error is raised.
:Parameters:
#. location (tuple): Location tuple as (name, key) that points
to the value. It's typically implemented as such
containers[name][key]
"""
assert isinstance(location, (list, tuple)), LOGGER.error("location must be a tuple")
assert len(location) == 2, LOGGER.error("location must contain 2 items")
name, uniqueKey = location
assert self.is_container(name), LOGGER.error("container name '%s' doesn't exist"%name)
assert uniqueKey in self.__containers[name], LOGGER.error("container name '%s' key '%s' doesn't exist"%(name,uniqueKey))
def add_container(self, name):
"""
Add a container to the containers dict.
:Parameters:
#. name (string): The container's name.
"""
assert isinstance(name, basestring), "name must be a string"
assert not self.is_container(name), LOGGER.error("container name '%s' already exists"%name)
self.__containers[name] = {}
def pop_container(self, name):
"""
Pop and return a container from the containers dict.
:Parameters:
#. name (string): The container's name.
:Returns:
#. value (list): The container value.
"""
assert isinstance(name, basestring), "name must be a string"
assert self.is_container(name), LOGGER.error("container name '%s' doesn't exist"%name)
return self.__containers.pop(name)
def add_to_container(self, container, value, hint=None):
"""
Add a value to a container. If stored correctly a location tuple
(name, uuid) will be returned. This location tuple is needed to locate
and get the data when needed.
:Parameters:
#. container (string): The container's name.
#. value (object): The value object to add to the container.
#. hint (object): Hint used to fetch for stored object.
Sometimes objects are parsed and modified before being set.
In this particular case, those objects will be passed as hint
and then later used to find the stored object after modification.
:Returns:
#. location (tuple): Location tuple as (container, key) that
points to the value. It's typically implemented as such
containers[container][key]
"""
assert self.is_container(container), LOGGER.error("container name '%s' doesn't exist"%container)
uniqueKey = str(uuid.uuid1())
self.__containers[container][uniqueKey] = value
# create location
location = (container, uniqueKey)
# add hint
if hint is not None:
self.__hints[location] = hint
# return location
return location
def set_value(self, *args, **kwargs):
"""alias to add_to_container"""
return self.add_to_container(*args, **kwargs)
def get_value(self, location):
"""
Get stored value in container.
:Parameters:
#. location (tuple): Location tuple as (name, key) that points
to the value. It's typically implemented as such
containers[name][key]
:Returns:
#. value (object): The stored value object.
"""
self.assert_location(location)
name, uniqueKey = location
return self.__containers[name][uniqueKey]
def update_value(self, location, value):
"""
Update an existing value.
:Parameters:
#. location (tuple): Location tuple as (name, key) that points to
the value. It's typically implemented as such
containers[name][key]
#. value (object): The new value object to store.
"""
self.assert_location(location)
name, uniqueKey = location
self.__containers[name][uniqueKey] = value
_Container()
class _AtomsCollector(object):
"""
Atoms collector manages collecting atoms data whenever they are
removed from the system. This mechanism allows storing and recovering
atoms data at engine runtime.
:Parameters:
#. dataKeys (None, list): The data keys list promised to store
everytime an atom is removed from the system. If None is given,
dataKeys must be set later using set_data_keys method.
"""
# internal usage only
def __init__(self, parent, dataKeys=None):
# set parent
assert callable( getattr(parent, "_on_collector_collect_atom", None) ), LOGGER.error("_AtomsCollector parent must have '_on_collector_collect_atom' method")
assert callable( getattr(parent, "_on_collector_release_atom", None) ), LOGGER.error("_AtomsCollector parent must have '_on_collector_release_atom' method")
assert callable( getattr(parent, "_on_collector_reset", None) ), LOGGER.error("_AtomsCollector parent must have '_on_collector_reset' method")
self.__parent = parent
# set data keys tuple
self.__dataKeys = ()
if dataKeys is not None:
self.set_data_keys(dataKeys)
# set all properties
self.reset()
def __len__(self):
return len(self.__collectedData)
@property
def dataKeys(self):
"""Data keys that this collector will collect."""
return self.__dataKeys
@property
def indexes(self):
"""Collected atoms indexes."""
return list(self.__collectedData)
@property
def indexesSortedArray(self):
"""Collected atoms sorted indexes numpy array."""
return self.__indexesSortedArray
@property
def state(self):
"""Current collector state that will only change upon reseting,
collecting or releasing atoms."""
return self.__state
def reset(self):
"""
Reset collector to initial state by releasing all collected atoms
and re-initializing all properties. parent._on_collector_reset
method is not called.
"""
# init indexes sorted array
self.__indexesSortedArray = np.array([])
# initialize collected data dictionary
self.__collectedData = {}
# set random data that can be used to collect random data at any time.
# must be used only internally.
self._randomData = None
# set state
self.__state = str(uuid.uuid1())
def get_collected_data(self):
"""
Get stored collected data.
:Returns:
#. collectedData (dict): The collected data dictionary.
"""
return self.__collectedData
def is_collected(self, index):
"""
Get whether atom is collected given its index.
:Parameters:
#. index (int): The atom index to check whether it is
collected or not.
:Returns:
#. result (bool): Whether it is collected or not.
"""
return index in self.__collectedData
def is_not_collected(self, index):
"""
Get whether atom is not collected given its index.
:Parameters:
#. index (int): The atom index to check whether it is
collected or not.
:Returns:
#. result (bool): Whether it is not collected.
"""
return index not in self.__collectedData
def are_collected(self, indexes):
"""
Get whether atoms are collected given their indexes.
:Parameters:
#. indexes (list,set,tuple, numpy.ndarray): Atoms indexes to
check whether they are collected or not.
:Returns:
#. result (list): List of booleans defining whether atoms are
collected or not.
"""
return [idx in self.__collectedData for idx in indexes]
def any_collected(self, indexes):
"""
Get whether any atom in indexes is collected.
:Parameters:
#. indexes (list,set,tuple, numpy.ndarray): Atoms indexes to
check whether they are collected or not.
:Returns:
#. result (boolean): Whether any atom in indexes is collected.
"""
for idx in indexes:
if idx in self.__collectedData:
return True
return False
def any_not_collected(self, indexes):
"""
Get whether any atom in indexes is not collected.
:Parameters:
#. indexes (list,set,tuple, numpy.ndarray): Atoms indexes to
check whether they are collected or not.
:Returns:
#. result (boolean): Whether any atom in indexes is not
collected.
"""
for idx in indexes:
if idx not in self.__collectedData:
return True
return False
def all_collected(self, indexes):
"""
Get whether all atoms in indexes are collected.
:Parameters:
#. indexes (list,set,tuple, numpy.ndarray): Atoms indexes to
check whether they are collected or not.
:Returns:
#. result (boolean): Whether all atoms are collected.
"""
for idx in indexes:
if idx not in self.__collectedData:
return False
return True
def all_not_collected(self, indexes):
"""
Get whether all atoms in indexes are not collected.
:Parameters:
#. indexes (list,set,tuple, numpy.ndarray): Atoms indexes to
check whether they are collected or not.
:Returns:
#. result (boolean): Whether all atoms are not collected.
"""
for idx in indexes:
if idx in self.__collectedData:
return False
return True
def are_not_collected(self, indexes):
"""
Get whether atoms are not collected given their indexes.
:Parameters:
#. indexes (list,set,tuple, numpy.ndarray): Atoms indexes to
check whether they are collected or not.
:Returns:
#. result (list): List of booleans defining whether atoms are
not collected.
"""
return [idx not in self.__collectedData for idx in indexes]
def set_data_keys(self, dataKeys):
"""
Set atoms collector data keys. keys can be set only once.
This method will throw an error if used to reset dataKeys.
:Parameters:
#. dataKeys (list): The data keys list promised to store everytime
an atom is removed from the system.
"""
assert not len(self.__dataKeys), LOGGER.error("resetting dataKeys is not allowed")
assert isinstance(dataKeys, (list, set, tuple)), LOGGER.error("dataKeys must be a list")
assert len(set(dataKeys))==len(dataKeys), LOGGER.error("Redundant keys in dataKeys list are found")
assert len(dataKeys)>0, LOGGER.error("dataKeys must not be empty")
self.__dataKeys = tuple(sorted(dataKeys))
def get_relative_index(self, index):
"""
Compute relative atom index considering already collected atoms.
:Parameters:
#. index (int): Atom index.
:Returns:
#. relativeIndex (int): Atom relative index.
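**Example** (a worked sketch; assumes a hypothetical engine whose collector
has already collected atoms 2 and 5):
.. code-block:: python

    # with collected indexes [2, 5], real atom index 7 maps to relative
    # index 5 because two atoms of smaller index were removed.
    relativeIndex = engine._atomsCollector.get_relative_index(7)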
"""
position = np.searchsorted(a=self.__indexesSortedArray, v=index, side='left').astype(INT_TYPE)
return index-position
def get_relative_indexes(self, indexes):
"""
Compute relative atoms index considering already collected atoms.
:Parameters:
#. indexes (list,set,tuple,numpy.ndarray): Atoms index.
:Returns:
#. relativeIndexes (list): Atoms relative index.
"""
positions = np.searchsorted(a=self.__indexesSortedArray, v=indexes, side='left').astype(INT_TYPE)
return indexes-positions
def get_real_index(self, relativeIndex):
"""
Compute real index of the given relativeIndex considering
already collected indexes.
:Parameters:
#. relativeIndex (int): Atom relative index to already collected
indexes.
:Returns:
#. index (int): Atom real index.
"""
### THIS IS NOT TESTED YET.
indexes = np.array( sorted(self.indexes) )
shift = np.searchsorted(a=indexes, v=relativeIndex, side='left')
index = relativeIndex+shift
for idx in indexes[shift:]:
if idx > index:
break
index += 1
return index
def get_atom_data(self, index):
"""
Get collected atom data.
:Parameters:
#. index (int): The atom index to return its collected data.
"""
return self.__collectedData[index]
def get_data_by_key(self, key):
"""
Get all collected atoms data that is associated with a key.
:Parameters:
#. key (object): The data key.
:Returns:
#. data (dict): Dictionary of atoms indexes where values are the collected data.
"""
data = {}
for k in self.__collectedData:
data[k] = self.__collectedData[k][key]
return data
def collect(self, index, dataDict, check=True):
"""
Collect atom given its index.
:Parameters:
#. index (int): The atom index to collect.
#. dataDict (dict): The atom data dict to collect.
#. check (boolean): Whether to check dataDict keys before
collecting. If set to False, user promises that collected
data is a dictionary and contains the needed keys.
"""
assert not self.is_collected(index), LOGGER.error("attempting to collect an already collected atom of index '%i'"%index)
# add data
if check:
assert isinstance(dataDict, dict), LOGGER.error("dataDict must be a dictionary of data where keys are dataKeys")
assert tuple(sorted(dataDict)) == self.__dataKeys, LOGGER.error("dataDict keys don't match promised dataKeys")
self.__collectedData[index] = dataDict
# set indexes sorted array
idx = np.searchsorted(a=self.__indexesSortedArray, v=index, side='left')
self.__indexesSortedArray = np.insert(self.__indexesSortedArray, idx, index)
# set state
self.__state = str(uuid.uuid1())
def release(self, index):
"""
Release atom from list of collected atoms and return its
collected data.
:Parameters:
#. index (int): The atom index to release.
:Returns:
#. dataDict (dict): The released atom collected data.
"""
if not self.is_collected(index):
LOGGER.warn("Attempting to release atom %i that is not collected."%index)
return
data = self.__collectedData.pop(index)
# remove index from indexes sorted array
idx = np.searchsorted(a=self.__indexesSortedArray, v=index, side='left')
self.__indexesSortedArray = np.delete(self.__indexesSortedArray, idx)
# set state
self.__state = str(uuid.uuid1())
# return released atom collected data
return data
class Broadcaster(object):
"""
A broadcaster broadcasts a message to all registered listeners.
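**Usage example** (a minimal sketch using a hypothetical listener subclass):
.. code-block:: python

    class PrintListener(ListenerBase):
        def listen(self, message, argument=None):
            print( message, argument )

    broadcaster = Broadcaster()
    broadcaster.add_listener( PrintListener() )
    broadcaster.broadcast("atoms moved", [1,2,3])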
"""
def __init__(self):
self.__listeners = []
@property
def listeners(self):
"""Listeners list copy."""
return [l for l in self.__listeners]
def add_listener(self, listener):
"""
Add listener to the list of listeners.
:Parameters:
#. listener (object): Any python object having a listen method.
"""
assert isinstance(listener, ListenerBase), LOGGER.error("listener must be a ListenerBase instance.")
if listener not in self.__listeners:
self.__listeners.append(listener)
else:
LOGGER.warn("listener already exist in broadcaster list")
def remove_listener(self, listener):
"""
Remove listener from the list of listeners.
:Parameters:
#. listener (object): The listener object to remove.
"""
assert isinstance(listener, ListenerBase), LOGGER.error("listener must be a ListenerBase instance.")
# remove listener
self.__listeners = [l for l in self.__listeners if l.listenerId != listener.listenerId]
def broadcast(self, message, arguments=None):
"""
Broadcast a message to all the listeners
:Parameters:
#. message (object): Any type of message object to pass to
the listeners.
#. arguments (object): Any type of argument to pass to the
listeners.
"""
for l in self.__listeners:
l.listen(message, arguments)
class RandomFloatGenerator(object):
"""
Generate random float number between a lower and an upper limit.
:Parameters:
#. lowerLimit (number): The lower limit allowed.
#. upperLimit (number): The upper limit allowed.
"""
def __init__(self, lowerLimit, upperLimit):
self.__lowerLimit = None
self.__upperLimit = None
self.set_lower_limit(lowerLimit)
self.set_upper_limit(upperLimit)
@property
def lowerLimit(self):
"""Lower limit of the number generation."""
return self.__lowerLimit
@property
def upperLimit(self):
"""Upper limit of the number generation."""
return self.__upperLimit
@property
def rang(self):
"""Range defined as upperLimit-lowerLimit."""
return self.__rang
def set_lower_limit(self, lowerLimit):
"""
Set lower limit.
:Parameters:
#. lowerLimit (number): Lower limit allowed.
"""
assert is_number(lowerLimit), LOGGER.error("lowerLimit must be a number")
self.__lowerLimit = FLOAT_TYPE(lowerLimit)
if self.__upperLimit is not None:
assert self.__lowerLimit<self.__upperLimit, LOGGER.error("lower limit must be smaller than the upper one")
self.__rang = FLOAT_TYPE(self.__upperLimit-self.__lowerLimit)
def set_upper_limit(self, upperLimit):
"""
Set upper limit.
:Parameters:
#. upperLimit (number): Upper limit allowed.
"""
assert is_number(upperLimit), LOGGER.error("upperLimit must be a number")
self.__upperLimit = FLOAT_TYPE(upperLimit)
if self.__lowerLimit is not None:
assert self.__lowerLimit<self.__upperLimit, LOGGER.error("lower limit must be smaller than the upper one")
self.__rang = FLOAT_TYPE(self.__upperLimit-self.__lowerLimit)
def generate(self):
"""Generate a random float number between lowerLimit and upperLimit."""
return FLOAT_TYPE(self.__lowerLimit+generate_random_float()*self.__rang)
class BiasedRandomFloatGenerator(RandomFloatGenerator):
"""
Generate biased random float number between a lower and an upper limit.
To bias the generator at a certain number, a bias gaussian is added to the
weights scheme at the position of this particular number.
.. image:: biasedFloatGenerator.png
:align: center
:Parameters:
#. lowerLimit (number): The lower limit allowed.
#. upperLimit (number): The upper limit allowed.
#. weights (None, list, numpy.ndarray): The weights scheme.
The length defines the number of bins and the edges.
The length of weights array defines the resolution of the biased
numbers generation. If None is given, ones array of length 10000
is automatically generated.
#. biasRange(None, number): The bias gaussian range.
It must be smaller than half of limits range which is equal to
(upperLimit-lowerLimit)/2. If None is given, it will be
automatically set to (upperLimit-lowerLimit)/5
#. biasFWHM(None, number): The bias gaussian Full Width at Half Maximum.
It must be smaller than half of biasRange.
If None, it will be automatically set to biasRange/10
#. biasHeight(number): The bias gaussian maximum intensity.
#. unbiasRange(None, number): The unbias gaussian range.
It must be smaller than half of limits range which is equal to
(upperLimit-lowerLimit)/2. If None is given, it will be
automatically set to biasRange.
#. unbiasFWHM(None, number): The unbias gaussian Full Width at Half
Maximum. It must be smaller than half of unbiasRange.
If None is given, it will be automatically set to biasFWHM.
#. unbiasHeight(number): The unbias gaussian maximum intensity.
If None is given, it will be automatically set to biasHeight.
#. unbiasThreshold(number): Unbias is applied at a certain
position only when the position weight is above unbiasThreshold.
It must be a positive number.
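**Usage example** (a minimal sketch; biasing generation around the value 5):
.. code-block:: python

    generator = BiasedRandomFloatGenerator(lowerLimit=0, upperLimit=10)
    # accumulate bias around 5 so numbers near 5 become more likely
    for _ in range(10):
        generator.bias_scheme_at_position(5., scaleFactor=None)
    numbers = [generator.generate() for _ in range(1000)]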
"""
def __init__(self, lowerLimit, upperLimit,
weights=None,
biasRange=None, biasFWHM=None, biasHeight=1,
unbiasRange=None, unbiasFWHM=None, unbiasHeight=None, unbiasThreshold=1):
# initialize random generator
super(BiasedRandomFloatGenerator, self).__init__(lowerLimit=lowerLimit, upperLimit=upperLimit)
# set scheme
self.set_weights(weights)
# set bias function
self.set_bias(biasRange=biasRange, biasFWHM=biasFWHM, biasHeight=biasHeight)
# set unbias function
self.set_unbias(unbiasRange=unbiasRange, unbiasFWHM=unbiasFWHM, unbiasHeight=unbiasHeight, unbiasThreshold=unbiasThreshold)
@property
def originalWeights(self):
"""Original weights as initialized."""
return self.__originalWeights
@property
def weights(self):
"""Current value weights vector."""
weights = self.__scheme[1:]-self.__scheme[:-1]
weights = list(weights)
weights.insert(0,self.__scheme[0])
return weights
@property
def scheme(self):
"""Numbers generation scheme."""
return self.__scheme
@property
def bins(self):
"""Number of bins that is equal to the length of weights vector."""
return self.__bins
@property
def binWidth(self):
"""Bin width defining the resolution of the biased random
number generation."""
return self.__binWidth
@property
def bias(self):
"""Bias step-function."""
return self.__bias
@property
def biasGuassian(self):
"""Bias gaussian function."""
return self.__biasGuassian
@property
def biasRange(self):
"""Bias gaussian extent range."""
return self.__biasRange
@property
def biasBins(self):
"""Bias gaussian number of bins."""
return self.__biasBins
@property
def biasFWHM(self):
"""Bias gaussian Full Width at Half Maximum."""
return self.__biasFWHM
@property
def biasFWHMBins(self):
"""Bias gaussian Full Width at Half Maximum number of bins."""
return self.__biasFWHMBins
@property
def unbias(self):
"""Unbias step-function."""
return self.__unbias
@property
def unbiasGuassian(self):
"""Unbias gaussian function."""
return self.__unbiasGuassian
@property
def unbiasRange(self):
"""Unbias gaussian extent range."""
return self.__unbiasRange
@property
def unbiasBins(self):
"""Unbias gaussian number of bins."""
return self.__unbiasBins
@property
def unbiasFWHM(self):
"""Unbias gaussian Full Width at Half Maximum."""
return self.__unbiasFWHM
@property
def unbiasFWHMBins(self):
"""Unbias gaussian Full Width at Half Maximum number of bins."""
return self.__unbiasFWHMBins
def set_weights(self, weights=None):
"""
Set generator's weights.
:Parameters:
#. weights (None, list, numpy.ndarray): The weights scheme.
The length defines the number of bins and the edges.
The length of weights array defines the resolution of the
biased numbers generation. If None is given, ones array of
length 10000 is automatically generated.
"""
# set original weights
if weights is None:
self.__bins = 10000
self.__originalWeights = np.ones(self.__bins)
else:
assert isinstance(weights, (list, set, tuple, np.ndarray)), LOGGER.error("weights must be a list of numbers")
if isinstance(weights, np.ndarray):
assert len(weights.shape)==1, LOGGER.error("weights must be uni-dimensional")
wgts = []
assert len(weights)>=100, LOGGER.error("weights minimum length allowed is 100")
for w in weights:
assert is_number(w), LOGGER.error("weights items must be numbers")
w = FLOAT_TYPE(w)
assert w>=0, LOGGER.error("weights items must be positive")
wgts.append(w)
self.__originalWeights = np.array(wgts, dtype=FLOAT_TYPE)
self.__bins = len(self.__originalWeights)
# set bin width
self.__binWidth = FLOAT_TYPE(self.rang/self.__bins)
self.__halfBinWidth = FLOAT_TYPE(self.__binWidth/2.)
# set scheme
self.__scheme = np.cumsum( self.__originalWeights )
def set_bias(self, biasRange, biasFWHM, biasHeight):
"""
Set generator's bias gaussian function
:Parameters:
#. biasRange(None, number): Bias gaussian range. It must be
smaller than half of limits range which is equal to
(upperLimit-lowerLimit)/2. If None is given, it will
be automatically set to (upperLimit-lowerLimit)/5.
#. biasFWHM(None, number): Bias gaussian Full Width at Half
Maximum. It must be smaller than half of biasRange.
If None is given, it will be automatically set to biasRange/10.
#. biasHeight(number): Bias gaussian maximum intensity.
"""
# check biasRange
if biasRange is None:
biasRange = FLOAT_TYPE(self.rang/5.)
else:
assert is_number(biasRange), LOGGER.error("biasRange must be a number")
biasRange = FLOAT_TYPE(biasRange)
assert biasRange>0, LOGGER.error("biasRange must be positive")
assert biasRange<=self.rang/2., LOGGER.error("biasRange must be smaller than 50%% of limits range which is %s in this case"%str(self.rang))
self.__biasRange = FLOAT_TYPE(biasRange)
self.__biasBins = INT_TYPE(self.bins*self.__biasRange/self.rang)
# check biasFWHM
if biasFWHM is None:
biasFWHM = FLOAT_TYPE(self.__biasRange/10.)
else:
assert is_number(biasFWHM), LOGGER.error("biasFWHM must be a number")
biasFWHM = FLOAT_TYPE(biasFWHM)
assert biasFWHM>=0, LOGGER.error("biasFWHM must be positive")
assert biasFWHM<=self.__biasRange/2., LOGGER.error("biasFWHM must be smaller than 50%% of bias range which is %s in this case"%str(self.__biasRange))
self.__biasFWHM = FLOAT_TYPE(biasFWHM)
self.__biasFWHMBins = INT_TYPE(self.bins*self.__biasFWHM/self.rang)
# check height
assert is_number(biasHeight), LOGGER.error("biasHeight must be a number")
self.__biasHeight = FLOAT_TYPE(biasHeight)
assert self.__biasHeight>=0, LOGGER.error("biasHeight must be positive")
# create bias step function
b = self.__biasRange/self.__biasBins
x = [-self.__biasRange/2.+idx*b for idx in range(self.__biasBins) ]
self.__biasGuassian = gaussian(x, center=0, FWHM=self.__biasFWHM, normalize=False)
self.__biasGuassian -= self.__biasGuassian[0]
self.__biasGuassian /= np.max(self.__biasGuassian)
self.__biasGuassian *= self.__biasHeight
self.__bias = np.cumsum(self.__biasGuassian)
def set_unbias(self, unbiasRange, unbiasFWHM, unbiasHeight, unbiasThreshold):
"""
Set generator's unbias gaussian function
:Parameters:
#. unbiasRange(None, number): The bias gaussian range.
It must be smaller than half of limits range which is equal to
(upperLimit-lowerLimit)/2. If None, it will be automatically set
to biasRange.
#. unbiasFWHM(None, number): The bias gaussian Full Width at Half
Maximum. It must be smaller than half of biasRange.
If None is given, it will be automatically set to biasFWHM.
#. unbiasHeight(number): The unbias gaussian maximum intensity.
If None is given, it will be automatically set to biasHeight.
#. unbiasThreshold(number): unbias is only applied at a certain
position only when the position weight is above
unbiasThreshold. It must be a positive number.
"""
# check biasRange
if unbiasRange is None:
unbiasRange = self.__biasRange
else:
assert is_number(unbiasRange), LOGGER.error("unbiasRange must be a number")
unbiasRange = FLOAT_TYPE(unbiasRange)
assert unbiasRange>0, LOGGER.error("unbiasRange must be positive")
assert unbiasRange<=self.rang/2., LOGGER.error("unbiasRange must be smaller than 50%% of limits range which is %s in this case"%str(self.rang))
self.__unbiasRange = FLOAT_TYPE(unbiasRange)
self.__unbiasBins = INT_TYPE(self.bins*self.__unbiasRange/self.rang)
# check biasFWHM
if unbiasFWHM is None:
unbiasFWHM = self.__biasFWHM
else:
assert is_number(unbiasFWHM), LOGGER.error("unbiasFWHM must be a number")
unbiasFWHM = FLOAT_TYPE(unbiasFWHM)
assert unbiasFWHM>=0, LOGGER.error("unbiasFWHM must be positive")
assert unbiasFWHM<=self.__unbiasRange/2., LOGGER.error("unbiasFWHM must be smaller than 50%% of unbias range which is %s in this case"%str(self.__unbiasRange))
self.__unbiasFWHM = FLOAT_TYPE(unbiasFWHM)
self.__unbiasFWHMBins = INT_TYPE(self.bins*self.__unbiasFWHM/self.rang)
# check height
if unbiasHeight is None:
unbiasHeight = self.__biasHeight
assert is_number(unbiasHeight), LOGGER.error("unbiasHeight must be a number")
self.__unbiasHeight = FLOAT_TYPE(unbiasHeight)
assert self.__unbiasHeight>=0, LOGGER.error("unbiasHeight must be positive")
# check unbiasThreshold
assert is_number(unbiasThreshold), LOGGER.error("unbiasThreshold must be a number")
self.__unbiasThreshold = FLOAT_TYPE(unbiasThreshold)
assert self.__unbiasThreshold>=0, LOGGER.error("unbiasThreshold must be positive")
# create bias step function
b = self.__unbiasRange/self.__unbiasBins
x = [-self.__unbiasRange/2.+idx*b for idx in range(self.__unbiasBins) ]
self.__unbiasGuassian = gaussian(x, center=0, FWHM=self.__unbiasFWHM, normalize=False)
self.__unbiasGuassian -= self.__unbiasGuassian[0]
self.__unbiasGuassian /= np.max(self.__unbiasGuassian)
self.__unbiasGuassian *= -self.__unbiasHeight
self.__unbias = np.cumsum(self.__unbiasGuassian)
def bias_scheme_by_index(self, index, scaleFactor=None, check=True):
"""
Bias the generator's scheme using the defined bias gaussian
function at the given index.
:Parameters:
#. index(integer): The index of the position to bias
#. scaleFactor(None, number): Whether to scale the bias gaussian
before biasing the scheme. If None is given, bias gaussian is
used as defined.
#. check(boolean): Whether to check arguments.
"""
if not self.__biasHeight>0: return
if check:
assert is_integer(index), LOGGER.error("index must be an integer")
index = INT_TYPE(index)
assert index>=0, LOGGER.error("index must be positive")
assert index<=self.__bins, LOGGER.error("index must be smaller than number of bins")
if scaleFactor is not None:
assert is_number(scaleFactor), LOGGER.error("scaleFactor must be a number")
scaleFactor = FLOAT_TYPE(scaleFactor)
assert scaleFactor>=0, LOGGER.error("scaleFactor must be positive")
# get start indexes
startIdx = index-int(self.__biasBins/2)
if startIdx < 0:
biasStartIdx = -startIdx
startIdx = 0
bias = np.cumsum(self.__biasGuassian[biasStartIdx:]).astype(FLOAT_TYPE)
else:
biasStartIdx = 0
bias = self.__bias
# scale bias
if scaleFactor is None:
scaledBias = bias
else:
scaledBias = bias*scaleFactor
# get end indexes
endIdx = startIdx+self.__biasBins-biasStartIdx
biasEndIdx = len(scaledBias)
if endIdx > self.__bins-1:
biasEndIdx -= endIdx-self.__bins
endIdx = self.__bins
# bias scheme
self.__scheme[startIdx:endIdx] += scaledBias[0:biasEndIdx]
self.__scheme[endIdx:] += scaledBias[biasEndIdx-1]
def bias_scheme_at_position(self, position, scaleFactor=None, check=True):
"""
Bias the generator's scheme using the defined bias gaussian function
at the given number.
:Parameters:
#. position(number): The number to bias.
#. scaleFactor(None, number): Whether to scale the bias gaussian
before biasing the scheme. If None is given, bias gaussian is
used as defined.
#. check(boolean): Whether to check arguments.
"""
if check:
assert is_number(position), LOGGER.error("position must be a number")
position = FLOAT_TYPE(position)
assert position>=self.lowerLimit, LOGGER.error("position must be bigger than lowerLimit")
assert position<=self.upperLimit, LOGGER.error("position must be smaller than upperLimit")
index = INT_TYPE(self.__bins*(position-self.lowerLimit)/self.rang)
# bias scheme by index
self.bias_scheme_by_index(index=index, scaleFactor=scaleFactor, check=check)
def unbias_scheme_by_index(self, index, scaleFactor=None, check=True):
"""
Unbias the generator's scheme using the defined bias gaussian
function at the given index.
:Parameters:
#. index(integer): The index of the position to unbias.
#. scaleFactor(None, number): Whether to scale the unbias gaussian
before unbiasing the scheme. If None is given, unbias gaussian
is used as defined.
#. check(boolean): Whether to check arguments.
"""
if not self.__unbiasHeight>0: return
if check:
assert is_integer(index), LOGGER.error("index must be an integer")
index = INT_TYPE(index)
            assert index>=0, LOGGER.error("index must be non-negative")
            assert index<=self.__bins, LOGGER.error("index must not exceed the number of bins")
if scaleFactor is not None:
assert is_number(scaleFactor), LOGGER.error("scaleFactor must be a number")
scaleFactor = FLOAT_TYPE(scaleFactor)
                assert scaleFactor>=0, LOGGER.error("scaleFactor must be non-negative")
# get start indexes
startIdx = index-int(self.__unbiasBins/2)
if startIdx < 0:
biasStartIdx = -startIdx
startIdx = 0
unbias = self.__unbiasGuassian[biasStartIdx:]
else:
biasStartIdx = 0
unbias = self.__unbiasGuassian
# get end indexes
endIdx = startIdx+self.__unbiasBins-biasStartIdx
biasEndIdx = len(unbias)
if endIdx > self.__bins-1:
biasEndIdx -= endIdx-self.__bins
endIdx = self.__bins
# scale unbias
if scaleFactor is None:
scaledUnbias = unbias
else:
scaledUnbias = unbias*scaleFactor
# unbias weights
weights = np.array(self.weights)
weights[startIdx:endIdx] += scaledUnbias[0:biasEndIdx]
# correct for negatives
weights[np.where(weights<self.__unbiasThreshold)] = self.__unbiasThreshold
# set unbiased scheme
self.__scheme = np.cumsum(weights)
def unbias_scheme_at_position(self, position, scaleFactor=None, check=True):
"""
Unbias the generator's scheme using the defined bias gaussian
function at the given number.
:Parameters:
#. position(number): The number to unbias.
#. scaleFactor(None, number): Whether to scale the unbias gaussian
before unbiasing the scheme. If None is given, unbias gaussian
is used as defined.
#. check(boolean): Whether to check arguments.
"""
if check:
assert is_number(position), LOGGER.error("position must be a number")
position = FLOAT_TYPE(position)
assert position>=self.lowerLimit, LOGGER.error("position must be bigger than lowerLimit")
assert position<=self.upperLimit, LOGGER.error("position must be smaller than upperLimit")
index = INT_TYPE(self.__bins*(position-self.lowerLimit)/self.rang)
# bias scheme by index
self.unbias_scheme_by_index(index=index, scaleFactor=scaleFactor, check=check)
def generate(self):
"""Generate a random float number between the biased range
lowerLimit and upperLimit."""
# get position
position = self.lowerLimit + self.__binWidth*np.searchsorted(self.__scheme, generate_random_float()*self.__scheme[-1]) + self.__halfBinWidth
# find limits
minLim = max(self.lowerLimit, position-self.__halfBinWidth)
maxLim = min(self.upperLimit, position+self.__halfBinWidth)
# generate number
        # uniform draw within the selected bin, clipped to the generator limits
        return minLim+generate_random_float()*(maxLim-minLim)
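    # (Added note) the np.searchsorted lookup above is inverse-transform
    # sampling on the cumulative weights. The idea in isolation, using the
    # module's generate_random_float helper:
    #
    #   weights = np.array([1., 3., 1.])        # middle bin is 3x more likely
    #   cdf     = np.cumsum(weights)            # -> [1., 4., 5.]
    #   u       = generate_random_float()*cdf[-1]
    #   bin_idx = np.searchsorted(cdf, u)       # index of the biased bin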
class RandomIntegerGenerator(object):
"""
Generate random integer number between a lower and an upper limit.
:Parameters:
#. lowerLimit (number): Lower limit allowed.
#. upperLimit (number): Upper limit allowed.
"""
def __init__(self, lowerLimit, upperLimit):
self.__lowerLimit = None
self.__upperLimit = None
self.set_lower_limit(lowerLimit)
self.set_upper_limit(upperLimit)
@property
def lowerLimit(self):
"""Lower limit of the number generation."""
return self.__lowerLimit
@property
def upperLimit(self):
"""Upper limit of the number generation."""
return self.__upperLimit
@property
def rang(self):
"""The range defined as upperLimit-lowerLimit"""
return self.__rang
def set_lower_limit(self, lowerLimit):
"""
Set lower limit.
:Parameters:
#. lowerLimit (number): The lower limit allowed.
"""
        assert is_integer(lowerLimit), LOGGER.error("lowerLimit must be an integer")
self.__lowerLimit = INT_TYPE(lowerLimit)
if self.__upperLimit is not None:
assert self.__lowerLimit<self.__upperLimit, LOGGER.error("lower limit must be smaller than the upper one")
self.__rang = self.__upperLimit-self.__lowerLimit+1
def set_upper_limit(self, upperLimit):
"""
Set upper limit.
:Parameters:
#. upperLimit (number): The upper limit allowed.
"""
        assert is_integer(upperLimit), LOGGER.error("upperLimit must be an integer")
self.__upperLimit = INT_TYPE(upperLimit)
if self.__lowerLimit is not None:
assert self.__lowerLimit<self.__upperLimit, LOGGER.error("lower limit must be smaller than the upper one")
self.__rang = self.__upperLimit-self.__lowerLimit+1
def generate(self):
"""Generate a random integer number between lowerLimit and upperLimit."""
return generate_random_integer(self.__lowerLimit, self.__upperLimit)
class BiasedRandomIntegerGenerator(RandomIntegerGenerator):
"""
Generate biased random integer number between a lower and an upper limit.
To bias the generator at a certain number, a bias height is added to the
weights scheme at the position of this particular number.
.. image:: biasedIntegerGenerator.png
:align: center
:Parameters:
#. lowerLimit (integer): The lower limit allowed.
#. upperLimit (integer): The upper limit allowed.
#. weights (None, list, numpy.ndarray): The weights scheme.
The length must be equal to the range between lowerLimit
and upperLimit. If None is given, ones array of length
upperLimit-lowerLimit+1 is automatically generated.
#. biasHeight(number): The weight bias intensity.
#. unbiasHeight(None, number): The weight unbias intensity.
If None, it will be automatically set to biasHeight.
#. unbiasThreshold(number): unbias is only applied at a certain
position only when the position weight is above unbiasThreshold.
It must be a positive number.
"""
def __init__(self, lowerLimit, upperLimit,
weights=None,
biasHeight=1, unbiasHeight=None, unbiasThreshold=1):
# initialize random generator
super(BiasedRandomIntegerGenerator, self).__init__(lowerLimit=lowerLimit, upperLimit=upperLimit)
# set weights
self.set_weights(weights=weights)
# set bias height
self.set_bias_height(biasHeight=biasHeight)
        # set unbias height
self.set_unbias_height(unbiasHeight=unbiasHeight)
        # set unbias threshold
self.set_unbias_threshold(unbiasThreshold=unbiasThreshold)
@property
def originalWeights(self):
"""Original weights as initialized."""
return self.__originalWeights
@property
def weights(self):
"""Current value weights vector."""
weights = self.__scheme[1:]-self.__scheme[:-1]
weights = list(weights)
weights.insert(0,self.__scheme[0])
return weights
@property
def scheme(self):
"""Numbers generation scheme."""
return self.__scheme
@property
def bins(self):
"""Number of bins that is equal to the length of weights vector."""
return self.__bins
def set_weights(self, weights):
"""
Set the generator integer numbers weights.
:Parameters:
#. weights (None, list, numpy.ndarray): The weights scheme.
The length must be equal to the range between lowerLimit and
upperLimit. If None is given, ones array of length
upperLimit-lowerLimit+1 is automatically generated.
"""
if weights is None:
self.__originalWeights = np.ones(self.upperLimit-self.lowerLimit+1)
else:
assert isinstance(weights, (list, set, tuple, np.ndarray)), LOGGER.error("weights must be a list of numbers")
if isinstance(weights, np.ndarray):
assert len(weights.shape)==1, LOGGER.error("weights must be uni-dimensional")
wgts = []
            assert len(weights)==self.upperLimit-self.lowerLimit+1, LOGGER.error("weights length must be exactly equal to 'upperLimit-lowerLimit+1' which is %i"%(self.upperLimit-self.lowerLimit+1))
for w in weights:
assert is_number(w), LOGGER.error("weights items must be numbers")
w = FLOAT_TYPE(w)
assert w>=0, LOGGER.error("weights items must be positive")
wgts.append(w)
self.__originalWeights = np.array(wgts, dtype=FLOAT_TYPE)
# set bins
self.__bins = len( self.__originalWeights )
# set scheme
self.__scheme = np.cumsum( self.__originalWeights )
def set_bias_height(self, biasHeight):
"""
Set weight bias intensity.
:Parameters:
#. biasHeight(number): Weight bias intensity.
"""
assert is_number(biasHeight), LOGGER.error("biasHeight must be a number")
self.__biasHeight = FLOAT_TYPE(biasHeight)
assert self.__biasHeight>0, LOGGER.error("biasHeight must be bigger than 0")
def set_unbias_height(self, unbiasHeight):
"""
Set weight unbias intensity.
:Parameters:
#. unbiasHeight(None, number): The weight unbias intensity.
If None, it will be automatically set to biasHeight.
"""
if unbiasHeight is None:
unbiasHeight = self.__biasHeight
assert is_number(unbiasHeight), LOGGER.error("unbiasHeight must be a number")
self.__unbiasHeight = FLOAT_TYPE(unbiasHeight)
        assert self.__unbiasHeight>=0, LOGGER.error("unbiasHeight must be non-negative")
def set_unbias_threshold(self, unbiasThreshold):
"""
Set weight unbias threshold.
:Parameters:
#. unbiasThreshold(number): unbias is only applied at a certain
position only when the position weight is above unbiasThreshold.
It must be a positive number.
"""
assert is_number(unbiasThreshold), LOGGER.error("unbiasThreshold must be a number")
self.__unbiasThreshold = FLOAT_TYPE(unbiasThreshold)
assert self.__unbiasThreshold>=0, LOGGER.error("unbiasThreshold must be positive")
def bias_scheme_by_index(self, index, scaleFactor=None, check=True):
"""
Bias the generator's scheme at the given index.
:Parameters:
#. index(integer): The index of the position to bias
            #. scaleFactor(None, number): Whether to scale the bias height
               before biasing the scheme. If None, bias height is used as
               defined.
#. check(boolean): Whether to check arguments.
"""
if not self.__biasHeight>0: return
if check:
assert is_integer(index), LOGGER.error("index must be an integer")
index = INT_TYPE(index)
            assert index>=0, LOGGER.error("index must be non-negative")
            assert index<=self.__bins, LOGGER.error("index must not exceed the number of bins")
if scaleFactor is not None:
assert is_number(scaleFactor), LOGGER.error("scaleFactor must be a number")
scaleFactor = FLOAT_TYPE(scaleFactor)
                assert scaleFactor>=0, LOGGER.error("scaleFactor must be non-negative")
# scale bias
if scaleFactor is None:
scaledBias = self.__biasHeight
else:
scaledBias = self.__biasHeight*scaleFactor
# bias scheme
self.__scheme[index:] += scaledBias
def bias_scheme_at_position(self, position, scaleFactor=None, check=True):
"""
Bias the generator's scheme at the given number.
:Parameters:
#. position(number): The number to bias.
            #. scaleFactor(None, number): Whether to scale the bias height
               before biasing the scheme. If None, bias height is used as
               defined.
#. check(boolean): Whether to check arguments.
"""
if check:
assert is_integer(position), LOGGER.error("position must be an integer")
position = INT_TYPE(position)
assert position>=self.lowerLimit, LOGGER.error("position must be bigger than lowerLimit")
assert position<=self.upperLimit, LOGGER.error("position must be smaller than upperLimit")
index = position-self.lowerLimit
# bias scheme by index
self.bias_scheme_by_index(index=index, scaleFactor=scaleFactor, check=check)
def unbias_scheme_by_index(self, index, scaleFactor=None, check=True):
"""
Unbias the generator's scheme at the given index.
:Parameters:
#. index(integer): The index of the position to unbias
            #. scaleFactor(None, number): Whether to scale the unbias height
               before unbiasing the scheme. If None is given,
               unbias height is used as defined.
#. check(boolean): Whether to check arguments.
"""
if not self.__unbiasHeight>0: return
if check:
assert is_integer(index), LOGGER.error("index must be an integer")
index = INT_TYPE(index)
            assert index>=0, LOGGER.error("index must be non-negative")
            assert index<=self.__bins, LOGGER.error("index must not exceed the number of bins")
if scaleFactor is not None:
assert is_number(scaleFactor), LOGGER.error("scaleFactor must be a number")
scaleFactor = FLOAT_TYPE(scaleFactor)
                assert scaleFactor>=0, LOGGER.error("scaleFactor must be non-negative")
# scale unbias
if scaleFactor is None:
scaledUnbias = self.__unbiasHeight
else:
scaledUnbias = self.__unbiasHeight*scaleFactor
# check threshold
if index == 0:
            # cap the unbias so the first weight stays above the threshold
            scaledUnbias = min(scaledUnbias, self.__scheme[index]-self.__unbiasThreshold)
elif self.__scheme[index]-scaledUnbias < self.__scheme[index-1]+self.__unbiasThreshold:
scaledUnbias = self.__scheme[index]-self.__scheme[index-1]-self.__unbiasThreshold
# unbias scheme
self.__scheme[index:] -= scaledUnbias
def unbias_scheme_at_position(self, position, scaleFactor=None, check=True):
"""
        Unbias the generator's scheme at the given number.
        :Parameters:
            #. position(number): The number to unbias.
            #. scaleFactor(None, number): Whether to scale the unbias height
               before unbiasing the scheme. If None is given, unbias
               height is used as defined.
#. check(boolean): Whether to check arguments.
"""
if check:
assert is_integer(position), LOGGER.error("position must be an integer")
position = INT_TYPE(position)
assert position>=self.lowerLimit, LOGGER.error("position must be bigger than lowerLimit")
assert position<=self.upperLimit, LOGGER.error("position must be smaller than upperLimit")
index = position-self.lowerLimit
# unbias scheme by index
self.unbias_scheme_by_index(index=index, scaleFactor=scaleFactor, check=check)
def generate(self):
"""Generate a random intger number between the biased range
lowerLimit and upperLimit."""
index = INT_TYPE( np.searchsorted(self.__scheme, generate_random_float()*self.__scheme[-1]) )
return self.lowerLimit + index
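
# Illustrative usage sketch (added; not part of the original module). It
# relies on the module-level helpers (LOGGER, INT_TYPE, generate_random_float,
# ...) defined above; the limits and the biased position are arbitrary
# demonstration values.
if __name__ == "__main__":
    _gen = BiasedRandomIntegerGenerator(lowerLimit=0, upperLimit=9)
    # add bias weight at value 3 so it is drawn more often
    _gen.bias_scheme_at_position(3)
    _draws = [_gen.generate() for _ in range(20)]
    print(_draws)
    # remove the added weight again
    _gen.unbias_scheme_at_position(3)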
| agpl-3.0 |
wzbozon/statsmodels | statsmodels/datasets/elnino/data.py | 25 | 1779 | """El Nino dataset, 1950 - 2010"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is in the public domain."""
TITLE = """El Nino - Sea Surface Temperatures"""
SOURCE = """
National Oceanic and Atmospheric Administration's National Weather Service
ERSST.V3B dataset, Nino 1+2
http://www.cpc.ncep.noaa.gov/data/indices/
"""
DESCRSHORT = """Averaged monthly sea surface temperature - Pacific Ocean."""
DESCRLONG = """This data contains the averaged monthly sea surface
temperature in degrees Celsius of the Pacific Ocean, between 0-10 degrees South
and 90-80 degrees West, from 1950 to 2010. This dataset was obtained from
NOAA.
"""
NOTE = """::
Number of Observations - 61 x 12
Number of Variables - 1
Variable name definitions::
    TEMPERATURE - average sea surface temperature in degrees Celsius
(12 columns, one per month).
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the El Nino data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The elnino Dataset instance does not contain endog and exog attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/elnino.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
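
# Minimal usage sketch (added; illustrative only). It assumes elnino.csv
# sits next to this module, as _get_data above requires:
if __name__ == "__main__":
    elnino_data = load_pandas().data
    print(elnino_data.head())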
| bsd-3-clause |
harapon/catsudon | src/DestinationAnalysis.py | 1 | 17640 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import random
import datetime
import numpy as np
from sklearn.cluster import KMeans
import LatLonCalc
import foursquare_API
def output(fo, outputList):
text = ""
for i in range(len(outputList)-1):
text = text + str(outputList[i]) + ","
text = text + str(outputList[len(outputList)-1]) + "\n"
fo.write(text)
def MakeDestinationDataforClustering(UserTripData):
    # create the destination data array for k-means clustering
NumOfDestinationsAll = 0
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
NumOfDestinationsAll += 2
DestinationData = np.zeros((NumOfDestinationsAll, 2))
itr = 0
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
DestinationData[itr,:] = [float(UserTripData[datadate][tripID].OriginPlaceLat)*1.5, float(UserTripData[datadate][tripID].OriginPlaceLon)]
itr += 1
DestinationData[itr,:] = [float(UserTripData[datadate][tripID].DestinationPlaceLat)*1.5, float(UserTripData[datadate][tripID].DestinationPlaceLon)]
itr += 1
return (DestinationData, NumOfDestinationsAll)
def CalcNumOfCanopy(UserTripData, T1=500, T2=250):
DestinationDict = {}
destItr = 0
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
DestinationDict.update({destItr:[UserTripData[datadate][tripID].OriginPlaceLat, UserTripData[datadate][tripID].OriginPlaceLon]})
destItr += 1
DestinationDict.update({destItr:[UserTripData[datadate][tripID].DestinationPlaceLat, UserTripData[datadate][tripID].DestinationPlaceLon]})
destItr += 1
CanopyDict = {}
NumOfCanopy = 0
while len(DestinationDict.keys()) > 0:
NumOfDestinations = len(DestinationDict.keys())
candidate = DestinationDict.keys()[int(math.floor(random.random()*NumOfDestinations))]
candidateLat = DestinationDict[candidate][0]
candidateLon = DestinationDict[candidate][1]
canopyT1List = []
canopyT2List = []
for dest in DestinationDict.keys():
d1 = LatLonCalc.CalcDistance(candidateLat, candidateLon, DestinationDict[dest][0], DestinationDict[dest][1])
if d1 <= T1:
canopyT1List.append(dest)
if d1 <= T2:
canopyT2List.append(dest)
DestinationDict.pop(dest)
CanopyDict.update({NumOfCanopy:canopyT2List})
NumOfCanopy += 1
return NumOfCanopy
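# (Added note) CalcNumOfCanopy is a single canopy-clustering pass: each
# randomly picked centre removes every remaining point within T2 metres and
# loosely groups points within T1 metres; the resulting canopy count gives a
# cheap estimate of K for the k-means clustering below.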
def DestinationKMeans(DestinationData, UserTripData, K):
kmeans_model = KMeans(n_clusters=K, random_state=10).fit(DestinationData)
labels = kmeans_model.labels_
    # store each cluster's label, latitude and longitude
clusteringLabelDict = {}
for i in range(K):
clusterLat = kmeans_model.cluster_centers_[i][0]/1.5
clusterLon = kmeans_model.cluster_centers_[i][1]
clusteringLabelDict.update({i:[clusterLat, clusterLon]})
    # store the cluster information for each data point
itr = 0
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
UserTripData[datadate][tripID].OriginPlaceID = labels[itr]
UserTripData[datadate][tripID].OriginPlaceLat = clusteringLabelDict[labels[itr]][0]
UserTripData[datadate][tripID].OriginPlaceLon = clusteringLabelDict[labels[itr]][1]
itr += 1
UserTripData[datadate][tripID].DestinationPlaceID = labels[itr]
UserTripData[datadate][tripID].DestinationPlaceLat = clusteringLabelDict[labels[itr]][0]
UserTripData[datadate][tripID].DestinationPlaceLon = clusteringLabelDict[labels[itr]][1]
itr += 1
return (UserTripData, clusteringLabelDict)
def ReadPOICategory(src):
POICategoryDict = {}
for line in open(src):
line = line.strip()
line = line.split('\t')
POICategoryDict.update({unicode(line[0],'utf-8'):line[2]})
return POICategoryDict
def AddPOICharacteristicsFrom4sq(clusteringLabelDict, POICategory, setting4sq):
POIDict = {}
choiceSetDict = {}
Client_id = setting4sq[0]
Client_secret = setting4sq[1]
for cluster in clusteringLabelDict.keys():
clusterLat = clusteringLabelDict[cluster][0]
clusterLon = clusteringLabelDict[cluster][1]
placeDict = foursquare_API.request_4sq(clusterLat, clusterLon, Client_id, Client_secret)
clusterchoiceSetDict = {}
if len(placeDict.keys()) > 10:
itr = 0
for place in sorted(placeDict.keys()):
try:
POICategory[placeDict[place]["category"]]
(POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance) = (placeDict[place]["name"], POICategory[placeDict[place]["category"]], placeDict[place]["category"], placeDict[place]["lat"], placeDict[place]["lon"], placeDict[place]["distance"])
clusterchoiceSetDict.update({itr:[POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance]})
itr += 1
except KeyError:
(POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance) = (placeDict[place]["name"], u"専門その他", u"不明", placeDict[place]["lat"], placeDict[place]["lon"], placeDict[place]["distance"])
clusterchoiceSetDict.update({itr:[POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance]})
itr += 1
if itr == 10:
break
else:
itr = 0
for place in sorted(placeDict.keys()):
try:
POICategory[placeDict[place]["category"]]
(POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance) = (placeDict[place]["name"], POICategory[placeDict[place]["category"]], placeDict[place]["category"], placeDict[place]["lat"], placeDict[place]["lon"], placeDict[place]["distance"])
clusterchoiceSetDict.update({itr:[POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance]})
itr += 1
except KeyError:
(POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance) = (placeDict[place]["name"], u"専門その他", u"不明", placeDict[place]["lat"], placeDict[place]["lon"], placeDict[place]["distance"])
clusterchoiceSetDict.update({itr:[POIName, POIcategory, POIsubcategory, POILat, POILon, POIDistance]})
itr += 1
try:
clusterPOIName = clusterchoiceSetDict[0][0]
clusterPOIcategory = clusterchoiceSetDict[0][1]
clusterPOILat = clusterchoiceSetDict[0][3]
clusterPOILon = clusterchoiceSetDict[0][4]
except KeyError:
clusterPOIName = u"不明"
clusterPOIcategory = u"専門その他"
clusterPOILat = 0
clusterPOILon = 0
stayTimeWindow = [0]*288
#print cluster, clusterLat, clusterLon, POIName, POIcategory, POILat, POILon
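        # POIDict value layout (added note): [name, category, lat, lon,
        # 288-bin stay-time histogram, daytime total, nighttime total]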
POIDict.update({cluster:[clusterPOIName, clusterPOIcategory, clusterPOILat, clusterPOILon, stayTimeWindow, 0, 0]})
choiceSetDict.update({clusterPOIName:clusterchoiceSetDict})
return POIDict, choiceSetDict
def IdentifyHomeWorkPlace(UserTripData, POIDict, clusteringLabelDict, NumOfDayData):
    ### consolidate destination names
DestinationNameDict = {}
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
ID1 = UserTripData[datadate][tripID].OriginPlaceID
UserTripData[datadate][tripID].OriginPlacePOI = POIDict[ID1][0]
UserTripData[datadate][tripID].OriginPlaceCategory = POIDict[ID1][1]
Name1 = UserTripData[datadate][tripID].OriginPlacePOI
ID2 = UserTripData[datadate][tripID].DestinationPlaceID
UserTripData[datadate][tripID].DestinationPlacePOI = POIDict[ID2][0]
UserTripData[datadate][tripID].DestinationPlaceCategory = POIDict[ID2][1]
Name2 = UserTripData[datadate][tripID].DestinationPlacePOI
if Name1 != u"不明":
try:
DestinationNameDict[Name1]
if ID1 not in DestinationNameDict[Name1]:
DestinationNameDict[Name1].append(ID1)
except KeyError:
DestinationNameDict.update({Name1:[ID1]})
if Name2 != u"不明":
try:
DestinationNameDict[Name2]
if ID2 not in DestinationNameDict[Name2]:
DestinationNameDict[Name2].append(ID2)
except KeyError:
DestinationNameDict.update({Name2:[ID2]})
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
Name1 = UserTripData[datadate][tripID].OriginPlacePOI
if Name1 != u"不明":
UserTripData[datadate][tripID].OriginPlaceID = sorted(DestinationNameDict[Name1])[0]
Name2 = UserTripData[datadate][tripID].DestinationPlacePOI
if Name2 != u"不明":
UserTripData[datadate][tripID].DestinationPlaceID = sorted(DestinationNameDict[Name2])[0]
standardTime = datetime.datetime(2000, 1, 1, 0, 0, 0)
stayStartTime = datetime.datetime(2000, 1, 1, 0, 0, 0)
itr = 0
stayStartPlaceID = 0
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
if itr > 1:
stayEndTime = UserTripData[datadate][tripID].TripStartTimestamp
stayEndPlaceID = UserTripData[datadate][tripID].OriginPlaceID
stayTime = (stayEndTime - stayStartTime).seconds
if stayTime <= 86400 and stayStartPlaceID == stayEndPlaceID:
startTimeWindow = (stayStartTime - standardTime).seconds/300
endTimeWindow = (stayEndTime - standardTime).seconds/300
if endTimeWindow >= startTimeWindow and endTimeWindow <= 288:
for i in range(startTimeWindow, (endTimeWindow+1)):
POIDict[stayStartPlaceID][4][i] += 1
elif endTimeWindow >= startTimeWindow and endTimeWindow == 288:
for i in range(startTimeWindow, endTimeWindow):
POIDict[stayStartPlaceID][4][i] += 1
POIDict[stayStartPlaceID][4][0] += 1
elif endTimeWindow < startTimeWindow:
for i in range(startTimeWindow, 288):
POIDict[stayStartPlaceID][4][i] += 1
for i in range(0, (endTimeWindow+1)):
POIDict[stayStartPlaceID][4][i] += 1
elif stayTime <= 86400 and stayStartPlaceID != stayEndPlaceID:
startTimeWindow = (stayStartTime - standardTime).seconds/300
endTimeWindow = (stayEndTime - standardTime).seconds/300
if endTimeWindow >= startTimeWindow and endTimeWindow <= 288:
for i in range(startTimeWindow, (endTimeWindow+1)):
POIDict[stayStartPlaceID][4][i] += 0.5
elif endTimeWindow >= startTimeWindow and endTimeWindow == 288:
for i in range(startTimeWindow, endTimeWindow):
POIDict[stayStartPlaceID][4][i] += 0.5
POIDict[stayStartPlaceID][4][0] += 0.5
elif endTimeWindow < startTimeWindow:
for i in range(startTimeWindow, 288):
POIDict[stayStartPlaceID][4][i] += 0.5
for i in range(0, (endTimeWindow+1)):
POIDict[stayStartPlaceID][4][i] += 0.5
stayStartTime = UserTripData[datadate][tripID].TripEndTimestamp
stayStartPlaceID = UserTripData[datadate][tripID].DestinationPlaceID
itr += 1
dayTimePOIDict = {}
nightTimePOIDict = {}
for poi in POIDict.keys():
dayTimePOIDict.update({poi:sum(POIDict[poi][4][132:204])})
nightTimePOIDict.update({poi:sum(POIDict[poi][4][264:288]) + sum(POIDict[poi][4][0:72])})
POIDict[poi][5] = sum(POIDict[poi][4][132:204])
POIDict[poi][6] = sum(POIDict[poi][4][264:288]) + sum(POIDict[poi][4][0:72])
homeIDList = []
workIDList = []
for key, value in sorted(nightTimePOIDict.items(), key=lambda x:x[1], reverse=True):
homePOI = key
if value/96 > NumOfDayData*0.1:
homeIDList.append(homePOI)
else:
break
for key, value in sorted(dayTimePOIDict.items(), key=lambda x:x[1], reverse=True):
workPOI = key
if value/72 > NumOfDayData*0.1:
workIDList.append(workPOI)
else:
break
for homeID in homeIDList:
POIDict[homeID][0] = "home"
POIDict[homeID][1] = "home"
homeLat = clusteringLabelDict[homeID][0]
homeLon = clusteringLabelDict[homeID][1]
POIDict[homeID][2] = homeLat
POIDict[homeID][3] = homeLon
for cluster in clusteringLabelDict.keys():
clusterLat = clusteringLabelDict[cluster][0]
clusterLon = clusteringLabelDict[cluster][1]
distance = LatLonCalc.CalcDistance(homeLat, homeLon, clusterLat, clusterLon)
if cluster != homeID and distance < 300:
POIDict[cluster][0] = "home"
POIDict[cluster][1] = "home"
POIDict[cluster][2] = clusterLat
POIDict[cluster][3] = clusterLon
for workID in workIDList:
POIDict[workID][1] = "work"
workLat = clusteringLabelDict[workID][0]
workLon = clusteringLabelDict[workID][1]
POIDict[workID][2] = workLat
POIDict[workID][3] = workLon
for cluster in clusteringLabelDict.keys():
clusterLat = clusteringLabelDict[cluster][0]
clusterLon = clusteringLabelDict[cluster][1]
distance = LatLonCalc.CalcDistance(workLat, workLon, clusterLat, clusterLon)
if cluster != workID and distance < 300 and POIDict[cluster][1] != "home" and POIDict[cluster][1] != "work":
POIDict[cluster][1] = "work"
POIDict[cluster][2] = clusterLat
POIDict[cluster][3] = clusterLon
    # store the results
for datadate in sorted(UserTripData.keys()):
for tripID in sorted(UserTripData[datadate].keys()):
ID1 = UserTripData[datadate][tripID].OriginPlaceID
UserTripData[datadate][tripID].OriginPlacePOI = POIDict[ID1][0]
UserTripData[datadate][tripID].OriginPlaceCategory = POIDict[ID1][1]
UserTripData[datadate][tripID].OriginPlaceLat = POIDict[ID1][2]
UserTripData[datadate][tripID].OriginPlaceLon = POIDict[ID1][3]
ID2 = UserTripData[datadate][tripID].DestinationPlaceID
UserTripData[datadate][tripID].DestinationPlacePOI = POIDict[ID2][0]
UserTripData[datadate][tripID].DestinationPlaceCategory = POIDict[ID2][1]
UserTripData[datadate][tripID].DestinationPlaceLat = POIDict[ID2][2]
UserTripData[datadate][tripID].DestinationPlaceLon = POIDict[ID2][3]
return (UserTripData, POIDict)
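# Note on the time windows used above (added explanation): a day is split
# into 288 five-minute bins (24*12). Bins 132:204 cover 11:00-17:00 and are
# summed as daytime ("work") presence; bins 264:288 plus 0:72 cover
# 22:00-06:00 and are summed as nighttime ("home") presence. A place is
# labelled home/work when its average presence exceeds 10% of observed days.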
def WriteTripData(UserTripData, choiceSetDict, outputPATH, outputFileName):
dst = outputPATH + outputFileName + "_tripdata.csv"
fo = open(dst, 'w')
for datadate in sorted(UserTripData.keys()):
for tripID in UserTripData[datadate].keys():
outputList = [UserTripData[datadate][tripID].TripStartTimestamp, UserTripData[datadate][tripID].OriginPlacePOI, UserTripData[datadate][tripID].OriginPlaceCategory, UserTripData[datadate][tripID].OriginPlaceLat, UserTripData[datadate][tripID].OriginPlaceLon, UserTripData[datadate][tripID].TripEndTimestamp, UserTripData[datadate][tripID].DestinationPlacePOI, UserTripData[datadate][tripID].DestinationPlaceCategory, UserTripData[datadate][tripID].DestinationPlaceLat, UserTripData[datadate][tripID].DestinationPlaceLon]
"""
try:
for choice in choiceSetDict[UserTripData[datadate][tripID].DestinationPlacePOI].keys():
outputList.append(choiceSetDict[UserTripData[datadate][tripID].DestinationPlacePOI][choice][0])
outputList.append(choiceSetDict[UserTripData[datadate][tripID].DestinationPlacePOI][choice][1])
outputList.append(choiceSetDict[UserTripData[datadate][tripID].DestinationPlacePOI][choice][2])
outputList.append(choiceSetDict[UserTripData[datadate][tripID].DestinationPlacePOI][choice][3])
outputList.append(choiceSetDict[UserTripData[datadate][tripID].DestinationPlacePOI][choice][4])
outputList.append(choiceSetDict[UserTripData[datadate][tripID].DestinationPlacePOI][choice][5])
except KeyError:
pass
"""
output(fo, outputList)
fo.close() | mit |
jayfans3/jieba | test/extract_topic.py | 65 | 1463 | import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
if len(sys.argv)<2:
print("usage: extract_topic.py directory [n_topic] [n_top_words]")
sys.exit(0)
n_topic = 10
n_top_words = 25
if len(sys.argv)>2:
n_topic = int(sys.argv[2])
if len(sys.argv)>3:
n_top_words = int(sys.argv[3])
count_vect = CountVectorizer()
docs = []
pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)
for f_name in glob.glob(pattern):
with open(f_name) as f:
print("read file:", f_name)
for line in f: #one line as a document
words = " ".join(jieba.cut(line))
docs.append(words)
random.shuffle(docs)
print("read done.")
print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)
t0 = time.time()
print("training...")
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))
# Map feature indices back to words via the vectorizer vocabulary
feature_names = count_vect.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print("")
| mit |
sorig/shogun | examples/undocumented/python/graphical/regression_gaussian_process_demo.py | 6 | 9237 | ###########################################################################
# Mean prediction from Gaussian Processes based on
# classifier_libsvm_minimal_modular.py
# plotting functions have been adapted from the pyGP library
# https://github.com/jameshensman/pyGP
###########################################################################
from numpy import *
from numpy.random import randn
from shogun import *
import pylab as PL
import matplotlib
import logging as LG
import scipy as SP
from shogun import GradientModelSelection
from shogun import ModelSelectionParameters, R_EXP, R_LINEAR
from shogun import ParameterCombination
def plot_training_data(x, y,
shift=None,
replicate_indices=None,
format_data={'alpha':.5,
'marker':'.',
'linestyle':'--',
'lw':1,
'markersize':9},
draw_arrows=0,
plot_old=False):
"""
Plot training data input x and output y into the
active figure (See http://matplotlib.sourceforge.net/ for details of figure).
Instance plot without replicate groups:
.. image:: ../images/plotTraining.png
:height: 8cm
Instance plot with two replicate groups and a shift in x-koords:
.. image:: ../images/plotTrainingShiftX.png
:height: 8cm
**Parameters:**
x : [double]
Input x (e.g. time).
y : [double]
Output y (e.g. expression).
shift : [double]
The shift of each replicate group.
replicate_indices : [int]
            Indices of replicates for each x, respectively
format_data : {format}
Format of the data points. See http://matplotlib.sourceforge.net/ for details.
draw_arrows : int
            draw given number of arrows (if greater than len(replicate), draw all arrows).
            Arrows will show the time shift for time points, respectively.
"""
x_shift = SP.array(x.copy())
if shift is not None and replicate_indices is not None:
assert len(shift) == len(SP.unique(replicate_indices)), 'Need one shift per replicate to plot properly'
_format_data = format_data.copy()
if(format_data.has_key('alpha')):
_format_data['alpha'] = .2*format_data['alpha']
else:
_format_data['alpha'] = .2
number_of_groups = len(SP.unique(replicate_indices))
for i in SP.unique(replicate_indices):
x_shift[replicate_indices == i] -= shift[i]
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
_format_data['color'] = col
if(plot_old):
PL.plot(x[replicate_indices == i], y[replicate_indices == i], **_format_data)
if(draw_arrows):
range = SP.where(replicate_indices == i)[0]
for n in SP.arange(range[0], range[-1], max(1, round(len(range) / draw_arrows))):
offset = round((len(range)-1) / draw_arrows)
n += max(int((i+1)*offset/number_of_groups),1)
PL.text((x_shift[n]+x[n])/2., y[n],
"%.2f"%(-shift[i]),
ha='center',va='center',
fontsize=10)
PL.annotate('', xy=(x_shift[n], y[n]),
xytext=(x[n], y[n]),va='center',
arrowprops=dict(facecolor=col,
alpha=.2,
shrink=.01,
frac=.2,
headwidth=11,
width=11))
#PL.plot(x,y,**_format_data)
if(replicate_indices is not None):
number_of_groups = len(SP.unique(replicate_indices))
#format_data['markersize'] = 13
#format_data['alpha'] = .5
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x_shift[replicate_indices == i], y[replicate_indices == i], **format_data)
else:
print(x_shift.shape)
number_of_groups = x_shift.shape[0]
for i in xrange(number_of_groups):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x[i], y[i], **format_data)
# return PL.plot(x_shift,y,**format_data)
def plot_sausage(X, mean, std, alpha=None, format_fill={'alpha':0.3, 'facecolor':'k'}, format_line=dict(alpha=1, color='g', lw=3, ls='dashed')):
"""
    plot sausage plot of GP. I.e:
.. image:: ../images/sausage.png
:height: 8cm
**returns:** : [fill_plot, line_plot]
The fill and the line of the sausage plot. (i.e. green line and gray fill of the example above)
**Parameters:**
X : [double]
        Interval X for which the sausage shall be plotted.
mean : [double]
        The mean to be plotted.
std : [double]
Pointwise standard deviation.
format_fill : {format}
The format of the fill. See http://matplotlib.sourceforge.net/ for details.
format_line : {format}
The format of the mean line. See http://matplotlib.sourceforge.net/ for details.
"""
X = X.squeeze()
Y1 = (mean + 2 * std)
Y2 = (mean - 2 * std)
if(alpha is not None):
old_alpha_fill = min(1, format_fill['alpha'] * 2)
for i, a in enumerate(alpha[:-2]):
format_fill['alpha'] = a * old_alpha_fill
hf = PL.fill_between(X[i:i + 2], Y1[i:i + 2], Y2[i:i + 2], lw=0, **format_fill)
i += 1
hf = PL.fill_between(X[i:], Y1[i:], Y2[i:], lw=0, **format_fill)
else:
hf = PL.fill_between(X, Y1, Y2, **format_fill)
hp = PL.plot(X, mean, **format_line)
return [hf, hp]
class CrossRect(matplotlib.patches.Rectangle):
def __init__(self, *args, **kwargs):
matplotlib.patches.Rectangle.__init__(self, *args, **kwargs)
#self.ax = ax
# def get_verts(self):
# rectverts = matplotlib.patches.Rectangle.get_verts(self)
# return verts
def get_path(self, *args, **kwargs):
old_path = matplotlib.patches.Rectangle.get_path(self)
verts = []
codes = []
for vert, code in old_path.iter_segments():
verts.append(vert)
codes.append(code)
verts.append([1, 1])
codes.append(old_path.LINETO)
new_path = matplotlib.artist.Path(verts, codes)
return new_path
def create_toy_data():
#0. generate Toy-Data; just samples from a superposition of a sin + linear trend
xmin = 1
xmax = 2.5*SP.pi
x = SP.arange(xmin,xmax,(xmax-xmin)/100.0)
C = 2 #offset
sigma = 0.5
b = 0
y = b*x + C + 1*SP.sin(x)
# dy = b + 1*SP.cos(x)
y += sigma*random.randn(y.shape[0])
y-= y.mean()
x = x[:,SP.newaxis]
return [x,y]
def run_demo():
LG.basicConfig(level=LG.INFO)
random.seed(572)
#1. create toy data
[x,y] = create_toy_data()
feat_train = RealFeatures(transpose(x));
labels = RegressionLabels(y);
n_dimensions = 1
    #2. location of equally spaced predictions
X = SP.linspace(0,10,10)[:,SP.newaxis]
    #new interface with likelihood parameters being decoupled from the covariance function
likelihood = GaussianLikelihood()
covar_parms = SP.log([2])
hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
#construct covariance function
SECF = GaussianKernel(feat_train, feat_train,2)
covar = SECF
zmean = ZeroMean();
inf = ExactInferenceMethod(SECF, feat_train, zmean, labels, likelihood);
gp = GaussianProcessRegression(inf, feat_train, labels);
root=ModelSelectionParameters();
c1=ModelSelectionParameters("inference_method", inf);
root.append_child(c1);
c2 = ModelSelectionParameters("scale");
c1.append_child(c2);
c2.build_values(0.01, 4.0, R_LINEAR);
c3 = ModelSelectionParameters("likelihood_model", likelihood);
c1.append_child(c3);
c4=ModelSelectionParameters("sigma");
c3.append_child(c4);
c4.build_values(0.001, 4.0, R_LINEAR);
c5 =ModelSelectionParameters("kernel", SECF);
c1.append_child(c5);
c6 =ModelSelectionParameters("width");
c5.append_child(c6);
c6.build_values(0.001, 4.0, R_LINEAR);
crit = GradientCriterion();
grad=GradientEvaluation(gp, feat_train, labels,
crit);
grad.set_function(inf);
gp.print_modsel_params();
root.print_tree();
grad_search=GradientModelSelection(
root, grad);
grad.set_autolock(0);
best_combination=grad_search.select_model(1);
gp.set_return_type(GaussianProcessRegression.GP_RETURN_COV);
St = gp.apply_regression(feat_train);
St = St.get_labels();
gp.set_return_type(GaussianProcessRegression.GP_RETURN_MEANS);
M = gp.apply_regression();
M = M.get_labels();
#create plots
plot_sausage(transpose(x),transpose(M),transpose(SP.sqrt(St)));
plot_training_data(x,y);
PL.show();
if __name__ == '__main__':
run_demo()
| bsd-3-clause |
nhejazi/scikit-learn | examples/bicluster/plot_bicluster_newsgroups.py | 39 | 5911 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
"""
from __future__ import print_function
from collections import defaultdict
import operator
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
print(__doc__)
def number_normalizer(tokens):
""" Map all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
return ("#NUMBER" if token[0].isdigit() else token for token in tokens)
class NumberNormalizingVectorizer(TfidfVectorizer):
def build_tokenizer(self):
tokenize = super(NumberNormalizingVectorizer, self).build_tokenizer()
return lambda doc: list(number_normalizer(tokenize(doc)))
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = NumberNormalizingVectorizer(stop_words='english', min_df=5)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis],
# cols].sum() but much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
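# (Added note) bicluster_ncut approximates the normalized cut of bicluster i:
# the weight of matrix entries crossing the bicluster boundary divided by the
# weight inside it. Lower values mean tighter biclusters, which is why the
# np.argsort below keeps the five smallest.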
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
NifTK/MITK | Modules/Biophotonics/python/inverseMonteCarlo/evaluateRandomForest.py | 3 | 1102 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 24 09:45:05 2015
@author: wirkert
"""
import numpy as np
from setup import data
from randomForest import randomForest
#%% load data
# the folder with the reflectance spectra
dataFolder = 'data/output/'
trainingParameters, trainingReflectances, testParameters, testReflectances = \
data.noisy(dataFolder)
trainingWeights = np.ones((trainingParameters.shape[0],))
#%% train forest
rf = randomForest(trainingParameters, trainingReflectances, trainingWeights)
#%% test
#with open("iris.dot", 'w') as f:
# f = tree.export_graphviz(rf, out_file=f)
# predict test reflectances and get absolute errors.
absErrors = np.abs(rf.predict(testReflectances) - testParameters)
r2Score = rf.score(testReflectances, testParameters)
#import matplotlib.pyplot as plt
print("absolute error distribution BVF, Volume fraction")
print("median: " + str(np.median(absErrors, axis=0)))
print("lower quartile: " + str(np.percentile(absErrors, 25, axis=0)))
print("higher quartile: " + str(np.percentile(absErrors, 75, axis=0)))
print("r2Score", str(r2Score))
| bsd-3-clause |
ljschumacher/tierpsy-tracker | tierpsy/analysis/blob_feats/getBlobsFeats.py | 1 | 6209 | import json
import os
import cv2
import numpy as np
import pandas as pd
import tables
from tierpsy.analysis.ske_create.getSkeletonsTables import getWormMask
from tierpsy.analysis.ske_create.helperIterROI import generateMoviesROI
from tierpsy.helper.misc import TABLE_FILTERS
def _getBlobFeatures(blob_cnt, blob_mask, roi_image, roi_corner):
if blob_cnt.size > 0:
area = float(cv2.contourArea(blob_cnt))
        # use the minimum-area rotated bounding box; the fitEllipse function
        # often produces bad results. This method gives a better estimate of
        # the worm length than eccentricity.
(CMx, CMy), (L, W), angle = cv2.minAreaRect(blob_cnt)
#adjust CM from the ROI reference frame to the image reference
CMx += roi_corner[0]
CMy += roi_corner[1]
if L == 0 or W == 0:
return None #something went wrong abort
if W > L:
L, W = W, L # switch if width is larger than length
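        # quirkiness is the eccentricity of an ellipse with major axis L
        # and minor axis W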
quirkiness = np.sqrt(1 - W**2 / L**2)
hull = cv2.convexHull(blob_cnt) # for the solidity
solidity = area / cv2.contourArea(hull)
perimeter = float(cv2.arcLength(blob_cnt, True))
compactness = 4 * np.pi * area / (perimeter**2)
# calculate the mean intensity of the worm
intensity_mean, intensity_std = cv2.meanStdDev(roi_image, mask=blob_mask)
intensity_mean = intensity_mean[0,0]
intensity_std = intensity_std[0,0]
# calculate hu moments, they are scale and rotation invariant
hu_moments = cv2.HuMoments(cv2.moments(blob_cnt))
# save everything into the the proper output format
mask_feats = (CMx,
CMy,
area,
perimeter,
L,
W,
quirkiness,
compactness,
angle,
solidity,
intensity_mean,
intensity_std,
*hu_moments.flatten())
else:
return tuple([np.nan]*19)
return mask_feats
def getBlobsFeats(skeletons_file, masked_image_file, strel_size):
# extract the base name from the masked_image_file. This is used in the
# progress status.
base_name = masked_image_file.rpartition('.')[0].rpartition(os.sep)[-1]
progress_prefix = base_name + ' Calculating individual blobs features.'
#read trajectories data with pandas
with pd.HDFStore(skeletons_file, 'r') as ske_file_id:
trajectories_data = ske_file_id['/trajectories_data']
with tables.File(skeletons_file, 'r') as ske_file_id:
dd = ske_file_id.get_node('/trajectories_data')
is_light_background = dd._v_attrs['is_light_background']
expected_fps = dd._v_attrs['expected_fps']
bgnd_param = dd._v_attrs['bgnd_param']
bgnd_param = json.loads(bgnd_param.decode("utf-8"))
#get generators to get the ROI for each frame
ROIs_generator = generateMoviesROI(masked_image_file,
trajectories_data,
progress_prefix = progress_prefix)
def _gen_rows_blocks():
block_size = 1000
        #use rows for the ROIs_generator, this should balance the data in a given thread
block = []
for roi_dicts in ROIs_generator:
for irow, (roi_image, roi_corner) in roi_dicts.items():
block.append((irow, (roi_image, roi_corner)))
if len(block) == block_size:
yield block
block = []
if len(block) > 0:
yield block
def _roi2feats(block):
        # convert a block of (row, ROI) pairs into blob feature tuples
output= []
for irow, (roi_image, roi_corner) in block:
row_data = trajectories_data.loc[irow]
blob_mask, blob_cnt, _ = getWormMask(roi_image,
row_data['threshold'],
strel_size,
min_blob_area=row_data['area'] / 2,
is_light_background = is_light_background)
feats = _getBlobFeatures(blob_cnt, blob_mask, roi_image, roi_corner)
output.append((irow, feats))
return output
# initialize output data as a numpy recarray (pytables friendly format)
feats_names = ['coord_x', 'coord_y', 'area', 'perimeter',
'box_length', 'box_width', 'quirkiness', 'compactness',
'box_orientation', 'solidity', 'intensity_mean', 'intensity_std',
'hu0', 'hu1', 'hu2', 'hu3', 'hu4', 'hu5', 'hu6']
features_df = np.recarray(len(trajectories_data),
dtype = [(x, np.float32) for x in feats_names])
feats_generator = map(_roi2feats, _gen_rows_blocks())
for block in feats_generator:
for irow, row_dat in block:
features_df[irow] = row_dat
with tables.File(skeletons_file, 'r+') as fid:
if '/blob_features' in fid:
fid.remove_node('/blob_features')
fid.create_table(
'/',
'blob_features',
obj=features_df,
filters=TABLE_FILTERS)
assert all(x in feats_names for x in fid.get_node('/blob_features').colnames)
if __name__ == '__main__':
#masked_image_file = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/short_movies/MaskedVideos/double_pick_021216/N2_N6_Set4_Pos5_Ch5_02122016_160343.hdf5'
masked_image_file = '/Volumes/behavgenom_archive$/Serena/MaskedVideos/recording 29.9 green 100-200/recording 29.9 green_X1.hdf5'
skeletons_file = masked_image_file.replace('MaskedVideos', 'Results').replace('.hdf5', '_skeletons.hdf5')
is_light_background = True
strel_size = 5
    getBlobsFeats(skeletons_file, masked_image_file, strel_size) | mit |
thangbui/sparseGP_powerEP | python/sgp/tests/test_PEP_GPy.py | 1 | 3288 | from ..pep.PEP_reg import PEP
import GPy
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
np.random.seed(20)
N = 20 # Number of data points
M = 2 # Number of inducing points
X = np.c_[np.linspace(0, 10, N)] # Data X values
X_B = np.c_[(np.max(X)-np.min(X))*np.random.uniform(0, 1, M)+np.min(X)] + 2
lik_noise_var = 0.1
X_T = np.c_[np.linspace(0,10, 100)] # use the same covariance matrix!
X_X_T = np.vstack((X, X_T))
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
Y_full = np.c_[np.random.multivariate_normal(
np.zeros(X_X_T.shape[0]),
k.K(X_X_T)+np.eye(X_X_T.shape[0])*lik_noise_var)]
Y = np.c_[Y_full[:N]]
Y_T = np.c_[Y_full[N:]]
plt.figure(figsize=(20,10))
plt.scatter(X, Y, color='k')
X_plot = np.c_[np.linspace(-2, 12, 500)]
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=X_B)
model.name = 'VFE'
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
model.Z.unconstrain()
model.optimize('bfgs', messages=True, max_iters=2e3)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'g', label='VFE')
plt.plot(X_plot, m+2*np.sqrt(V),'g--')
plt.plot(X_plot, m-2*np.sqrt(V),'g--')
(m_B, V_B) = model.predict(X_B, full_cov=False)
plt.scatter(X_B, m_B, color='g')
vfe_lml = model.log_likelihood()
print 'VFE: ', vfe_lml
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=X_B)
model.name = 'FITC'
model.inference_method = GPy.inference.latent_function_inference.FITC()
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
model.Z.unconstrain()
model.optimize('bfgs', messages=True, max_iters=2e3)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'b', label='FITC')
plt.plot(X_plot, m+2*np.sqrt(V),'b--')
plt.plot(X_plot, m-2*np.sqrt(V),'b--')
(m_B, V_B) = model.predict(X_B, full_cov=False)
plt.scatter(X_B, m_B, color='b')
fitc_lml = model.log_likelihood()
print 'FITC: ', fitc_lml
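# (Added note) Power-EP interpolates between the two baselines above:
# alpha -> 0 recovers the VFE solution and alpha = 1 recovers FITC, so
# alpha = 0.5 below sits halfway between them.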
alpha = 0.5
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=X_B)
model.name = 'POWER-EP'
model.inference_method = PEP(alpha=alpha)
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
# print model.checkgrad()
model.optimize('bfgs', messages=True, max_iters=2e3)
# model.optimize_restarts(num_restarts = 10)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'r', label='Power-EP, alpha %.2f' % alpha)
plt.plot(X_plot, m+2*np.sqrt(V),'r--')
plt.plot(X_plot, m-2*np.sqrt(V),'r--')
(m_B, V_B) = model.predict(X_B, full_cov=False)
plt.scatter(X_B, m_B, color='r')
pep_lml = model.log_likelihood()
print 'Power EP: ', pep_lml
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.GPRegression(X,Y,k, noise_var=lik_noise_var)
model.name = 'FULL'
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
# print model.checkgrad()
model.optimize('bfgs', messages=True, max_iters=2e3)
# model.optimize_restarts(num_restarts = 10)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'k', label='FULL GP')
plt.plot(X_plot, m+2*np.sqrt(V),'k--')
plt.plot(X_plot, m-2*np.sqrt(V),'k--')
full_lml = model.log_likelihood()
print 'FULL: ', full_lml
plt.legend()
plt.show() | gpl-3.0 |
lscheinkman/nupic | src/nupic/frameworks/viz/networkx_renderer.py | 14 | 1425 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import networkx as nx
import matplotlib.pyplot as plt
class NetworkXRenderer(object):
"""
Network visualization "renderer" implementation to render a network with
graphviz.
"""
def __init__(self, layoutFn=nx.spring_layout):
self.layoutFn = layoutFn
def render(self, graph):
pos = self.layoutFn(graph)
nx.draw_networkx(graph, pos)
nx.draw_networkx_edge_labels(graph, pos, clip_on=False, rotate=False)
plt.show()
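
# Illustrative usage sketch (added; not part of NuPIC itself). The region
# names below are arbitrary demonstration values:
if __name__ == "__main__":
    g = nx.DiGraph()
    g.add_edge("sensorRegion", "spRegion")
    g.add_edge("spRegion", "tmRegion")
    NetworkXRenderer().render(g)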
| agpl-3.0 |
koverholt/ibis | ibis/tests/test_comms.py | 16 | 11505 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
import pytest
import numpy as np
from ibis.util import guid
from ibis.compat import unittest
try:
import ibis.comms as comms
from ibis.comms import (SharedMmap, IbisType, IbisTableReader,
IbisTableWriter)
SKIP_TESTS = False
except ImportError:
SKIP_TESTS = True
def _nuke(path):
try:
os.remove(path)
except os.error:
pass
pytestmark = pytest.mark.skipif(SKIP_TESTS,
reason='Comms extension disabled')
class TestIPCLock(unittest.TestCase):
def setUp(self):
if sys.platform == 'darwin':
raise unittest.SkipTest
self.timeout = 1
self.master = comms.IPCLock(is_slave=0, lock_timeout_ms=self.timeout)
self.slave = comms.IPCLock(self.master.semaphore_id,
lock_timeout_ms=self.timeout)
def test_acquire_and_release(self):
# It's not our turn
self.assertFalse(self.master.acquire(block=False))
self.slave.acquire()
self.slave.release()
self.assertTrue(self.master.acquire())
def test_cleanup_semaphore_arrays(self):
# Otherwise, there will be too many semaphore arrays floating around
for i in range(500):
comms.IPCLock(is_slave=0)
def test_thread_blocking(self):
lock = threading.Lock()
results = []
# This also verifies that the GIL is correctly dropped
def ping():
while True:
with self.slave:
with lock:
if len(results) == 4:
break
results.append('ping')
def pong():
while True:
with self.master:
with lock:
if len(results) == 4:
break
results.append('pong')
t1 = threading.Thread(target=pong)
t1.start()
t2 = threading.Thread(target=ping)
t2.start()
t1.join()
t2.join()
ex_results = ['ping', 'pong'] * 2
assert results == ex_results
class TestSharedMmap(unittest.TestCase):
def setUp(self):
self.to_nuke = []
def tearDown(self):
for path in self.to_nuke:
_nuke(path)

    def test_create_file(self):
        size = 1024
        path = guid()
        try:
            mm = SharedMmap(path, size, create=True)
            mm.close()

            self.assertTrue(os.path.exists(path))
            self.assertEqual(os.stat(path).st_size, size)
        finally:
            _nuke(path)

    def test_file_not_exist(self):
        path = guid()
        self.assertRaises(IOError, SharedMmap, path, 1024)
        self.assertRaises(IOError, SharedMmap, path, 1024, offset=20,
                          create=True)

    def test_close_file(self):
        path = guid()
        self.to_nuke.append(path)

        data = guid()
        mm = SharedMmap(path, len(data), create=True)
        assert mm.closed is False

        mm.close()
        assert mm.closed is True

        # idempotent
        mm.close()
        assert mm.closed is True
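
        # Once the map is closed, all file-like operations raise IOError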
self.assertRaises(IOError, mm.read, 4)
self.assertRaises(IOError, mm.write, 'bazqux')
self.assertRaises(IOError, mm.seek, 0)
self.assertRaises(IOError, mm.flush)

    def test_file_interface(self):
        path = guid()
        self.to_nuke.append(path)

        data = guid()
        mm = SharedMmap(path, len(data), create=True)

        assert mm.tell() == 0
        mm.write(data)
        assert mm.tell() == len(data)

        mm.seek(0)
        assert mm.tell() == 0

        result = mm.read(16)
        assert len(result) == 16
        assert result == data[:16]
        assert mm.tell() == 16

    def test_multiple_mmaps(self):
        path = guid()
        path2 = guid()
        data = guid()
        self.to_nuke.extend([path, path2])

        mm1 = SharedMmap(path, len(data), create=True)
        mm1.write(data)

        mm2 = SharedMmap(path, len(data))
        result = mm2.read()
        self.assertEqual(result, data)

        # Open both maps first, see if data synchronizes
        mm1 = SharedMmap(path2, len(data), create=True)
        mm2 = SharedMmap(path2, len(data))

        mm1.write(data)
        result = mm2.read()
        self.assertEqual(result, data)
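

# Helpers for building masked Ibis columns: a values array paired with a
# uint8 mask of the same length; judging by _check_masked_correct below,
# a nonzero mask entry marks a null value.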
def rand_bool(N):
    return np.random.randint(0, 2, size=N).astype(np.uint8)


def rand_int_span(dtype, N):
    info = np.iinfo(dtype)
    lo, hi = info.min, info.max
    return np.random.randint(lo, hi, size=N).astype(dtype)


def bool_ex(N):
    mask = rand_bool(N)
    values = rand_bool(N)
    return _to_masked(values, mask, IbisType.BOOLEAN)


def int_ex(N, ibis_type):
    mask = rand_bool(N)
    nptype = comms._ibis_to_numpy[ibis_type]
    values = rand_int_span(nptype, N)
    return _to_masked(values, mask, ibis_type)


def double_ex(N):
    mask = rand_bool(N)
    values = np.random.randn(N)
    return _to_masked(values, mask, IbisType.DOUBLE)


def _to_masked(values, mask, dtype):
    return comms.masked_from_numpy(values, mask, dtype)


class TestImpalaMaskedFormat(unittest.TestCase):
    """
    Check that data makes it to and from the file format, and that it can be
    correctly transformed to the appropriate NumPy/pandas/etc. format
    """
    N = 1000
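
    # The nbytes assertions in the tests below are consistent with a storage
    # layout of one null-mask byte plus the raw value bytes per element
    # (e.g. 1 + 1 for BOOLEAN, 1 + 8 for BIGINT and DOUBLE).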

    def _check_roundtrip(self, columns):
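        # Write every column into an in-memory buffer, rewind, then read each
        # column back and require exact equality with the original.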
writer = IbisTableWriter(columns)
table_size = writer.total_size()
buf = comms.RAMBuffer(table_size)
writer.write(buf)
buf.seek(0)
reader = IbisTableReader(buf)
for i, expected in enumerate(columns):
result = reader.get_column(i)
assert result.equals(expected)

    def test_basic_diverse_table(self):
        columns = [
            bool_ex(self.N),
            int_ex(self.N, IbisType.TINYINT),
            int_ex(self.N, IbisType.SMALLINT),
            int_ex(self.N, IbisType.INT),
            int_ex(self.N, IbisType.BIGINT)
        ]
        self._check_roundtrip(columns)

    def test_boolean(self):
        col = bool_ex(self.N)
        self.assertEqual(col.nbytes(), self.N * 2)
        self._check_roundtrip([col])

        # Booleans with nulls will come out as object arrays with None for each
        # null. This is how pandas handles things
        result = col.to_numpy_for_pandas()
        assert result.dtype == object
        _check_masked_correct(col, result, np.bool_,
                              lambda x: x is None)

        # No nulls, get boolean dtype
        mask = np.zeros(self.N, dtype=np.uint8)
        values = rand_bool(self.N)
        col2 = _to_masked(values, mask, IbisType.BOOLEAN)
        result2 = col2.to_numpy_for_pandas()
        _check_masked_correct(col2, result2, np.bool_,
                              lambda x: x is None)

        # Get a numpy.ma.MaskedArray
        # masked_result = col.to_masked_array()

        # didn't copy
        # assert not masked_result.flags.owndata
        # assert masked_result.base is col

    # For each integer type, address conversion back to NumPy rep's: masked
    # array, pandas-compatible (nulls force upcast to float + NaN for NULL)
    def test_tinyint(self):
        col = int_ex(self.N, IbisType.TINYINT)
        self.assertEqual(col.nbytes(), self.N * 2)
        self._check_roundtrip([col])

        _check_pandas_ints_nulls(col, np.int8)
        _check_pandas_ints_no_nulls(self.N, IbisType.TINYINT)

    def test_smallint(self):
        col = int_ex(self.N, IbisType.SMALLINT)
        self.assertEqual(col.nbytes(), self.N * 3)
        self._check_roundtrip([col])

        _check_pandas_ints_nulls(col, np.int16)
        _check_pandas_ints_no_nulls(self.N, IbisType.SMALLINT)

    def test_int(self):
        col = int_ex(self.N, IbisType.INT)
        self.assertEqual(col.nbytes(), self.N * 5)
        self._check_roundtrip([col])

        _check_pandas_ints_nulls(col, np.int32)
        _check_pandas_ints_no_nulls(self.N, IbisType.INT)

    def test_int_segfault(self):
        col = int_ex(1000000, IbisType.INT)
        col.to_numpy_for_pandas()

    def test_bigint(self):
        col = int_ex(self.N, IbisType.BIGINT)
        self.assertEqual(col.nbytes(), self.N * 9)
        self._check_roundtrip([col])

        _check_pandas_ints_nulls(col, np.int64)
        _check_pandas_ints_no_nulls(self.N, IbisType.BIGINT)
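
    # Floating-point columns keep their native dtype even when nulls are
    # present (unlike integers, which upcast to float64); nulls surface as
    # NaN, checked against the mask below.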
    def test_float(self):
        mask = rand_bool(self.N)
        values = np.random.randn(self.N).astype(np.float32)
        col = _to_masked(values, mask, IbisType.FLOAT)
        self.assertEqual(col.nbytes(), self.N * 5)
        self._check_roundtrip([col])

        result = col.to_numpy_for_pandas()
        assert result.dtype == np.float32
        mask = np.isnan(result)
        ex_mask = col.mask().view(np.bool_)
        assert np.array_equal(mask, ex_mask)

    def test_double(self):
        col = double_ex(self.N)
        self.assertEqual(col.nbytes(), self.N * 9)
        self._check_roundtrip([col])

        result = col.to_numpy_for_pandas()
        assert result.dtype == np.float64
        mask = np.isnan(result)
        ex_mask = col.mask().view(np.bool_)
        assert np.array_equal(mask, ex_mask)

    def test_string_pyobject(self):
        # pandas handles strings in object-type (NPY_OBJECT) arrays and uses
        # either None or NaN for nulls. For the time being we'll be consistent
        # with that
        #
        pass

    def test_timestamp(self):
        pass

    def test_decimal(self):
        pass

    def test_multiple_string_columns(self):
        # For the time being, string (STRING, VARCHAR, CHAR) columns will all
        # share the same intern table
        pass


def _check_pandas_ints_nulls(col, dtype):
    result = col.to_numpy_for_pandas()
    assert result.dtype == np.float64
    _check_masked_correct(col, result, dtype, np.isnan)


def _check_pandas_ints_no_nulls(N, ibis_type):
    nptype = comms._ibis_to_numpy[ibis_type]

    mask = np.zeros(N, dtype=np.uint8)
    values = rand_int_span(nptype, N)
    col = _to_masked(values, mask, ibis_type)

    result = col.to_numpy_for_pandas()
    assert result.dtype == nptype
    _check_masked_correct(col, result, nptype, lambda x: False)


def _check_masked_correct(col, result, dtype, is_na_f):
    mask = col.mask()
    data = col.data_bytes().view(dtype)
    for i, v in enumerate(result):
        if mask[i]:
            assert is_na_f(v)
        else:
            # For comparisons outside representable integer range, this may
            # yield incorrect results
            assert v == data[i]


class TestTableRoundTrip(unittest.TestCase):
    """
    Test things not captured by datatype-specific tests
    """

    def test_table_metadata(self):
        # Check values from preamble
        pass
| apache-2.0 |