repository_name (stringlengths 5-67) | func_path_in_repository (stringlengths 4-234) | func_name (stringlengths 0-314) | whole_func_string (stringlengths 52-3.87M) | language (stringclasses 6 values) | func_code_url (stringlengths 85-339)
---|---|---|---|---|---
MAVENSDC/PyTplot | pytplot/QtPlotter/generate.py | _set_pyqtgraph_title | def _set_pyqtgraph_title(layout):
"""
Private function to add a title to the first row of the window.
Returns True if a title is set; otherwise, returns False.
"""
if 'title_size' in pytplot.tplot_opt_glob:
size = pytplot.tplot_opt_glob['title_size']
if 'title_text' in pytplot.tplot_opt_glob:
if pytplot.tplot_opt_glob['title_text'] != '':
layout.addItem(LabelItem(pytplot.tplot_opt_glob['title_text'], size=size, color='k'), row=0, col=0)
return True
return False | python | def _set_pyqtgraph_title(layout):
"""
Private function to add a title to the first row of the window.
Returns True if a Title is set. Else, returns False.
"""
if 'title_size' in pytplot.tplot_opt_glob:
size = pytplot.tplot_opt_glob['title_size']
if 'title_text' in pytplot.tplot_opt_glob:
if pytplot.tplot_opt_glob['title_text'] != '':
layout.addItem(LabelItem(pytplot.tplot_opt_glob['title_text'], size=size, color='k'), row=0, col=0)
return True
return False | Private function to add a title to the first row of the window.
Returns True if a Title is set. Else, returns False. | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/QtPlotter/generate.py#L102-L113 |
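Below is a minimal, hypothetical sketch of exercising this private helper on its own; pytplot normally calls it internally while building a plot window, and the import path is assumed from the file location above.

```python
# Hypothetical standalone exercise of _set_pyqtgraph_title (normally called
# internally by pytplot); import path assumed from pytplot/QtPlotter/generate.py.
import pyqtgraph as pg
import pytplot
from pytplot.QtPlotter.generate import _set_pyqtgraph_title

app = pg.mkQApp()                                # a Qt app must exist first
layout = pg.GraphicsLayout()                     # target layout for the title row
pytplot.tplot_opt_glob['title_size'] = '12pt'    # size read by the helper
pytplot.tplot_opt_glob['title_text'] = 'Demo'    # non-empty text enables the title
print(_set_pyqtgraph_title(layout))              # expected output: True
```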
MAVENSDC/PyTplot | pytplot/store_data.py | store_data | def store_data(name, data=None, delete=False, newname=None):
"""
This function creates a "Tplot Variable" based on the inputs, and
stores this data in memory. Tplot Variables store all of the information
needed to generate a plot.
Parameters:
name : str
Name of the tplot variable that will be created
data : dict
A python dictionary object.
'x' should be a 1-dimensional array that represents the data's x axis. Typically this data is time,
represented in seconds since epoch (January 1st 1970)
'y' should be the data values. This can be 2 dimensions if multiple lines or a spectrogram are desired.
'v' is optional, and is only used for spectrogram plots. This will be a list of bins to be used. If this
is provided, then 'y' should have dimensions of x by z.
'x' and 'y' can be any data format that can be read in by the pandas module. Python lists, numpy arrays,
or any pandas data type will all work.
delete : bool, optional
Deletes the tplot variable matching the "name" parameter
newname: str
Renames TVar to new name
.. note::
If you want to combine multiple tplot variables into one, simply supply the list of tplot variables to the
"data" parameter. This will cause the data to overlay when plotted.
Returns:
None
Examples:
>>> # Store a single line
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> # Store two lines
>>> x_data = [1,2,3,4,5]
>>> y_data = [[1,5],[2,4],[3,3],[4,2],[5,1]]
>>> pytplot.store_data("Variable2", data={'x':x_data, 'y':y_data})
>>> # Store a spectrogram
>>> x_data = [1,2,3]
>>> y_data = [ [1,2,3] , [4,5,6], [7,8,9] ]
>>> v_data = [1,2,3]
>>> pytplot.store_data("Variable3", data={'x':x_data, 'y':y_data, 'v':v_data})
>>> # Combine two different line plots
>>> pytplot.store_data("Variable1and2", data=['Variable1', 'Variable2'])
>>> #Rename TVar
>>> pytplot.store_data('a', data={'x':[0,4,8,12,16], 'y':[1,2,3,4,5]})
>>> pytplot.store_data('a',newname='f')
"""
global tplot_num
create_time = datetime.datetime.now()
if delete is True:
del_data(name)
return
if data is None and newname is None:
print('Please provide data.')
return
if newname is not None:
pytplot.tplot_rename(name, newname)
return
if isinstance(data, list):
base_data = get_base_tplot_vars(data)
# Use first tplot var as the time range
trange = [np.nanmin(data_quants[base_data[0]].data.index),
np.nanmax(data_quants[base_data[0]].data.index)]
df = base_data
spec_bins = None
else:
df = format_ydata(data['y'])
times = data['x']
# If given a list of datetime objects, convert times to seconds since epoch.
if any(isinstance(t, datetime.datetime) for t in times):
for tt, time in enumerate(times):
times[tt] = (time-datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)).total_seconds()
# If given a list of datetime strings, convert times to seconds since epoch
elif any(isinstance(t, str) for t in times):
for tt, time in enumerate(times):
times[tt] = pytplot.tplot_utilities.str_to_int(time)
if len(times) != len(df.index):
print("The lengths of x and y do not match!")
return
elif isinstance(times, pd.Series):
df = df.set_index(data['x'])
else:
df['Index'] = times
df = df.set_index('Index', drop=True)
trange = [np.nanmin(times), np.nanmax(times)]
if 'v' in data or 'v2' in data:
# Generally the data is 1D, but occasionally
# the bins will vary in time.
if 'v' in data:
spec_bins = data['v']
else:
spec_bins = data['v2']
if type(spec_bins) is not pd.DataFrame:
spec_bins = pd.DataFrame(spec_bins)
if len(spec_bins.columns) != 1:
if len(spec_bins) == len(df.index):
spec_bins = spec_bins.set_index(df.index)
else:
print("Length of v and x do not match. Cannot create tplot variable.")
return
else:
spec_bins = spec_bins.transpose()
else:
spec_bins = None
xaxis_opt = dict(axis_label='Time')
yaxis_opt = dict(axis_label=name) if (spec_bins is None) else dict(axis_label='')
zaxis_opt = dict(axis_label='') if (spec_bins is None) else dict(axis_label=name)
line_opt = {}
dtype = ''
time_bar = []
# Dictionary to keep track of extra details needed for plotting
# that aren't actual attributes in Bokeh
extras = dict(panel_size=1)
links = {}
temp = TVar(name, tplot_num, df, spec_bins, xaxis_opt, yaxis_opt, zaxis_opt, line_opt,
trange, dtype, create_time, time_bar, extras, links)
data_quants[name] = temp
data_quants[name].yaxis_opt['y_range'] = get_y_range(df, spec_bins)
    return | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/store_data.py#L18-L163 |
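As the conversion branches above show, 'x' may also hold datetime objects or time strings rather than epoch seconds. A small sketch under that assumption; timezone-aware datetimes are used because the conversion subtracts an aware epoch:

```python
# Sketch: store_data converts datetime objects in 'x' to seconds since epoch.
# Timezone-aware datetimes are assumed, matching the aware epoch used above.
import datetime
import pytplot

utc = datetime.timezone.utc
times = [datetime.datetime(2017, 12, 1, 0, 0, tzinfo=utc),
         datetime.datetime(2017, 12, 1, 0, 1, tzinfo=utc)]
pytplot.store_data("Variable5", data={'x': times, 'y': [1.0, 2.0]})
```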
MAVENSDC/PyTplot | pytplot/get_data.py | get_data | def get_data(name):
"""
This function extracts the data from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_val : pandas dataframe index
data_val : list
Examples:
>>> # Retrieve the data from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time, data = pytplot.get_data("Variable1")
"""
global data_quants
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
temp_data_quant = data_quants[name]
data_val = temp_data_quant.data.values
time_val = temp_data_quant.data.index
    return(time_val, data_val) | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/get_data.py#L8-L39 |
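Per the `.index`/`.values` accessors above, the two return values are a pandas index of times and a NumPy array of data; a quick round-trip sketch:

```python
# Round-trip sketch: get_data returns the DataFrame's index and its values array.
import pytplot

pytplot.store_data("Variable1", data={'x': [1, 2, 3], 'y': [10, 20, 30]})
time, values = pytplot.get_data("Variable1")
print(list(time))     # [1, 2, 3] -- the time index
print(values.shape)   # likely (3, 1): pandas stores each line as a column
```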
MAVENSDC/PyTplot | pytplot/QtPlotter/CustomAxis/AxisItem.py | AxisItem.generateDrawSpecs | def generateDrawSpecs(self, p):
"""
Calls tickValues() and tickStrings() to determine where and how ticks should
be drawn, then generates from this a set of drawing commands to be
interpreted by drawPicture().
"""
profiler = debug.Profiler()
# bounds = self.boundingRect()
bounds = self.mapRectFromParent(self.geometry())
linkedView = self.linkedView()
if linkedView is None or self.grid is False:
tickBounds = bounds
else:
tickBounds = linkedView.mapRectToItem(self, linkedView.boundingRect())
if self.orientation == 'left':
span = (bounds.topRight(), bounds.bottomRight())
tickStart = tickBounds.right()
tickStop = bounds.right()
tickDir = -1
axis = 0
elif self.orientation == 'right':
span = (bounds.topLeft(), bounds.bottomLeft())
tickStart = tickBounds.left()
tickStop = bounds.left()
tickDir = 1
axis = 0
elif self.orientation == 'top':
span = (bounds.bottomLeft(), bounds.bottomRight())
tickStart = tickBounds.bottom()
tickStop = bounds.bottom()
tickDir = -1
axis = 1
elif self.orientation == 'bottom':
span = (bounds.topLeft(), bounds.topRight())
tickStart = tickBounds.top()
tickStop = bounds.top()
tickDir = 1
axis = 1
# print tickStart, tickStop, span
## determine size of this item in pixels
points = list(map(self.mapToDevice, span))
if None in points:
return
lengthInPixels = Point(points[1] - points[0]).length()
if lengthInPixels == 0:
return
# Determine major / minor / subminor axis ticks
if self._tickLevels is None:
tickLevels = self.tickValues(self.range[0], self.range[1], lengthInPixels)
tickStrings = None
else:
## parse self.tickLevels into the formats returned by tickLevels() and tickStrings()
tickLevels = []
tickStrings = []
for level in self._tickLevels:
values = []
strings = []
tickLevels.append((None, values))
tickStrings.append(strings)
for val, strn in level:
values.append(val)
strings.append(strn)
## determine mapping between tick values and local coordinates
dif = self.range[1] - self.range[0]
if dif == 0:
xScale = 1
offset = 0
else:
if axis == 0:
xScale = -bounds.height() / dif
offset = self.range[0] * xScale - bounds.height()
else:
xScale = bounds.width() / dif
offset = self.range[0] * xScale
xRange = [x * xScale - offset for x in self.range]
xMin = min(xRange)
xMax = max(xRange)
profiler('init')
tickPositions = [] # remembers positions of previously drawn ticks
## compute coordinates to draw ticks
## draw three different intervals, long ticks first
tickSpecs = []
for i in range(len(tickLevels)):
tickPositions.append([])
ticks = tickLevels[i][1]
## length of tick
tickLength = self.style['tickLength'] / ((i * 0.5) + 1.0)
lineAlpha = 255 / (i + 1)
if self.grid is not False:
lineAlpha *= self.grid / 255. * np.clip((0.05 * lengthInPixels / (len(ticks) + 1)), 0., 1.)
for v in ticks:
## determine actual position to draw this tick
x = (v * xScale) - offset
if x < xMin or x > xMax: ## last check to make sure no out-of-bounds ticks are drawn
tickPositions[i].append(None)
continue
tickPositions[i].append(x)
p1 = [x, x]
p2 = [x, x]
p1[axis] = tickStart
p2[axis] = tickStop
if self.grid is False:
p2[axis] += tickLength * tickDir
tickPen = self.pen()
color = tickPen.color()
color.setAlpha(lineAlpha)
tickPen.setColor(color)
tickSpecs.append((tickPen, Point(p1), Point(p2)))
profiler('compute ticks')
if self.style['stopAxisAtTick'][0] is True:
stop = max(span[0].y(), min(map(min, tickPositions)))
if axis == 0:
span[0].setY(stop)
else:
span[0].setX(stop)
if self.style['stopAxisAtTick'][1] is True:
stop = min(span[1].y(), max(map(max, tickPositions)))
if axis == 0:
span[1].setY(stop)
else:
span[1].setX(stop)
axisSpec = (self.pen(), span[0], span[1])
textOffset = self.style['tickTextOffset'][axis] ## spacing between axis and text
# if self.style['autoExpandTextSpace'] is True:
# textWidth = self.textWidth
# textHeight = self.textHeight
# else:
# textWidth = self.style['tickTextWidth'] ## space allocated for horizontal text
# textHeight = self.style['tickTextHeight'] ## space allocated for horizontal text
textSize2 = 0
textRects = []
textSpecs = [] ## list of draw
# If values are hidden, return early
if not self.style['showValues']:
return (axisSpec, tickSpecs, textSpecs)
for i in range(min(len(tickLevels), self.style['maxTextLevel'] + 1)):
## Get the list of strings to display for this level
if tickStrings is None:
spacing, values = tickLevels[i]
strings = self.tickStrings(values, self.autoSIPrefixScale * self.scale, spacing)
else:
strings = tickStrings[i]
if len(strings) == 0:
continue
## ignore strings belonging to ticks that were previously ignored
for j in range(len(strings)):
if tickPositions[i][j] is None:
strings[j] = None
## Measure density of text; decide whether to draw this level
rects = []
for s in strings:
if s is None:
rects.append(None)
else:
br = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, asUnicode(s))
## boundingRect is usually just a bit too large
## (but this probably depends on per-font metrics?)
br.setHeight(br.height() * 1.4)
rects.append(br)
textRects.append(rects[-1])
if len(textRects) > 0:
## measure all text, make sure there's enough room
if axis == 0:
textSize = np.sum([r.height() for r in textRects])
textSize2 = np.max([r.width() for r in textRects])
else:
textSize = np.sum([r.width() for r in textRects])
textSize2 = np.max([r.height() for r in textRects])
else:
textSize = 0
textSize2 = 0
if i > 0: ## always draw top level
## If the strings are too crowded, stop drawing text now.
## We use three different crowding limits based on the number
## of texts drawn so far.
textFillRatio = float(textSize) / lengthInPixels
finished = False
for nTexts, limit in self.style['textFillLimits']:
if len(textSpecs) >= nTexts and textFillRatio >= limit:
finished = True
break
if finished:
break
# spacing, values = tickLevels[best]
# strings = self.tickStrings(values, self.scale, spacing)
# Determine exactly where tick text should be drawn
for j in range(len(strings)):
vstr = strings[j]
if vstr is None: ## this tick was ignored because it is out of bounds
continue
vstr = asUnicode(vstr)
x = tickPositions[i][j]
# textRect = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, vstr)
textRect = rects[j]
height = textRect.height()
width = textRect.width()
# self.textHeight = height
offset = max(0, self.style['tickLength']) + textOffset
if self.orientation == 'left':
textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter
rect = QtCore.QRectF(tickStop - offset - width, x - (height / 2), width, height)
elif self.orientation == 'right':
textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
rect = QtCore.QRectF(tickStop + offset, x - (height / 2), width, height)
elif self.orientation == 'top':
textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom
rect = QtCore.QRectF(x - width / 2., tickStop - offset - height, width, height)
elif self.orientation == 'bottom':
textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop
rect = QtCore.QRectF(x - width / 2., tickStop + offset, width, height)
# p.setPen(self.pen())
# p.drawText(rect, textFlags, vstr)
textSpecs.append((rect, textFlags, vstr))
profiler('compute text')
## update max text size if needed.
self._updateMaxTextSize(textSize2)
        return (axisSpec, tickSpecs, textSpecs) | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/QtPlotter/CustomAxis/AxisItem.py#L40-L285 |
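Since generateDrawSpecs consumes whatever tickValues() and tickStrings() return, axis customization is usually done by overriding those hooks on an AxisItem subclass. A minimal sketch using pyqtgraph's public API (not pytplot-specific; the epoch times are arbitrary):

```python
# Sketch: a custom AxisItem whose tickStrings() output generateDrawSpecs will draw.
import datetime
import pyqtgraph as pg

class UnixTimeAxis(pg.AxisItem):
    def tickStrings(self, values, scale, spacing):
        # Format tick values (seconds since epoch) as HH:MM:SS labels.
        return [datetime.datetime.utcfromtimestamp(v).strftime('%H:%M:%S')
                for v in values]

app = pg.mkQApp()
w = pg.PlotWidget(axisItems={'bottom': UnixTimeAxis(orientation='bottom')})
w.plot([1512086400, 1512086460, 1512086520], [1, 2, 3])
w.show()  # run app.exec_() to keep the window open in a script
```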
MAVENSDC/PyTplot | pytplot/staticplot.py | static2dplot | def static2dplot(var, time):
""" If the static option is set in tplot, and is supplied with a time, then the spectrogram plot(s) for which
it is set will have another window pop up, with y and z values plotted at the specified time. """
# Grab names of data loaded in as tplot variables.
names = list(pytplot.data_quants.keys())
# Get data we'll actually work with here.
valid_variables = tplot_utilities.get_data(names)
# Don't plot anything unless we have spectrograms with which to work.
if valid_variables:
# Get z label
labels = tplot_utilities.get_labels_axis_types(names)
# Put together data in easy-to-access format for plots.
data = {}
for name in valid_variables:
bins = tplot_utilities.get_bins(name)
time_values, z_values = tplot_utilities.get_z_t_values(name)
data[name] = [bins, z_values, time_values]
# Set up the 2D static plot
pytplot.static_window = pg.GraphicsWindow()
pytplot.static_window.resize(1000, 600)
pytplot.static_window.setWindowTitle('Static Window')
plot = pytplot.static_window.addPlot(title='2D Static Plot', row=0, col=0)
# Make it so that whenever this first starts up, you just have an empty plot
plot_data = plot.plot([], [])
if var in valid_variables:
# Get min/max values of data's time range (in both datetime and seconds since epoch)
t_min = np.nanmin(time_values)
t_min_str = tplot_utilities.int_to_str(np.nanmin(time_values))
t_min_conv_back = tplot_utilities.str_to_int(t_min_str)
t_max = np.nanmax(time_values)
t_max_str = tplot_utilities.int_to_str(np.nanmax(time_values))
t_max_conv_back = tplot_utilities.str_to_int(t_max_str)
# Convert user input to seconds since epoch
user_time = tplot_utilities.str_to_int(time)
# Covering situation where user entered a time not in the dataset!
# As long as they used a time in the dataset, this will not trigger.
if user_time not in range(t_min_conv_back, t_max_conv_back+1):
while True:
try:
user_time = tplot_utilities.str_to_int(input(
'Chosen time not in range of data [{} to {}]. Input new time (%Y-%m-%d %H:%M:%S). '.format(
t_min_str, t_max_str)))
except:
continue
else:
if user_time not in range(int(t_min), int(t_max)):
continue
else:
break
# Get time closest to the user's time choice
time_array = np.array(data[var][2])
array = np.asarray(time_array)
idx = (np.abs(array - user_time)).argmin()
# If user indicated they wanted the interactive plot's axes to be logged, log 'em.
# But first make sure that values in x and y are loggable!
x_axis = False
y_axis = False
# Checking x axis
if np.nanmin(data[name][0][:]) < 0:
print('Negative data is incompatible with log plotting.')
elif np.nanmin(data[name][0][:]) >= 0 and labels[name][2] == 'log':
x_axis = True
# Checking y axis
if np.nanmin(list(data[name][1][idx])) < 0:
print('Negative data is incompatible with log plotting')
elif np.nanmin(list(data[name][1][idx])) >= 0 and labels[name][3] == 'log':
y_axis = True
# Set plot labels
plot.setLabel('bottom', '{}'.format(labels[name][0]))
plot.setLabel('left', '{}'.format(labels[name][1]))
plot.setLogMode(x=x_axis, y=y_axis)
# Update x and y range if user modified it
tplot_utilities.set_x_range(name, x_axis, plot)
tplot_utilities.set_y_range(name, y_axis, plot)
# Plot data based on time we're hovering over
            plot_data.setData(data[name][0][:], list(data[name][1][idx])) | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/staticplot.py#L7-L90 |
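A hedged sketch of invoking this directly; the variable name is a placeholder, and the time string follows the '%Y-%m-%d %H:%M:%S' format named in the re-prompt above. In normal use pytplot calls this for you when the static option is set on a spectrogram variable.

```python
# Hypothetical direct call; "spec_var" is a placeholder spectrogram variable
# that would already have been loaded via store_data or a file reader.
from pytplot.staticplot import static2dplot

static2dplot("spec_var", "2017-12-01 12:00:00")
```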
MAVENSDC/PyTplot | pytplot/netcdf_to_tplot.py | netcdf_to_tplot | def netcdf_to_tplot(filenames, time ='', prefix='', suffix='', plot=False, merge=False):
'''
This function will automatically create tplot variables from netCDF files.
Parameters:
filenames : str/list of str
The file names and full paths of netCDF files.
time: str
The name of the netCDF file's time variable.
prefix: str
The tplot variable names will be given this prefix. By default,
no prefix is added.
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
plot: bool
The data is plotted immediately after being generated. All tplot
variables generated from this function will be on the same plot.
By default, a plot is not created.
merge: bool
If True, then data from different netCDF files will be merged into
a single pytplot variable.
Returns:
List of tplot variables created.
Examples:
>>> #Create tplot variables from a GOES netCDF file
>>> import pytplot
>>> file = "/Users/user_name/goes_files/g15_epead_a16ew_1m_20171201_20171231.nc"
>>> pytplot.netcdf_to_tplot(file)
>>> #Add a prefix, and plot immediately.
>>> import pytplot
>>> file = "/Users/user_name/goes_files/g15_epead_a16ew_1m_20171201_20171231.nc"
>>> pytplot.netcdf_to_tplot(file, prefix='goes_prefix_', plot=True)
'''
from netCDF4 import Dataset
stored_variables = []
global data_quants
if isinstance(filenames, str):
filenames = [filenames]
elif isinstance(filenames, list):
filenames = filenames
else:
print("Invalid filenames input.")
#return stored_variables
for filename in filenames:
# Read in file
file = Dataset(filename, "r+")
# Creating dictionary that will contain variables and their attributes
vars_and_atts = {}
for name, variable in file.variables.items():
vars_and_atts[name] = {}
for attrname in variable.ncattrs():
vars_and_atts[name][attrname] = getattr(variable, attrname)
# Filling in missing values for each variable with np.nan (if values are not already nan)
# and saving the masked variables to a new dictionary
masked_vars = {} # Dictionary containing properly masked variables
for var in vars_and_atts.keys():
reg_var = file.variables[var]
try:
var_fill_value = vars_and_atts[var]['missing_value']
if np.isnan(var_fill_value) != True:
# We want to force missing values to be nan so that plots don't look strange
var_mask = np.ma.masked_where(reg_var == np.float32(var_fill_value), reg_var)
var_filled = np.ma.filled(var_mask, np.nan)
masked_vars[var] = var_filled
elif np.isnan(var_fill_value) == True:
# missing values are already np.nan, don't need to do anything
var_filled = reg_var
masked_vars[var] = var_filled
except: # continue # Go to next iteration, this variable doesn't have data to mask (probably just a descriptor variable (i.e., 'base_time')
var_filled = reg_var
masked_vars[var] = var_filled
# Most files are from GOES data, which seems to usually have 'time_tag' in them that contain time information.
# There is an exception filter below that will allow a user to pick a different time variable if time_tag doesn't exist.
if time != '':
time_var = file[time]
unix_times = change_time_to_unix_time(time_var)
elif time == '':
time = input('Please enter time variable name. \nVariable list: {l}'.format(l=vars_and_atts.keys()))
while True:
if time not in vars_and_atts.keys():
# Making sure we input a valid response (i.e., the variable exists in the dataset), and also avoiding
# plotting a time variable against time.... because I don't even know what that would mean and uncover.
print('Not a valid variable name, please try again.')
continue
elif time in vars_and_atts.keys():
time_var = time
unix_times = change_time_to_unix_time(time_var)
for i,var in enumerate(file.variables):
# Here, we are making sure that the variables are time-based, otherwise we don't want to store them as tplot variables!
if 'record' in file[var].dimensions[0] or 'time' in file[var].dimensions[0]:
# Store the data now, as well as merge variables if that's desired
var_name = prefix + var + suffix
to_merge = False
if (var_name in data_quants.keys() and (merge == True)):
prev_data_quant = data_quants[var_name].data
to_merge = True
tplot_data = {'x': unix_times, 'y': masked_vars[var]}
store_data(var_name, tplot_data)
if var_name not in stored_variables:
stored_variables.append(var_name)
if to_merge == True:
cur_data_quant = data_quants[var_name].data
merged_data = [prev_data_quant, cur_data_quant]
data_quants[var_name].data = pd.concat(merged_data)
# If we are interested in seeing a quick plot of the variables, do it
if plot:
tplot(stored_variables)
else:
# If the variable isn't time-bound, we're going to look at the next variable
continue
    return (stored_variables) | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/netcdf_to_tplot.py#L20-L148 |
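Passing `time` explicitly skips the interactive prompt, and `merge=True` concatenates consecutive files into one variable. A sketch under the assumption that the files use the GOES-style 'time_tag' variable mentioned in the comments above; the file names are placeholders:

```python
# Sketch: explicit time variable plus merge across two monthly GOES files.
# 'time_tag' and the file names are assumptions based on the comments above.
import pytplot

files = ["g15_epead_a16ew_1m_20171101_20171130.nc",
         "g15_epead_a16ew_1m_20171201_20171231.nc"]
created = pytplot.netcdf_to_tplot(files, time='time_tag', prefix='goes_', merge=True)
print(created)
```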
MAVENSDC/PyTplot | pytplot/tplot_rename.py | tplot_rename | def tplot_rename(old_name, new_name):
"""
This function will rename tplot variables that are already stored in memory.
Parameters:
old_name : str
Old name of the Tplot Variable
new_name : str
New name of the Tplot Variable
Returns:
None
Examples:
>>> # Rename Variable 1 to Variable 2
>>> import pytplot
>>> pytplot.tplot_rename("Variable1", "Variable2")
"""
#check if old name is in current dictionary
if old_name not in pytplot.data_quants.keys():
print("That name is currently not in pytplot")
return
#if old name input is a number, convert to corresponding name
if isinstance(old_name, int):
old_name = pytplot.data_quants[old_name].name
#remake dictionary with new name in old name's slot
d = pytplot.data_quants
d2 = OrderedDict([(new_name, v) if k == old_name else (k, v) for k, v in d.items()])
data_quants = d2
for key in d2:
data_quants[key].name = key
pytplot.data_quants = data_quants
    return | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot_rename.py#L9-L45 |
MAVENSDC/PyTplot | pytplot/__init__.py | TVar._check_spec_bins_ordering | def _check_spec_bins_ordering(self):
"""
This is a private function of the TVar object; it is run during
object creation to check whether spec_bins are ascending or descending
"""
if self.spec_bins is None:
return
if len(self.spec_bins) == len(self.data.index):
self.spec_bins_time_varying = True
break_top_loop = False
for index, row in self.spec_bins.iterrows():
if row.isnull().values.all():
continue
else:
for i in row.index:
if np.isfinite(row[i]) and np.isfinite(row[i + 1]):
ascending = row[i] < row[i + 1]
break_top_loop = True
break
else:
continue
if break_top_loop:
break
else:
ascending = self.spec_bins[0].iloc[0] < self.spec_bins[1].iloc[0]
    return ascending | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/__init__.py#L138-L163 |
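The ordering test reduces to finding the first adjacent pair of finite bin values; a standalone illustration of that logic (a hypothetical helper, not part of pytplot):

```python
# Standalone illustration of the ascending/descending test used above.
import numpy as np
import pandas as pd

def bins_ascending(spec_bins: pd.DataFrame) -> bool:
    """True if the first adjacent pair of finite bin values is increasing."""
    for _, row in spec_bins.iterrows():
        vals = row.to_numpy(dtype=float)
        ok = np.isfinite(vals)
        for i in range(len(vals) - 1):
            if ok[i] and ok[i + 1]:
                return bool(vals[i] < vals[i + 1])
    raise ValueError("no adjacent finite bin values found")

print(bins_ascending(pd.DataFrame([[1.0, 10.0, 100.0]])))   # True
bins = pd.DataFrame([[np.nan, np.nan, np.nan],
                     [100.0, 10.0, 1.0]])
print(bins_ascending(bins))   # False -- first finite pair is (100.0, 10.0)
```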
MAVENSDC/PyTplot | pytplot/tplot_names.py | tplot_names | def tplot_names():
"""
This function will print out and return a list of all current Tplot Variables stored in the memory.
Parameters:
None
Returns:
list : list of str
A list of all Tplot Variables stored in the memory
Examples:
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> tnames = pytplot.tplot_names()
0 : Variable1
"""
index = 0
return_names=[]
for key, _ in data_quants.items():
if isinstance(data_quants[key].data, list):
if isinstance(key, str):
names_to_print = data_quants[key].name + " data from: "
for name in data_quants[key].data:
names_to_print = names_to_print + " " + name
print(index, ":", names_to_print)
index+=1
else:
if isinstance(key, str):
names_to_print = data_quants[key].name
print(index, ":", names_to_print)
index+=1
return_names.append(names_to_print)
    return return_names | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot_names.py#L8-L46 |
MAVENSDC/PyTplot | pytplot/interactiveplot.py | interactiveplot | def interactiveplot(t_average=None):
""" If the interactive option is set to True in tplot, this function will take in the stored tplot variables
and create a 2D interactive window that will pop up when any one of the tplot variables is plotted (so long
as at least one of the tplot variables is a spectrogram). If the mouse hovers over a spectrogram plot, data
for that point in time on the spectrogram plot will be plotted in the 2D interactive window. If the mouse
hovers over a non-spectrogram plot, the 2D interactive window returns an empty plot. If the 't_average'
option is selected, then the interactive window's y values will be time-averaged values, where the
amount of time for which those values have been averaged is determined by the number of seconds the user
indicates. """
# Grab names of data loaded in as tplot variables.
names = list(pytplot.data_quants.keys())
# Get data we'll actually work with here.
valid_variables = tplot_utilities.get_data(names)
# Don't plot anything unless we have spectrograms with which to work.
if valid_variables:
# Get z label
labels = tplot_utilities.get_labels_axis_types(names)
# Put together data in easy-to-access format for plots.
data = {}
for name in valid_variables:
bins = tplot_utilities.get_bins(name)
time_values, z_values = tplot_utilities.get_z_t_values(name)
data[name] = [bins, z_values, time_values]
# Set up the 2D interactive plot
pytplot.interactive_window = pg.GraphicsWindow()
pytplot.interactive_window.resize(1000, 600)
pytplot.interactive_window.setWindowTitle('Interactive Window')
plot = pytplot.interactive_window.addPlot(title='2D Interactive Plot', row=0, col=0)
# Make it so that whenever this first starts up, you just have an empty plot
plot_data = plot.plot([], [])
# The following update function is passed to change_hover_time in the HoverTime class
# defined in __init__.py. For reference, "t" essentially originates inside of
# TVarFigure(1D/Spec/Alt/Map), inside the _mousemoved function. It calls
# "self._mouseMovedFunction(int(mousePoint.x()))" and that is called every time the mouse is
# moved by Qt. Therefore, it gives the location of the mouse on the x axis. In tplot,
# mouse_moved_event is set to pytplot.hover_time.change_hover_time, so the mouseMovedFunction
# is pytplot.hover_time.change_hover_time. Thus, whenever change_hover_time is called, it
# calls every other function that is registered. Since the below function update() is
# registered as a listener, it'll update whenever hover_time is updated.
# to the HoverTime class with "t" as the input.
# TL;DR, t comes from getting the mouse location in pyqtgraph every time the mouse is moved
# and the below function will update the plot's position as the mouse is moved.
def update(t, name):
if name in valid_variables:
# Get the time closest to the x position the mouse is over.
time_array = np.array(data[name][2])
array = np.asarray(time_array)
idx = (np.abs(array - t)).argmin()
# If user indicated they wanted the interactive plot's axes to be logged, log 'em.
# But first make sure that values in x and y are loggable!
x_axis = False
y_axis = False
# Checking x axis
if np.nanmin(data[name][0][:]) < 0:
print('Negative data is incompatible with log plotting.')
elif np.nanmin(data[name][0][:]) >= 0 and labels[name][2] == 'log':
x_axis = True
# Checking y axis
if np.nanmin(list(data[name][1][idx])) < 0:
print('Negative data is incompatible with log plotting')
elif np.nanmin(list(data[name][1][idx])) >= 0 and labels[name][3] == 'log':
y_axis = True
# Set plot labels
plot.setLabel('bottom', '{}'.format(labels[name][0]))
plot.setLabel('left', '{}'.format(labels[name][1]))
plot.setLogMode(x=x_axis, y=y_axis)
# Update x and y range if user modified it
tplot_utilities.set_x_range(name, x_axis, plot)
tplot_utilities.set_y_range(name, y_axis, plot)
if 't_average' in pytplot.data_quants[name].extras:
# If the user indicated that they wanted to average the interactive plot's y values based on a
# certain time range around the cursor location, we then want to get average of values around
# the cursor location.
t_min = data[name][2][0]
t_max = data[name][2][-1]
                    delta = pytplot.data_quants[name].extras['t_average'] / 2
left_bound = data[name][2][idx] - delta
right_bound = data[name][2][idx] + delta
                    if (left_bound - t_min < 0) and (t_max - right_bound < 0):
                        # The requested window covers the entire time range of the dataset;
                        # the time-averaged static plot is the right tool for that, so fall
                        # back to averaging everything. This case must be checked first so
                        # the single-edge branches below do not shadow it.
                        print(
                            'This plot isn\'t appropriate for what you want, use the time-averaged static plot.')
                        idx_left = 0
                        idx_right = -1
                    elif (left_bound - t_min >= 0) and (t_max - right_bound >= 0):
                        # The window fits inside the data; find the index of each bound directly.
                        idx_left = (np.abs(array - left_bound)).argmin()
                        idx_right = (np.abs(array - right_bound)).argmin()
                    elif left_bound - t_min < 0:
                        # The window runs off the left edge of the data: add the overshoot
                        # (t_min - left_bound) to the right bound, pushing the window forward
                        # in time, and clamp the left bound's index to 0.
                        idx_left = 0
                        diff = right_bound + (t_min - left_bound)
                        idx_right = (np.abs(array - diff)).argmin()
                    else:
                        # The window runs off the right edge: subtract the overshoot
                        # (right_bound - t_max) from the left bound, pushing the window back
                        # in time, and clamp the right bound's index to -1.
                        idx_right = -1
                        diff = left_bound - (right_bound - t_max)
                        idx_left = (np.abs(array - diff)).argmin()
                    # Average the values between the calculated left and right bound indices,
                    # accounting for the idx_right == -1 (end of data) edge case.
                    if idx_right != -1:
                        y_values_slice = data[name][1][idx_left:idx_right + 1]
                    else:
                        y_values_slice = data[name][1][idx_left:]
                    y_values_avgd = np.sum(y_values_slice, axis=0) / float(len(y_values_slice))
# Update x and y range if user modified it
tplot_utilities.set_x_range(name, x_axis, plot)
tplot_utilities.set_y_range(name, y_axis, plot)
try:
# Plot data based on time we're hovering over
plot_data.setData(data[name][0][:], y_values_avgd)
except ZeroDivisionError:
pass
else:
# Update x and y range if user modified it
tplot_utilities.set_x_range(name, x_axis, plot)
tplot_utilities.set_y_range(name, y_axis, plot)
                    # If the user just wants a plain interactive plot,
                    # plot data based on the time we're hovering over.
try:
plot_data.setData(data[name][0][:], list(data[name][1][idx]))
except ZeroDivisionError:
pass
else:
# Cover the situation where you hover over a non-spectrogram plot.
plot.setLogMode(False, False)
plot.setLabel('bottom', '')
plot.setLabel('left', '')
plot_data.setData([], [])
# Make the above function called whenever hover_time is updated.
    pytplot.hover_time.register_listener(update) | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/interactiveplot.py#L8-L162
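
The window-clamping arithmetic above is the subtle part of interactiveplot, so here is the same index logic as a minimal standalone sketch; averaging_window is a hypothetical helper written for illustration, not part of PyTplot:

import numpy as np

def averaging_window(times, idx, t_average):
    # Return (idx_left, idx_right) spanning +/- t_average/2 seconds around times[idx],
    # shifting the window so it stays inside the data range, as interactiveplot does.
    times = np.asarray(times)
    delta = t_average / 2
    left = times[idx] - delta
    right = times[idx] + delta
    if left < times[0] and right > times[-1]:
        # Window covers the whole dataset; fall back to averaging everything.
        return 0, len(times) - 1
    if left < times[0]:
        # Ran off the left edge: push the window forward in time by the overshoot.
        right += times[0] - left
        return 0, int(np.abs(times - right).argmin())
    if right > times[-1]:
        # Ran off the right edge: push the window back in time by the overshoot.
        left -= right - times[-1]
        return int(np.abs(times - left).argmin()), len(times) - 1
    return int(np.abs(times - left).argmin()), int(np.abs(times - right).argmin())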
MAVENSDC/PyTplot | pytplot/cdf_to_tplot.py | cdf_to_tplot | def cdf_to_tplot(filenames, varformat=None, get_support_data=False,
prefix='', suffix='', plot=False, merge=False):
"""
This function will automatically create tplot variables from CDF files.
.. note::
Variables must have an attribute named "VAR_TYPE". If the attribute entry
is "data" (or "support_data"), then they will be added as tplot variables.
    Additionally, data variables should have an attribute named "DEPEND_TIME" or
    "DEPEND_0" that describes which variable is the x axis. If the data is 2D,
then an attribute "DEPEND_1" must describe which variable contains the
secondary axis.
Parameters:
filenames : str/list of str
The file names and full paths of CDF files.
varformat : str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
prefix: str
The tplot variable names will be given this prefix. By default,
no prefix is added.
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
plot: bool
The data is plotted immediately after being generated. All tplot
variables generated from this function will be on the same plot.
merge: bool
If True, then data from different cdf files will be merged into
a single pytplot variable.
Returns:
List of tplot variables created.
"""
stored_variables = []
epoch_cache = {}
global data_quants
    if isinstance(filenames, str):
        filenames = [filenames]
    elif not isinstance(filenames, list):
        print("Invalid filenames input.")
        return stored_variables
var_type = ['data']
    if varformat is None:
        varformat = ".*"
if get_support_data:
var_type.append('support_data')
    try:
        varformat = varformat.replace("*", ".*")
        var_regex = re.compile(varformat)
    except re.error:
        print("Error reading the varformat.")
        return stored_variables
for filename in filenames:
cdf_file = cdflib.CDF(filename)
cdf_info = cdf_file.cdf_info()
all_cdf_variables = cdf_info['rVariables'] + cdf_info['zVariables']
# Find the data variables
for var in all_cdf_variables:
if not re.match(var_regex, var):
continue
var_atts = cdf_file.varattsget(var)
if 'VAR_TYPE' not in var_atts:
continue
if var_atts['VAR_TYPE'] in var_type:
var_properties = cdf_file.varinq(var)
if "DEPEND_TIME" in var_atts:
x_axis_var = var_atts["DEPEND_TIME"]
elif "DEPEND_0" in var_atts:
x_axis_var = var_atts["DEPEND_0"]
else:
print("Cannot find x axis.")
print("No attribute named DEPEND_TIME or DEPEND_0 in variable " + var)
continue
data_type_description = cdf_file.varinq(x_axis_var)['Data_Type_Description']
# Find data name and if it is already in stored variables
var_name = prefix + var + suffix
to_merge = False
                if var_name in data_quants.keys() and merge:
prev_data_quant = data_quants[var_name].data
to_merge = True
if epoch_cache.get(filename+x_axis_var) is None:
xdata = cdf_file.varget(x_axis_var)
if ('CDF_TIME' in data_type_description) or ('CDF_EPOCH' in data_type_description):
xdata = cdflib.cdfepoch.unixtime(xdata)
epoch_cache[filename+x_axis_var] = xdata
else:
xdata = epoch_cache[filename+x_axis_var]
ydata = cdf_file.varget(var)
if ydata is None:
continue
if "FILLVAL" in var_atts:
if (var_properties['Data_Type_Description'] == 'CDF_FLOAT' or
var_properties['Data_Type_Description'] == 'CDF_REAL4' or
var_properties['Data_Type_Description'] == 'CDF_DOUBLE' or
var_properties['Data_Type_Description'] == 'CDF_REAL8'):
if ydata[ydata == var_atts["FILLVAL"]].size != 0:
ydata[ydata == var_atts["FILLVAL"]] = np.nan
tplot_data = {'x': xdata, 'y': ydata}
depend_1 = None
depend_2 = None
if "DEPEND_1" in var_atts:
if var_atts["DEPEND_1"] in all_cdf_variables:
depend_1 = cdf_file.varget(var_atts["DEPEND_1"])
if "DEPEND_2" in var_atts:
if var_atts["DEPEND_2"] in all_cdf_variables:
depend_2 = cdf_file.varget(var_atts["DEPEND_2"])
if (depend_1 is not None) and (depend_2 is not None):
tplot_data['v1'] = depend_1
tplot_data['v2'] = depend_2
elif depend_1 is not None:
tplot_data['v'] = depend_1
elif depend_2 is not None:
tplot_data['v'] = depend_2
store_data(var_name, data=tplot_data)
if var_name not in stored_variables:
stored_variables.append(var_name)
display_type = var_atts.get("DISPLAY_TYPE", "time_series")
scale_type = var_atts.get("SCALE_TYP", "linear")
if display_type == "spectrogram":
options(var, 'spec', 1)
if scale_type == 'log':
options(var, 'ylog', 1)
if to_merge:
cur_data_quant = data_quants[var_name].data
merged_data = [prev_data_quant, cur_data_quant]
data_quants[var_name].data = pd.concat(merged_data)
        if hasattr(cdf_file, "close"):
            cdf_file.close()
if plot:
tplot(stored_variables)
    return stored_variables | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/cdf_to_tplot.py#L17-L177
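
A usage sketch for the loader; the file paths and variable pattern below are made up for illustration, and any CDF files carrying VAR_TYPE and DEPEND_0 attributes would work the same way:

import pytplot

loaded = pytplot.cdf_to_tplot(['/data/day1.cdf', '/data/day2.cdf'],
                              varformat='mvn_*',  # wildcard pattern, as documented above
                              suffix='_l2',
                              merge=True)         # concatenate records across the two files
print(loaded)                                     # list of the tplot variable names created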
MAVENSDC/PyTplot | pytplot/get_timespan.py | get_timespan | def get_timespan(name):
"""
This function extracts the time span from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_begin : float
The beginning of the time series
time_end : float
The end of the time series
Examples:
>>> # Retrieve the time span from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time1, time2 = pytplot.get_timespan("Variable1")
"""
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
print("Start Time: " + tplot_utilities.int_to_str(data_quants[name].trange[0]))
print("End Time: " + tplot_utilities.int_to_str(data_quants[name].trange[1]))
    return(data_quants[name].trange[0], data_quants[name].trange[1]) | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/get_timespan.py#L10-L40
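
Because the returned values are seconds since epoch, converting them for display is a one-liner; this sketch assumes the Variable1 from the docstring example has been stored:

import datetime
import pytplot

t1, t2 = pytplot.get_timespan("Variable1")
print(datetime.datetime.utcfromtimestamp(t1), "to",
      datetime.datetime.utcfromtimestamp(t2))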
MAVENSDC/PyTplot | pytplot/timebar.py | timebar | def timebar(t, varname = None, databar = False, delete = False, color = 'black', thick = 1, dash = False):
"""
This function will add a vertical bar to all time series plots. This is useful if you
want to bring attention to a specific time.
Parameters:
t : flt/list
The time in seconds since Jan 01 1970 to place the vertical bar. If a list of numbers are supplied,
multiple bars will be created. If "databar" is set, then "t" becomes the point on the y axis to
place a horizontal bar.
varname : str/list, optional
The variable(s) to add the vertical bar to. If not set, the default is to add it to all current plots.
databar : bool, optional
This will turn the timebar into a horizontal data bar. If this is set True, then variable "t" becomes
the point on the y axis to place a horizontal bar.
delete : bool, optional
            If set to True, at least one varname must be supplied. The timebar at point "t" for variable "varname"
            will be removed.
color : str
The color of the bar
thick : int
The thickness of the bar
dash : bool
If set to True, the bar is dashed rather than solid
Returns:
None
Examples:
>>> # Place a green time bar at 2017-07-17 00:00:00
>>> import pytplot
>>> pytplot.timebar(1500249600, color='green')
>>> # Place a dashed data bar at 5500 on the y axis
        >>> pytplot.timebar(5500, dash=True, databar=True)
        >>> # Place 3 magenta time bars of thickness 5 at
        >>> # [2015-12-26 05:20:01, 2015-12-26 08:06:40, 2015-12-26 08:53:19]
        >>> # for the variable 'sgx'
        >>> pytplot.timebar([1451107201,1451117200,1451119999],'sgx',color='m',thick=5)
"""
# make sure t entered is a list
if not isinstance(t, list):
t = [t]
#if entries in list not numerical, run str_to_int
if not isinstance(t[0], (int, float, complex)):
t1 = []
for time in t:
t1.append(tplot_utilities.str_to_int(time))
t = t1
    dim = 'height'
    if databar:
        dim = 'width'
    dash_pattern = 'solid'
    if dash:
        dash_pattern = 'dashed'
    if delete:
        tplot_utilities.timebar_delete(t, varname, dim)
        return
#if no varname specified, add timebars to every plot
if varname is None:
num_bars = len(t)
for i in range(num_bars):
tbar = {}
tbar['location'] = t[i]
tbar['dimension'] = dim
tbar['line_color'] = pytplot.tplot_utilities.rgb_color(color)
tbar['line_width'] = thick
tbar['line_dash'] = dash_pattern
for name in data_quants:
temp_data_quants = data_quants[name]
temp_data_quants.time_bar.append(tbar)
#if varname specified
else:
if not isinstance(varname, list):
varname = [varname]
for j in varname:
if j not in data_quants.keys():
print(str(j) + "is currently not in pytplot")
else:
num_bars = len(t)
for i in range(num_bars):
tbar = {}
tbar['location'] = t[i]
tbar['dimension'] = dim
tbar['line_color'] = pytplot.tplot_utilities.rgb_color(color)
tbar['line_width'] = thick
tbar['line_dash'] = dash_pattern
temp_data_quants = data_quants[j]
temp_data_quants.time_bar.append(tbar)
    return | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/timebar.py#L11-L110
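
The docstring examples only show adding bars; removal goes through the same call, passing the original location(s) and variable. A sketch, assuming the magenta bars from the last example were added to 'sgx':

import pytplot

# delete=True expects the same location(s) and variable the bars were added with
pytplot.timebar([1451107201, 1451117200, 1451119999], 'sgx', delete=True)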
MAVENSDC/PyTplot | pytplot/options.py | options | def options(name, option, value):
"""
This function allows the user to set a large variety of options for individual plots.
Parameters:
name : str
Name of the tplot variable
option : str
The name of the option. See section below
value : str/int/float/list
The value of the option. See section below.
Options:
============ ========== =====
Options Value type Notes
============ ========== =====
Color str/list Red, Orange, Yellow, Green, Blue, etc.
Colormap str/list https://matplotlib.org/examples/color/colormaps_reference.html.
Spec int 1 sets the Tplot Variable to spectrogram mode, 0 reverts.
Alt int 1 sets the Tplot Variable to altitude plot mode, 0 reverts.
Map int 1 sets the Tplot Variable to latitude/longitude mode, 0 reverts.
link list Allows a user to reference one tplot variable to another.
        ylog int/bool 1 or True sets the y axis to log scale; 0 or False reverts.
        zlog int/bool 1 or True sets the z axis to log scale; 0 or False reverts (spectrograms only).
        legend_names list A list of strings that will be used to identify the lines.
        xlog_interactive bool Sets x axis on interactive plot to log scale if True.
        ylog_interactive bool Sets y axis on interactive plot to log scale if True.
line_style str solid_line, dot, dash, dash_dot, dash_dot_dot_dot, long_dash.
char_size int Defines character size for plot labels, etc.
name str The title of the plot.
panel_size flt Number between (0,1], representing the percent size of the plot.
basemap str Full path and name of a background image for "Map" plots.
        alpha flt Number between [0,1], gives the transparency of the plot lines.
thick flt Sets plot line width.
yrange flt list Two numbers that give the y axis range of the plot.
zrange flt list Two numbers that give the z axis range of the plot.
        xrange_interactive flt list Two numbers that give the x axis range of interactive plots.
        yrange_interactive flt list Two numbers that give the y axis range of interactive plots.
ytitle str Title shown on the y axis.
ztitle str Title shown on the z axis. Spec plots only.
plotter str Allows a user to implement their own plotting script in place of the ones
herein.
crosshair_x str Title for x-axis crosshair.
crosshair_y str Title for y-axis crosshair.
crosshair_z str Title for z-axis crosshair.
static str Datetime string that gives desired time to plot y and z values from a spec
plot.
static_tavg str Datetime string that gives desired time-averaged y and z values to plot
from a spec plot.
t_average int Seconds around which the cursor is averaged when hovering over spectrogram
plots.
Returns:
None
Examples:
>>> # Change the y range of Variable1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.options('Variable1', 'yrange', [2,4])
>>> # Change Variable1 to use a log scale
>>> pytplot.options('Variable1', 'ylog', 1)
"""
if not isinstance(name, list):
name = [name]
option = option.lower()
for i in name:
if i not in data_quants.keys():
print(str(i) + " is currently not in pytplot.")
return
if option == 'color':
if isinstance(value, list):
data_quants[i].extras['line_color'] = value
else:
data_quants[i].extras['line_color'] = [value]
if option == 'link':
if isinstance(value, list):
data_quants[i].link_to_tvar(value[0], value[1])
if option == 'colormap':
if isinstance(value, list):
data_quants[i].extras['colormap'] = value
else:
data_quants[i].extras['colormap'] = [value]
if option == 'spec':
_reset_plots(i)
data_quants[i].extras['spec'] = value
if option == 'alt':
_reset_plots(i)
data_quants[i].extras['alt'] = value
if option == 'map':
_reset_plots(i)
data_quants[i].extras['map'] = value
if option == 'legend_names':
data_quants[i].yaxis_opt['legend_names'] = value
if option == 'xlog_interactive':
if value:
data_quants[i].interactive_xaxis_opt['xi_axis_type'] = 'log'
else:
data_quants[i].interactive_xaxis_opt['xi_axis_type'] = 'linear'
if option == 'ylog':
negflag = _ylog_check(data_quants, value, i)
if negflag == 0:
data_quants[i].yaxis_opt['y_axis_type'] = 'log'
else:
data_quants[i].yaxis_opt['y_axis_type'] = 'linear'
        if option == 'ylog_interactive':
            if value:
                data_quants[i].interactive_yaxis_opt['yi_axis_type'] = 'log'
            else:
                data_quants[i].interactive_yaxis_opt['yi_axis_type'] = 'linear'
if option == 'zlog':
negflag = _zlog_check(data_quants, value, i)
if negflag == 0:
data_quants[i].zaxis_opt['z_axis_type'] = 'log'
else:
data_quants[i].zaxis_opt['z_axis_type'] = 'linear'
if option == 'nodata':
data_quants[i].line_opt['visible'] = value
if option == 'line_style':
to_be = []
if value == 0 or value == 'solid_line':
to_be = []
elif value == 1 or value == 'dot':
to_be = [2, 4]
elif value == 2 or value == 'dash':
to_be = [6]
elif value == 3 or value == 'dash_dot':
to_be = [6, 4, 2, 4]
elif value == 4 or value == 'dash_dot_dot_dot':
to_be = [6, 4, 2, 4, 2, 4, 2, 4]
elif value == 5 or value == 'long_dash':
to_be = [10]
data_quants[i].line_opt['line_dash'] = to_be
            if value == 6 or value == 'none':
                data_quants[i].line_opt['visible'] = False
if option == 'char_size':
data_quants[i].extras['char_size'] = value
if option == 'name':
data_quants[i].line_opt['name'] = value
if option == "panel_size":
if value > 1 or value <= 0:
print("Invalid value. Should be (0, 1]")
return
data_quants[i].extras['panel_size'] = value
if option == 'basemap':
data_quants[i].extras['basemap'] = value
if option == 'alpha':
if value > 1 or value < 0:
print("Invalid value. Should be [0, 1]")
return
data_quants[i].extras['alpha'] = value
if option == 'thick':
data_quants[i].line_opt['line_width'] = value
        if option in ('yrange', 'y_range'):
            data_quants[i].yaxis_opt['y_range'] = [value[0], value[1]]
        if option in ('zrange', 'z_range'):
            data_quants[i].zaxis_opt['z_range'] = [value[0], value[1]]
if option == 'xrange_interactive':
data_quants[i].interactive_xaxis_opt['xi_range'] = [value[0], value[1]]
if option == 'yrange_interactive':
data_quants[i].interactive_yaxis_opt['yi_range'] = [value[0], value[1]]
if option == 'xtitle':
data_quants[i].xaxis_opt['axis_label'] = value
if option == 'ytitle':
data_quants[i].yaxis_opt['axis_label'] = value
if option == 'ztitle':
data_quants[i].zaxis_opt['axis_label'] = value
if option == 'plotter':
_reset_plots(i)
data_quants[i].extras['plotter'] = value
if option == 'crosshair_x':
data_quants[i].xaxis_opt['crosshair'] = value
if option == 'crosshair_y':
data_quants[i].yaxis_opt['crosshair'] = value
if option == 'crosshair_z':
data_quants[i].zaxis_opt['crosshair'] = value
if option == 'static':
data_quants[i].extras['static'] = value
if option == 'static_tavg':
data_quants[i].extras['static_tavg'] = [value[0], value[1]]
if option == 't_average':
data_quants[i].extras['t_average'] = value
    return | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/options.py#L10-L235
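
Several of the documented options combine naturally on one variable; a short styling sketch that uses only options listed in the table above:

import pytplot

x = [1, 2, 3, 4, 5]
y = [[1, 10], [2, 9], [3, 8], [4, 7], [5, 6]]
pytplot.store_data('TwoLines', data={'x': x, 'y': y})
pytplot.options('TwoLines', 'legend_names', ['rising', 'falling'])
pytplot.options('TwoLines', 'line_style', 'dash')  # dashed lines
pytplot.options('TwoLines', 'thick', 2)            # 2-pixel line width
pytplot.options('TwoLines', 'yrange', [0, 11])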
MAVENSDC/PyTplot | pytplot/tplot_restore.py | tplot_restore | def tplot_restore(filename):
"""
This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
        >>> pytplot.tplot_restore('C:/temp/variable1.pytplot')
"""
#Error check
if not (os.path.isfile(filename)):
print("Not a valid file name")
return
#Check if the restored file was an IDL file
if filename.endswith('.tplot'):
temp_tplot = readsav(filename)
for i in range(len(temp_tplot['dq'])):
data_name = temp_tplot['dq'][i][0].decode("utf-8")
temp_x_data = temp_tplot['dq'][i][1][0][0]
            #IDL saves 2D arrays transposed relative to what pandas expects
if len(temp_tplot['dq'][i][1][0][2].shape) == 2:
temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2])
else:
temp_y_data = temp_tplot['dq'][i][1][0][2]
#If there are more than 4 fields, that means it is a spectrogram
if len(temp_tplot['dq'][i][1][0]) > 4:
temp_v_data = temp_tplot['dq'][i][1][0][4]
                #Convert big-endian arrays to native byte order, since pandas requires native-endian data
                #We might want to move this into the store_data procedure eventually
if (temp_x_data.dtype.byteorder == '>'):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == '>'):
temp_y_data = temp_y_data.byteswap().newbyteorder()
if (temp_v_data.dtype.byteorder == '>'):
temp_v_data = temp_v_data.byteswap().newbyteorder()
store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data, 'v':temp_v_data})
else:
                #Convert big-endian arrays to native byte order, since pandas requires native-endian data
                #We might want to move this into the store_data procedure eventually
if (temp_x_data.dtype.byteorder == '>'):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == '>'):
temp_y_data = temp_y_data.byteswap().newbyteorder()
store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data})
if temp_tplot['dq'][i][3].dtype.names is not None:
for option_name in temp_tplot['dq'][i][3].dtype.names:
options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0])
data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist()
data_quants[data_name].dtype = temp_tplot['dq'][i][5]
data_quants[data_name].create_time = temp_tplot['dq'][i][6]
for option_name in temp_tplot['tv'][0][0].dtype.names:
if option_name == 'TRANGE':
tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0])
if option_name == 'WSIZE':
tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0])
if option_name == 'VAR_LABEL':
tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0])
if 'P' in temp_tplot['tv'][0][1].tolist():
for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names:
if option_name == 'TITLE':
tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0])
#temp_tplot['tv'][0][1] is all of the "settings" variables
#temp_tplot['tv'][0][1]['D'][0] is "device" options
#temp_tplot['tv'][0][1]['P'][0] is "plot" options
#temp_tplot['tv'][0][1]['X'][0] is x axis options
#temp_tplot['tv'][0][1]['Y'][0] is y axis options
####################################################################
else:
        with open(filename, "rb") as f:
            temp = pickle.load(f)
num_data_quants = temp[0]
for i in range(0, num_data_quants):
data_quants[temp[i+1].name] = temp[i+1]
tplot_opt_glob = temp[num_data_quants+1]
    return | python | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot_restore.py#L16-L114
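
A save/restore round trip may clarify the intended workflow; this sketch assumes pytplot.tplot_save accepts a filename keyword, matching the "tplot_save" command the docstring refers to:

import pytplot

pytplot.store_data('Variable1', data={'x': [1, 2, 3, 4, 5], 'y': [1, 2, 3, 4, 5]})
pytplot.tplot_save('Variable1', filename='C:/temp/variable1.pytplot')
# ...later, possibly in a fresh session...
pytplot.tplot_restore('C:/temp/variable1.pytplot')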
MAVENSDC/PyTplot | pytplot/timespan.py | timespan | def timespan(t1, dt, keyword = 'days'):
"""
This function will set the time range for all time series plots. This is a wrapper for the function "xlim" to
better handle time axes.
Parameters:
t1 : flt/str
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
dt : flt
The time duration of the plots. Default is number of days.
keyword : str
Sets the units of the "dt" variable. Days, hours, minutes, and seconds are all accepted.
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.timespan(1500249600, 1)
>>> # The same as above, but using different inputs
>>> pytplot.timespan("2017-07-17 00:00:00", 24, keyword='hours')
"""
if keyword == 'days':
dt *= 86400
elif keyword == 'hours':
dt *= 3600
elif keyword == 'minutes':
dt *= 60
elif keyword == 'seconds':
dt *= 1
else:
print("Invalid 'keyword' option. Valid keywords: 'days', 'hours', 'minutes', 'seconds'.")
return
if not isinstance(t1, (int, float, complex)):
t1 = tplot_utilities.str_to_int(t1)
t2 = t1+dt
xlim(t1, t2)
return | python | def timespan(t1, dt, keyword = 'days'):
"""
This function will set the time range for all time series plots. This is a wrapper for the function "xlim" to
better handle time axes.
Parameters:
t1 : flt/str
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
dt : flt
The time duration of the plots. Default is number of days.
keyword : str
Sets the units of the "dt" variable. Days, hours, minutes, and seconds are all accepted.
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.timespan(1500249600, 1)
>>> # The same as above, but using different inputs
>>> pytplot.timespan("2017-07-17 00:00:00", 24, keyword='hours')
"""
if keyword == 'days':
dt *= 86400
elif keyword == 'hours':
dt *= 3600
elif keyword == 'minutes':
dt *= 60
elif keyword == 'seconds':
dt *= 1
else:
print("Invalid 'keyword' option. Valid keywords: 'days', 'hours', 'minutes', 'seconds'.")
return
if not isinstance(t1, (int, float, complex)):
t1 = tplot_utilities.str_to_int(t1)
t2 = t1+dt
xlim(t1, t2)
return | This function will set the time range for all time series plots. This is a wrapper for the function "xlim" to
better handle time axes.
Parameters:
t1 : flt/str
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
dt : flt
The time duration of the plots. Default is number of days.
keyword : str
Sets the units of the "dt" variable. Days, hours, minutes, and seconds are all accepted.
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.timespan(1500249600, 1)
>>> # The same as above, but using different inputs
>>> pytplot.timespan("2017-07-17 00:00:00", 24, keyword='hours') | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/timespan.py#L9-L52 |
MAVENSDC/PyTplot | pytplot/tplot_options.py | tplot_options | def tplot_options(option, value):
"""
This function allows the user to set several global options for the generated plots.
Parameters:
option : str
The name of the option. See section below
value : str/int/float/list
The value of the option. See section below.
Options:
============ ========== =====
Options Value type Notes
============ ========== =====
title str Title of the entire output
title_size int Font size of the output
wsize [int, int] [height, width], pixel size of the plot window
title_align int Offset position in pixels of the title
var_label str Name of the tplot variable to be used as another x axis
alt_range [flt, flt] The min and max altitude to be plotted on all alt plots
map_x_range [int, int] The min and max longitude to be plotted on all map plots
map_y_range [int, int] The min and max latitude to be plotted on all map plots
x_range [flt, flt] The min and max x_range (usually time) to be plotted on all Spec/1D plots
data_gap int Maximum number of seconds of consecutive NaN values across which interpolation is still applied
crosshair bool Option allowing crosshairs and crosshair legend
roi [str, str] Times between which there's a region of interest for a user
============ ========== =====
Returns:
None
Examples:
>>> # Set the plot title
>>> import pytplot
>>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')
>>> # Set the window size
>>> pytplot.tplot_options('wsize', [1000,500])
"""
option = option.lower()
temp = tplot_utilities.set_tplot_options(option, value, pytplot.tplot_opt_glob)
pytplot.tplot_opt_glob = temp
return | python | def tplot_options(option, value):
"""
This function allows the user to set several global options for the generated plots.
Parameters:
option : str
The name of the option. See section below
value : str/int/float/list
The value of the option. See section below.
Options:
============ ========== =====
Options Value type Notes
============ ========== =====
title str Title of the entire output
title_size int Font size of the output
wsize [int, int] [height, width], pixel size of the plot window
title_align int Offset position in pixels of the title
var_label str Name of the tplot variable to be used as another x axis
alt_range [flt, flt] The min and max altitude to be plotted on all alt plots
map_x_range [int, int] The min and max longitude to be plotted on all map plots
map_y_range [int, int] The min and max latitude to be plotted on all map plots
x_range [flt, flt] The min and max x_range (usually time) to be plotted on all Spec/1D plots
data_gap int Maximum number of seconds of consecutive NaN values across which interpolation is still applied
crosshair bool Option allowing crosshairs and crosshair legend
roi [str, str] Times between which there's a region of interest for a user
============ ========== =====
Returns:
None
Examples:
>>> # Set the plot title
>>> import pytplot
>>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')
>>> # Set the window size
>>> pytplot.tplot_options('wsize', [1000,500])
"""
option = option.lower()
temp = tplot_utilities.set_tplot_options(option, value, pytplot.tplot_opt_glob)
pytplot.tplot_opt_glob = temp
return | This function allows the user to set several global options for the generated plots.
Parameters:
option : str
The name of the option. See section below
value : str/int/float/list
The value of the option. See section below.
Options:
============ ========== =====
Options Value type Notes
============ ========== =====
title str Title of the entire output
title_size int Font size of the output
wsize [int, int] [height, width], pixel size of the plot window
title_align int Offset position in pixels of the title
var_label str Name of the tplot variable to be used as another x axis
alt_range [flt, flt] The min and max altitude to be plotted on all alt plots
map_x_range [int, int] The min and max longitude to be plotted on all map plots
map_y_range [int, int] The min and max latitude to be plotted on all map plots
x_range [flt, flt] The min and max x_range (usually time) to be plotted on all Spec/1D plots
data_gap int Maximum number of seconds of consecutive NaN values across which interpolation is still applied
crosshair bool Option allowing crosshairs and crosshair legend
roi [str, str] Times between which there's a region of interest for a user
============ ========== =====
Returns:
None
Examples:
>>> # Set the plot title
>>> import pytplot
>>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')
>>> # Set the window size
>>> pytplot.tplot_options('wsize', [1000,500]) | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot_options.py#L9-L58 |
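A short usage sketch combining several of the global options documented above; the values are illustrative:
>>> import pytplot
>>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')
>>> pytplot.tplot_options('wsize', [1000, 500])
>>> pytplot.tplot_options('x_range', [1500249600, 1500336000])  # seconds since epoch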
MAVENSDC/PyTplot | pytplot/tplot.py | tplot | def tplot(name,
var_label=None,
auto_color=True,
interactive=False,
combine_axes=True,
nb=False,
save_file=None,
gui=False,
qt=False,
bokeh=False,
save_png=None,
display=True,
testing=False):
"""
This is the function used to display the tplot variables stored in memory.
The default output is to show the plots stacked on top of one another inside a GUI window.
The GUI window has the option to export the plots in either PNG or HTML formats.
.. note::
This plotting routine uses the python Bokeh library, which creates plots using HTML and Javascript.
Bokeh is technically still in beta, so future patches to Bokeh may require updates to this function.
Parameters:
name : str / list
List of tplot variables that will be plotted
var_label : str, optional
The name of the tplot variable you would like as
a second x axis.
auto_color : bool, optional
Automatically color the plot lines.
interactive : bool, optional
If True, a secondary interactive plot will be generated next to spectrogram plots.
Mousing over the spectrogram will display a slice of data from that time on the
interactive chart.
combine_axes : bool, optional
If True, the axes are combined so that they all display the same x range. This also enables
scrolling/zooming/panning on one plot to affect all of the other plots simultaneously.
nb : bool, optional
If True, the plot will be displayed inside of a current Jupyter notebook session.
save_file : str, optional
A full file name and path.
If this option is set, the plot will be automatically saved to the file name provided in an HTML format.
The plots can then be opened and viewed on any browser without any requirements.
bokeh : bool, optional
If True, plots data using bokeh
Else (bokeh=False or omitted), plots data using PyQtGraph
gui : bool, optional
If True, then this function will output the 2 HTML components of the generated plots as string variables.
This is useful if you are embedding the plots in your own GUI. For more information, see
http://bokeh.pydata.org/en/latest/docs/user_guide/embed.html
qt : bool, optional
If True, then this function will display the plot inside of the Qt window. From this window, you
can choose to export the plots as either an HTML file, or as a PNG.
save_png : str, optional
A full file name and path.
If this option is set, the plot will be automatically saved to the file name provided in a PNG format.
display: bool, optional
If True, then this function will display the plotted tplot variables. Necessary to make this optional
so we can avoid it in a headless server environment.
testing: bool, optional
If True, doesn't run the '(hasattr(sys, 'ps1'))' line that makes plots interactive, i.e., avoids blocking the Qt event loop during automated tests
Returns:
None
Examples:
>>> #Plot a single line in bokeh
>>> import pytplot
>>> x_data = [2,3,4,5,6]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.tplot("Variable1",bokeh=True)
>>> #Display two plots
>>> x_data = [1,2,3,4,5]
>>> y_data = [[1,5],[2,4],[3,3],[4,2],[5,1]]
>>> pytplot.store_data("Variable2", data={'x':x_data, 'y':y_data})
>>> pytplot.tplot(["Variable1", "Variable2"])
>>> #Display 2 plots, using Variable1 as another x axis
>>> x_data = [1,2,3]
>>> y_data = [ [1,2,3] , [4,5,6], [7,8,9] ]
>>> v_data = [1,2,3]
>>> pytplot.store_data("Variable3", data={'x':x_data, 'y':y_data, 'v':v_data})
>>> pytplot.options("Variable3", 'spec', 1)
>>> pytplot.tplot(["Variable2", "Variable3"], var_label='Variable1')
>>> #Plot all 3 tplot variables, sending the output to an HTML file
>>> pytplot.tplot(["Variable1", "Variable2", "Variable3"], save_file='C:/temp/pytplot_example.html')
>>> #Plot all 3 tplot variables, sending the HTML output to a pair of strings
>>> script, div = pytplot.tplot(["Variable1", "Variable2", "Variable3"], gui=True)
"""
if not pytplot.using_graphics and save_file is None:
print("Qt was not successfully imported. Specify save_file to save the file as a .html file.")
return
# Check a bunch of things
if not isinstance(name, list):
name = [name]
num_plots = 1
else:
num_plots = len(name)
for i in range(num_plots):
if isinstance(name[i], int):
name[i] = list(pytplot.data_quants.keys())[name[i]]
if name[i] not in pytplot.data_quants.keys():
print(str(name[i]) + " is currently not in pytplot")
return
if isinstance(var_label, int):
var_label = list(pytplot.data_quants.keys())[var_label]
if bokeh:
layout = HTMLPlotter.generate_stack(name, var_label=var_label, auto_color=auto_color, combine_axes=combine_axes,
interactive=interactive)
# Output types
if gui:
script, div = components(layout)
return script, div
elif nb:
output_notebook()
show(layout)
return
elif save_file is not None:
output_file(save_file, mode='inline')
save(layout)
return
elif qt:
available_qt_window = tplot_utilities.get_available_qt_window()
dir_path = tempfile.gettempdir() # send to user's temp directory
output_file(os.path.join(dir_path, "temp.html"), mode='inline')
save(layout)
new_layout = WebView()
available_qt_window.resize(pytplot.tplot_opt_glob['window_size'][0] + 100,
pytplot.tplot_opt_glob['window_size'][1] + 100)
new_layout.resize(pytplot.tplot_opt_glob['window_size'][0], pytplot.tplot_opt_glob['window_size'][1])
dir_path = tempfile.gettempdir() # send to user's temp directory
new_layout.setUrl(QtCore.QUrl.fromLocalFile(os.path.join(dir_path, "temp.html")))
available_qt_window.newlayout(new_layout)
available_qt_window.show()
available_qt_window.activateWindow()
if testing:
return
if not (hasattr(sys, 'ps1')) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
return
else:
dir_path = tempfile.gettempdir() # send to user's temp directory
output_file(os.path.join(dir_path, "temp.html"), mode='inline')
show(layout)
return
else:
if save_png is not None:
layout = QtPlotter.generate_stack(name, var_label=var_label, combine_axes=combine_axes,
mouse_moved_event=pytplot.hover_time.change_hover_time)
layout.resize(pytplot.tplot_opt_glob['window_size'][0], pytplot.tplot_opt_glob['window_size'][1])
for i, item in enumerate(layout.items()):
if type(item) == pg.graphicsItems.GraphicsLayout.GraphicsLayout:
layout.items()[i].resize(pytplot.tplot_opt_glob['window_size'][0],
pytplot.tplot_opt_glob['window_size'][1])
exporter = PyTPlot_Exporter.PytplotExporter(layout)
exporter.parameters()['width'] = pytplot.tplot_opt_glob['window_size'][0]
exporter.parameters()['height'] = pytplot.tplot_opt_glob['window_size'][1]
exporter.export(save_png)
if display:
# Set up all things needed for when a user asks to save plot from window
layout_orig = QtPlotter.generate_stack(name, var_label=var_label, combine_axes=combine_axes,
mouse_moved_event=pytplot.hover_time.change_hover_time)
layout_orig.resize(pytplot.tplot_opt_glob['window_size'][0], pytplot.tplot_opt_glob['window_size'][1])
for i, item in enumerate(layout_orig.items()):
if type(item) == pg.graphicsItems.GraphicsLayout.GraphicsLayout:
layout_orig.items()[i].resize(pytplot.tplot_opt_glob['window_size'][0],
pytplot.tplot_opt_glob['window_size'][1])
exporter = QtPlotter.PytplotExporter(layout_orig)
# Set up displayed plot window and grab plots to plot on it
available_qt_window = tplot_utilities.get_available_qt_window()
layout = QtPlotter.generate_stack(name, var_label=var_label, combine_axes=combine_axes,
mouse_moved_event=pytplot.hover_time.change_hover_time)
available_qt_window.newlayout(layout)
available_qt_window.resize(pytplot.tplot_opt_glob['window_size'][0],
pytplot.tplot_opt_glob['window_size'][1])
# Implement button that lets you save the PNG
available_qt_window.init_savepng(exporter)
# Show the plot window and plot
available_qt_window.show()
available_qt_window.activateWindow()
if interactive:
# Call 2D interactive window; This will only plot something when spectrograms are involved.
interactiveplot.interactiveplot()
static_list = [i for i in name if 'static' in pytplot.data_quants[i].extras]
for tplot_var in static_list:
# Call 2D static window; This will only plot something when spectrograms are involved.
staticplot.static2dplot(tplot_var, pytplot.data_quants[tplot_var].extras['static'])
static_tavg_list = [i for i in name if 'static_tavg' in pytplot.data_quants[i].extras]
for tplot_var in static_tavg_list:
# Call 2D static window for time-averaged values; This will only plot something when spectrograms
# are involved
staticplot_tavg.static2dplot_timeaveraged(
tplot_var, pytplot.data_quants[tplot_var].extras['static_tavg'])
# (hasattr(sys, 'ps1')) checks to see if we're in ipython
# plots the plots!
if testing:
return
if not (hasattr(sys, 'ps1')) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
return | python | def tplot(name,
var_label=None,
auto_color=True,
interactive=False,
combine_axes=True,
nb=False,
save_file=None,
gui=False,
qt=False,
bokeh=False,
save_png=None,
display=True,
testing=False):
"""
This is the function used to display the tplot variables stored in memory.
The default output is to show the plots stacked on top of one another inside a GUI window.
The GUI window has the option to export the plots in either PNG or HTML formats.
.. note::
This plotting routine uses the python Bokeh library, which creates plots using HTML and Javascript.
Bokeh is technically still in beta, so future patches to Bokeh may require updates to this function.
Parameters:
name : str / list
List of tplot variables that will be plotted
var_label : str, optional
The name of the tplot variable you would like as
a second x axis.
auto_color : bool, optional
Automatically color the plot lines.
interactive : bool, optional
If True, a secondary interactive plot will be generated next to spectrogram plots.
Mousing over the spectrogram will display a slice of data from that time on the
interactive chart.
combine_axes : bool, optional
If True, the axes are combined so that they all display the same x range. This also enables
scrolling/zooming/panning on one plot to affect all of the other plots simultaneously.
nb : bool, optional
If True, the plot will be displayed inside of a current Jupyter notebook session.
save_file : str, optional
A full file name and path.
If this option is set, the plot will be automatically saved to the file name provided in an HTML format.
The plots can then be opened and viewed on any browser without any requirements.
bokeh : bool, optional
If True, plots data using bokeh
Else (bokeh=False or omitted), plots data using PyQtGraph
gui : bool, optional
If True, then this function will output the 2 HTML components of the generated plots as string variables.
This is useful if you are embedding the plots in your own GUI. For more information, see
http://bokeh.pydata.org/en/latest/docs/user_guide/embed.html
qt : bool, optional
If True, then this function will display the plot inside of the Qt window. From this window, you
can choose to export the plots as either an HTML file, or as a PNG.
save_png : str, optional
A full file name and path.
If this option is set, the plot will be automatically saved to the file name provided in a PNG format.
display: bool, optional
If True, then this function will display the plotted tplot variables. Necessary to make this optional
so we can avoid it in a headless server environment.
testing: bool, optional
If True, doesn't run the '(hasattr(sys, 'ps1'))' line that makes plots interactive, i.e., avoids blocking the Qt event loop during automated tests
Returns:
None
Examples:
>>> #Plot a single line in bokeh
>>> import pytplot
>>> x_data = [2,3,4,5,6]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.tplot("Variable1",bokeh=True)
>>> #Display two plots
>>> x_data = [1,2,3,4,5]
>>> y_data = [[1,5],[2,4],[3,3],[4,2],[5,1]]
>>> pytplot.store_data("Variable2", data={'x':x_data, 'y':y_data})
>>> pytplot.tplot(["Variable1", "Variable2"])
>>> #Display 2 plots, using Variable1 as another x axis
>>> x_data = [1,2,3]
>>> y_data = [ [1,2,3] , [4,5,6], [7,8,9] ]
>>> v_data = [1,2,3]
>>> pytplot.store_data("Variable3", data={'x':x_data, 'y':y_data, 'v':v_data})
>>> pytplot.options("Variable3", 'spec', 1)
>>> pytplot.tplot(["Variable2", "Variable3"], var_label='Variable1')
>>> #Plot all 3 tplot variables, sending the output to an HTML file
>>> pytplot.tplot(["Variable1", "Variable2", "Variable3"], save_file='C:/temp/pytplot_example.html')
>>> #Plot all 3 tplot variables, sending the HTML output to a pair of strings
>>> script, div = pytplot.tplot(["Variable1", "Variable2", "Variable3"], gui=True)
"""
if not pytplot.using_graphics and save_file is None:
print("Qt was not successfully imported. Specify save_file to save the file as a .html file.")
return
# Check a bunch of things
if not isinstance(name, list):
name = [name]
num_plots = 1
else:
num_plots = len(name)
for i in range(num_plots):
if isinstance(name[i], int):
name[i] = list(pytplot.data_quants.keys())[name[i]]
if name[i] not in pytplot.data_quants.keys():
print(str(name[i]) + " is currently not in pytplot")
return
if isinstance(var_label, int):
var_label = list(pytplot.data_quants.keys())[var_label]
if bokeh:
layout = HTMLPlotter.generate_stack(name, var_label=var_label, auto_color=auto_color, combine_axes=combine_axes,
interactive=interactive)
# Output types
if gui:
script, div = components(layout)
return script, div
elif nb:
output_notebook()
show(layout)
return
elif save_file is not None:
output_file(save_file, mode='inline')
save(layout)
return
elif qt:
available_qt_window = tplot_utilities.get_available_qt_window()
dir_path = tempfile.gettempdir() # send to user's temp directory
output_file(os.path.join(dir_path, "temp.html"), mode='inline')
save(layout)
new_layout = WebView()
available_qt_window.resize(pytplot.tplot_opt_glob['window_size'][0] + 100,
pytplot.tplot_opt_glob['window_size'][1] + 100)
new_layout.resize(pytplot.tplot_opt_glob['window_size'][0], pytplot.tplot_opt_glob['window_size'][1])
dir_path = tempfile.gettempdir() # send to user's temp directory
new_layout.setUrl(QtCore.QUrl.fromLocalFile(os.path.join(dir_path, "temp.html")))
available_qt_window.newlayout(new_layout)
available_qt_window.show()
available_qt_window.activateWindow()
if testing:
return
if not (hasattr(sys, 'ps1')) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
return
else:
dir_path = tempfile.gettempdir() # send to user's temp directory
output_file(os.path.join(dir_path, "temp.html"), mode='inline')
show(layout)
return
else:
if save_png is not None:
layout = QtPlotter.generate_stack(name, var_label=var_label, combine_axes=combine_axes,
mouse_moved_event=pytplot.hover_time.change_hover_time)
layout.resize(pytplot.tplot_opt_glob['window_size'][0], pytplot.tplot_opt_glob['window_size'][1])
for i, item in enumerate(layout.items()):
if type(item) == pg.graphicsItems.GraphicsLayout.GraphicsLayout:
layout.items()[i].resize(pytplot.tplot_opt_glob['window_size'][0],
pytplot.tplot_opt_glob['window_size'][1])
exporter = PyTPlot_Exporter.PytplotExporter(layout)
exporter.parameters()['width'] = pytplot.tplot_opt_glob['window_size'][0]
exporter.parameters()['height'] = pytplot.tplot_opt_glob['window_size'][1]
exporter.export(save_png)
if display:
# Set up all things needed for when a user asks to save plot from window
layout_orig = QtPlotter.generate_stack(name, var_label=var_label, combine_axes=combine_axes,
mouse_moved_event=pytplot.hover_time.change_hover_time)
layout_orig.resize(pytplot.tplot_opt_glob['window_size'][0], pytplot.tplot_opt_glob['window_size'][1])
for i, item in enumerate(layout_orig.items()):
if type(item) == pg.graphicsItems.GraphicsLayout.GraphicsLayout:
layout_orig.items()[i].resize(pytplot.tplot_opt_glob['window_size'][0],
pytplot.tplot_opt_glob['window_size'][1])
exporter = QtPlotter.PytplotExporter(layout_orig)
# Set up displayed plot window and grab plots to plot on it
available_qt_window = tplot_utilities.get_available_qt_window()
layout = QtPlotter.generate_stack(name, var_label=var_label, combine_axes=combine_axes,
mouse_moved_event=pytplot.hover_time.change_hover_time)
available_qt_window.newlayout(layout)
available_qt_window.resize(pytplot.tplot_opt_glob['window_size'][0],
pytplot.tplot_opt_glob['window_size'][1])
# Implement button that lets you save the PNG
available_qt_window.init_savepng(exporter)
# Show the plot window and plot
available_qt_window.show()
available_qt_window.activateWindow()
if interactive:
# Call 2D interactive window; This will only plot something when spectrograms are involved.
interactiveplot.interactiveplot()
static_list = [i for i in name if 'static' in pytplot.data_quants[i].extras]
for tplot_var in static_list:
# Call 2D static window; This will only plot something when spectrograms are involved.
staticplot.static2dplot(tplot_var, pytplot.data_quants[tplot_var].extras['static'])
static_tavg_list = [i for i in name if 'static_tavg' in pytplot.data_quants[i].extras]
for tplot_var in static_tavg_list:
# Call 2D static window for time-averaged values; This will only plot something when spectrograms
# are involved
staticplot_tavg.static2dplot_timeaveraged(
tplot_var, pytplot.data_quants[tplot_var].extras['static_tavg'])
# (hasattr(sys, 'ps1')) checks to see if we're in ipython
# plots the plots!
if testing:
return
if not (hasattr(sys, 'ps1')) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
return | This is the function used to display the tplot variables stored in memory.
The default output is to show the plots stacked on top of one another inside a GUI window.
The GUI window has the option to export the plots in either PNG or HTML formats.
.. note::
This plotting routine uses the python Bokeh library, which creates plots using HTML and Javascript.
Bokeh is technically still in beta, so future patches to Bokeh may require updates to this function.
Parameters:
name : str / list
List of tplot variables that will be plotted
var_label : str, optional
The name of the tplot variable you would like as
a second x axis.
auto_color : bool, optional
Automatically color the plot lines.
interactive : bool, optional
If True, a secondary interactive plot will be generated next to spectrogram plots.
Mousing over the spectrogram will display a slice of data from that time on the
interactive chart.
combine_axes : bool, optional
If True, the axes are combined so that they all display the same x range. This also enables
scrolling/zooming/panning on one plot to affect all of the other plots simultaneously.
nb : bool, optional
If True, the plot will be displayed inside of a current Jupyter notebook session.
save_file : str, optional
A full file name and path.
If this option is set, the plot will be automatically saved to the file name provided in an HTML format.
The plots can then be opened and viewed on any browser without any requirements.
bokeh : bool, optional
If True, plots data using bokeh
Else (bokeh=False or omitted), plots data using PyQtGraph
gui : bool, optional
If True, then this function will output the 2 HTML components of the generated plots as string variables.
This is useful if you are embedding the plots in your own GUI. For more information, see
http://bokeh.pydata.org/en/latest/docs/user_guide/embed.html
qt : bool, optional
If True, then this function will display the plot inside of the Qt window. From this window, you
can choose to export the plots as either an HTML file, or as a PNG.
save_png : str, optional
A full file name and path.
If this option is set, the plot will be automatically saved to the file name provided in a PNG format.
display: bool, optional
If True, then this function will display the plotted tplot variables. Necessary to make this optional
so we can avoid it in a headless server environment.
testing: bool, optional
If True, doesn't run the '(hasattr(sys, 'ps1'))' line that makes plots interactive, i.e., avoids blocking the Qt event loop during automated tests
Returns:
None
Examples:
>>> #Plot a single line in bokeh
>>> import pytplot
>>> x_data = [2,3,4,5,6]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.tplot("Variable1",bokeh=True)
>>> #Display two plots
>>> x_data = [1,2,3,4,5]
>>> y_data = [[1,5],[2,4],[3,3],[4,2],[5,1]]
>>> pytplot.store_data("Variable2", data={'x':x_data, 'y':y_data})
>>> pytplot.tplot(["Variable1", "Variable2"])
>>> #Display 2 plots, using Variable1 as another x axis
>>> x_data = [1,2,3]
>>> y_data = [ [1,2,3] , [4,5,6], [7,8,9] ]
>>> v_data = [1,2,3]
>>> pytplot.store_data("Variable3", data={'x':x_data, 'y':y_data, 'v':v_data})
>>> pytplot.options("Variable3", 'spec', 1)
>>> pytplot.tplot(["Variable2", "Variable3"], var_label='Variable1')
>>> #Plot all 3 tplot variables, sending the output to an HTML file
>>> pytplot.tplot(["Variable1", "Variable2", "Variable3"], save_file='C:/temp/pytplot_example.html')
>>> #Plot all 3 tplot variables, sending the HTML output to a pair of strings
>>> script, div = pytplot.tplot(["Variable1", "Variable2", "Variable3"], gui=True) | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot.py#L28-L245 |
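For headless use, the save_png and display parameters documented above combine as follows; the output path is hypothetical:
>>> import pytplot
>>> pytplot.store_data("Variable1", data={'x': [1, 2, 3], 'y': [1, 2, 3]})
>>> pytplot.tplot("Variable1", save_png='C:/temp/pytplot_example.png', display=False)  # no window is opened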
MAVENSDC/PyTplot | pytplot/staticplot_tavg.py | static2dplot_timeaveraged | def static2dplot_timeaveraged(var, time):
""" If the static_taverage option is set in tplot, and is supplied with a time range, then the spectrogram
plot(s) for which it is set will have another window pop up, where the displayed y and z values are
averaged by the number of seconds between the specified time range. """
# Grab names of data loaded in as tplot variables.
names = list(pytplot.data_quants.keys())
# Get data we'll actually work with here.
valid_variables = tplot_utilities.get_data(names)
# Don't plot anything unless we have spectrograms with which to work.
if valid_variables:
# Get z label
labels = tplot_utilities.get_labels_axis_types(names)
# Put together data in easy-to-access format for plots.
data = {}
for name in valid_variables:
bins = tplot_utilities.get_bins(name)
time_values, z_values = tplot_utilities.get_z_t_values(name)
data[name] = [bins, z_values, time_values]
# Set up the 2D static plot
pytplot.static_tavg_window = pg.GraphicsWindow()
pytplot.static_tavg_window.resize(1000, 600)
pytplot.static_tavg_window.setWindowTitle('Time-Averaged Values Static Window')
plot = pytplot.static_tavg_window.addPlot(title='2D Static Plot for Time-Averaged Values', row=0, col=0)
# Make it so that whenever this first starts up, you just have an empty plot
plot_data = plot.plot([], [])
if var in valid_variables:
# Get min/max values of data's time range (in both datetime and seconds since epoch)
t_min = np.nanmin(data[var][2])
t_min_str = tplot_utilities.int_to_str(t_min)
t_min_conv_back = tplot_utilities.str_to_int(t_min_str)
t_max = np.nanmax(data[var][2])
t_max_str = tplot_utilities.int_to_str(t_max)
t_max_conv_back = tplot_utilities.str_to_int(t_max_str)
# Convert user input to seconds since epoch
user_time = [tplot_utilities.str_to_int(i) for i in time]
# Covering situation where user entered a time not in the dataset!
# As long as they used a time in the dataset, this will not trigger.
for t, datetime in enumerate(user_time):
if datetime not in range(t_min_conv_back, t_max_conv_back+1):
while True:
try:
if t == 0:
time_bound = 'left bound'
else:
time_bound = 'right bound'
user_time[t] = tplot_utilities.str_to_int(input(
'Chosen {} time [{}] not in range of data [{} to {}]. Input new time (%Y-%m-%d %H:%M:%S).'.format(
time_bound, tplot_utilities.int_to_str(datetime), t_min_str, t_max_str)))
except ValueError:
continue
else:
if user_time[t] not in range(int(t_min), int(t_max)):
continue
else:
break
# Get index of the time closest to the user's time choice
time_array = np.array(data[var][2])
array = np.asarray(time_array)
idx = [(np.abs(array - i)).argmin() for i in user_time]
# Average values based on the chosen time range's indices
time_diff = abs(idx[0]-idx[1])
# Make sure to account for edge problem
if idx[1] != -1:
y_values_slice = data[var][1][idx[0]:idx[1]+1]
else:
y_values_slice = data[var][1][idx[0]:]
y_values_avgd = np.nansum(y_values_slice, axis=0)/float(time_diff)
# If user indicated they wanted the interactive plot's axes to be logged, log 'em.
# But first make sure that values in x and y are loggable!
x_axis = False
y_axis = False
# Checking x axis
if np.nanmin(data[var][0][:]) < 0:
print('Negative data is incompatible with log plotting.')
elif np.nanmin(data[var][0][:]) >= 0 and labels[var][2] == 'log':
x_axis = True
# Checking y axis
if np.nanmin(list(data[var][1][idx[0]])) < 0 or np.nanmin(list(data[var][1][idx[1]])) < 0:
print('Negative data is incompatible with log plotting.')
elif np.nanmin(list(data[var][1][idx[0]])) >= 0 and np.nanmin(list(data[var][1][idx[1]])) >= 0 and \
labels[var][3] == 'log':
y_axis = True
# Set plot labels
plot.setLabel('bottom', '{}'.format(labels[var][0]))
plot.setLabel('left', '{}'.format(labels[var][1]))
plot.setLogMode(x=x_axis, y=y_axis)
# Update x and y range if user modified it
tplot_utilities.set_x_range(var, x_axis, plot)
tplot_utilities.set_y_range(var, y_axis, plot)
# Plot the values averaged over the chosen time range
plot_data.setData(data[var][0][:], y_values_avgd) | python | def static2dplot_timeaveraged(var, time):
""" If the static_taverage option is set in tplot, and is supplied with a time range, then the spectrogram
plot(s) for which it is set will have another window pop up, where the displayed y and z values are
averaged by the number of seconds between the specified time range. """
# Grab names of data loaded in as tplot variables.
names = list(pytplot.data_quants.keys())
# Get data we'll actually work with here.
valid_variables = tplot_utilities.get_data(names)
# Don't plot anything unless we have spectrograms with which to work.
if valid_variables:
# Get z label
labels = tplot_utilities.get_labels_axis_types(names)
# Put together data in easy-to-access format for plots.
data = {}
for name in valid_variables:
bins = tplot_utilities.get_bins(name)
time_values, z_values = tplot_utilities.get_z_t_values(name)
data[name] = [bins, z_values, time_values]
# Set up the 2D static plot
pytplot.static_tavg_window = pg.GraphicsWindow()
pytplot.static_tavg_window.resize(1000, 600)
pytplot.static_tavg_window.setWindowTitle('Time-Averaged Values Static Window')
plot = pytplot.static_tavg_window.addPlot(title='2D Static Plot for Time-Averaged Values', row=0, col=0)
# Make it so that whenever this first starts up, you just have an empty plot
plot_data = plot.plot([], [])
if var in valid_variables:
# Get min/max values of data's time range (in both datetime and seconds since epoch)
t_min = np.nanmin(data[var][2])
t_min_str = tplot_utilities.int_to_str(t_min)
t_min_conv_back = tplot_utilities.str_to_int(t_min_str)
t_max = np.nanmax(data[var][2])
t_max_str = tplot_utilities.int_to_str(t_max)
t_max_conv_back = tplot_utilities.str_to_int(t_max_str)
# Convert user input to seconds since epoch
user_time = [tplot_utilities.str_to_int(i) for i in time]
# Covering situation where user entered a time not in the dataset!
# As long as they used a time in the dataset, this will not trigger.
for t, datetime in enumerate(user_time):
if datetime not in range(t_min_conv_back, t_max_conv_back+1):
while True:
try:
if t == 0:
time_bound = 'left bound'
else:
time_bound = 'right bound'
user_time[t] = tplot_utilities.str_to_int(input(
'Chosen {} time [{}] not in range of data [{} to {}]. Input new time (%Y-%m-%d %H:%M:%S).'.format(
time_bound, tplot_utilities.int_to_str(datetime), t_min_str, t_max_str)))
except ValueError:
continue
else:
if user_time[t] not in range(int(t_min), int(t_max)):
continue
else:
break
# Get index of the time closest to the user's time choice
time_array = np.array(data[var][2])
array = np.asarray(time_array)
idx = [(np.abs(array - i)).argmin() for i in user_time]
# Average values based on the chosen time range's indices
time_diff = abs(idx[0]-idx[1])
# Make sure to account for edge problem
if idx[1] != -1:
y_values_slice = data[var][1][idx[0]:idx[1]+1]
else:
y_values_slice = data[var][1][idx[0]:]
y_values_avgd = np.nansum(y_values_slice, axis=0)/float(time_diff)
# If user indicated they wanted the interactive plot's axes to be logged, log 'em.
# But first make sure that values in x and y are loggable!
x_axis = False
y_axis = False
# Checking x axis
if np.nanmin(data[var][0][:]) < 0:
print('Negative data is incompatible with log plotting.')
elif np.nanmin(data[var][0][:]) >= 0 and labels[var][2] == 'log':
x_axis = True
# Checking y axis
if np.nanmin(list(data[var][1][idx[0]])) < 0 or np.nanmin(list(data[var][1][idx[1]])) < 0:
print('Negative data is incompatible with log plotting.')
elif np.nanmin(list(data[var][1][idx[0]])) >= 0 and np.nanmin(list(data[var][1][idx[1]])) >= 0 and \
labels[var][3] == 'log':
y_axis = True
# Set plot labels
plot.setLabel('bottom', '{}'.format(labels[var][0]))
plot.setLabel('left', '{}'.format(labels[var][1]))
plot.setLogMode(x=x_axis, y=y_axis)
# Update x and y range if user modified it
tplot_utilities.set_x_range(var, x_axis, plot)
tplot_utilities.set_y_range(var, y_axis, plot)
# Plot the values averaged over the chosen time range
plot_data.setData(data[var][0][:], y_values_avgd) | If the static_tavg option is set in tplot and is supplied with a time range, then the spectrogram
plot(s) for which it is set will have another window pop up, where the displayed y and z values are
averaged over the seconds in the specified time range. | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/staticplot_tavg.py#L8-L108 |
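The core of the routine above — snapping user times to the nearest samples and averaging the z values over that index range — reduces to a few numpy calls; a minimal sketch with hypothetical arrays:
>>> import numpy as np
>>> times = np.array([0., 10., 20., 30.])
>>> z = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])
>>> idx = [int(np.abs(times - t).argmin()) for t in (9., 31.)]  # nearest samples -> [1, 3]
>>> avg = np.nansum(z[idx[0]:idx[1]+1], axis=0) / float(abs(idx[0] - idx[1]))  # divides by the step count, as in the source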
MAVENSDC/PyTplot | pytplot/tplot_save.py | tplot_save | def tplot_save(names, filename=None):
"""
This function will save tplot variables into a single file by using the python "pickle" function.
This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session,
but save all of your data/options. All variables and plot options can be read back into tplot with the
"tplot_restore" command.
Parameters:
names : str/list
A string or a list of strings of the tplot variables you would like saved.
filename : str, optional
The filename where you want to save the file.
Returns:
None
Examples:
>>> # Save a single tplot variable
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.ylim('Variable1', 2, 4)
>>> pytplot.tplot_save('Variable1', filename='C:/temp/variable1.pytplot')
"""
if isinstance(names,int):
names = list(data_quants.keys())[names-1]
if not isinstance(names, list):
names = [names]
#Check that we have all available data
for name in names:
if isinstance(data_quants[name].data, list):
for data_name in data_quants[name].data:
if data_name not in names:
names.append(data_name)
#Pickle it up
to_pickle = []
for name in names:
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
to_pickle.append(data_quants[name])
num_quants = len(to_pickle)
to_pickle = [num_quants] + to_pickle
temp_tplot_opt_glob = tplot_opt_glob
to_pickle.append(temp_tplot_opt_glob)
if filename is None:
filename='var_'+'-'.join(names)+'.pytplot'
pickle.dump(to_pickle, open(filename, "wb"))
return | python | def tplot_save(names, filename=None):
"""
This function will save tplot variables into a single file by using the python "pickle" function.
This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session,
but save all of your data/options. All variables and plot options can be read back into tplot with the
"tplot_restore" command.
Parameters:
names : str/list
A string or a list of strings of the tplot variables you would like saved.
filename : str, optional
The filename where you want to save the file.
Returns:
None
Examples:
>>> # Save a single tplot variable
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.ylim('Variable1', 2, 4)
>>> pytplot.tplot_save('Variable1', filename='C:/temp/variable1.pytplot')
"""
if isinstance(names,int):
names = list(data_quants.keys())[names-1]
if not isinstance(names, list):
names = [names]
#Check that we have all available data
for name in names:
if isinstance(data_quants[name].data, list):
for data_name in data_quants[name].data:
if data_name not in names:
names.append(data_name)
#Pickle it up
to_pickle = []
for name in names:
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
to_pickle.append(data_quants[name])
num_quants = len(to_pickle)
to_pickle = [num_quants] + to_pickle
temp_tplot_opt_glob = tplot_opt_glob
to_pickle.append(temp_tplot_opt_glob)
if filename is None:
filename='var_'+'-'.join(names)+'.pytplot'
pickle.dump(to_pickle, open(filename, "wb"))
return | This function will save tplot variables into a single file by using the python "pickle" function.
This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session,
but save all of your data/options. All variables and plot options can be read back into tplot with the
"tplot_restore" command.
Parameters:
names : str/list
A string or a list of strings of the tplot variables you would like saved.
filename : str, optional
The filename where you want to save the file.
Returns:
None
Examples:
>>> # Save a single tplot variable
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.ylim('Variable1', 2, 4)
>>> pytplot.tplot_save('Variable1', filename='C:/temp/variable1.pytplot') | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot_save.py#L9-L65 |
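A save/restore round trip using this function together with tplot_restore; the path is illustrative:
>>> import pytplot
>>> pytplot.store_data("Variable1", data={'x': [1, 2, 3], 'y': [4, 5, 6]})
>>> pytplot.tplot_save("Variable1", filename='C:/temp/variable1.pytplot')
>>> pytplot.tplot_restore('C:/temp/variable1.pytplot')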
MAVENSDC/PyTplot | pytplot/get_ylimits.py | get_ylimits | def get_ylimits(name, trg=None):
"""
This function will extract the y-limits from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
trg : list, optional
The time range that you would like to look in
Returns:
ymin : float
The minimum value of y
ymax : float
The maximum value of y
Examples:
>>> # Retrieve the y-limits from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> y1, y2 = pytplot.get_ylimits("Variable1")
"""
if isinstance(name, int):
name = list(data_quants.keys())[name-1]
if not isinstance(name, list):
name = [name]
name_num = len(name)
ymin = None
ymax = None
for i in range(name_num):
if name[i] not in data_quants.keys():
print(str(name[i]) + " is currently not in pytplot.")
return
temp_data_quant = data_quants[name[i]]
yother = temp_data_quant.data
if trg is not None:
for column_name in yother.columns:
y = yother[column_name]
trunc_tempt_data_quant = y.truncate(before=trg[0], after=trg[1])
loc_min = trunc_tempt_data_quant.min(skipna=True)
loc_max = trunc_tempt_data_quant.max(skipna=True)
if (ymin is None) or (loc_min < ymin):
ymin = loc_min
if (ymax is None) or (loc_max > ymax):
ymax = loc_max
else:
for column_name in yother.columns:
y = yother[column_name]
loc_min = y.min(skipna=True)
loc_max = y.max(skipna=True)
if (ymin is None) or (loc_min < ymin):
ymin = loc_min
if (ymax is None) or (loc_max > ymax):
ymax = loc_max
print("Y Minimum: " + str(ymin))
print("Y Maximum: " + str(ymax))
return ymin, ymax | python | def get_ylimits(name, trg=None):
"""
This function will extract the y-limits from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
trg : list, optional
The time range that you would like to look in
Returns:
ymin : float
The minimum value of y
ymax : float
The maximum value of y
Examples:
>>> # Retrieve the y-limits from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> y1, y2 = pytplot.get_ylimits("Variable1")
"""
if isinstance(name, int):
name = list(data_quants.keys())[name-1]
if not isinstance(name, list):
name = [name]
name_num = len(name)
ymin = None
ymax = None
for i in range(name_num):
if name[i] not in data_quants.keys():
print(str(name[i]) + " is currently not in pytplot.")
return
temp_data_quant = data_quants[name[i]]
yother = temp_data_quant.data
if trg is not None:
for column_name in yother.columns:
y = yother[column_name]
trunc_tempt_data_quant = y.truncate(before=trg[0], after=trg[1])
loc_min = trunc_tempt_data_quant.min(skipna=True)
loc_max = trunc_tempt_data_quant.max(skipna=True)
if (ymin is None) or (loc_min < ymin):
ymin = loc_min
if (ymax is None) or (loc_max > ymax):
ymax = loc_max
else:
for column_name in yother.columns:
y = yother[column_name]
loc_min = y.min(skipna=True)
loc_max = y.max(skipna=True)
if (ymin is None) or (loc_min < ymin):
ymin = loc_min
if (ymax is None) or (loc_max > ymax):
ymax = loc_max
print("Y Minimum: " + str(ymin))
print("Y Maximum: " + str(ymax))
return ymin, ymax | This function will extract the y-limits from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
trg : list, optional
The time range that you would like to look in
Returns:
ymin : float
The minimum value of y
ymax : float
The maximum value of y
Examples:
>>> # Retrieve the y-limits from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> y1, y2 = pytplot.get_ylimits("Variable1") | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/get_ylimits.py#L9-L69 |
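The optional trg parameter documented above restricts the scan to a time range; a short sketch:
>>> import pytplot
>>> pytplot.store_data("Variable1", data={'x': [1, 2, 3, 4, 5], 'y': [1, 2, 3, 4, 5]})
>>> ymin, ymax = pytplot.get_ylimits("Variable1", trg=[2, 4])  # min/max taken only where x is in [2, 4]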
MAVENSDC/PyTplot | pytplot/timestamp.py | timestamp | def timestamp(val):
"""
This function will turn on a time stamp that shows up at the bottom of every generated plot.
Parameters:
val : str
A string that can either be 'on' or 'off'.
Returns:
None
Examples:
>>> # Turn on the timestamp
>>> import pytplot
>>> pytplot.timestamp('on')
"""
if val == 'on':
todaystring = datetime.datetime.now().strftime('%Y-%m-%d %H%M%S')
extra_layouts['time_stamp'] = todaystring
else:
if 'time_stamp' in extra_layouts:
del extra_layouts['time_stamp']
return | python | def timestamp(val):
"""
This function will turn on a time stamp that shows up at the bottom of every generated plot.
Parameters:
val : str
A string that can either be 'on' or 'off'.
Returns:
None
Examples:
>>> # Turn on the timestamp
>>> import pytplot
>>> pytplot.timestamp('on')
"""
if val == 'on':
todaystring = datetime.datetime.now().strftime('%Y-%m-%d %H%M%S')
extra_layouts['time_stamp'] = todaystring
else:
if 'time_stamp' in extra_layouts:
del extra_layouts['time_stamp']
return | This function will turn on a time stamp that shows up at the bottom of every generated plot.
Parameters:
val : str
A string that can either be 'on' or 'off'.
Returns:
None
Examples:
>>> # Turn on the timestamp
>>> import pytplot
>>> pytplot.timestamp('on') | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/timestamp.py#L9-L35 |
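Any value other than 'on' removes the stamp again; for example:
>>> pytplot.timestamp('off')  # deletes the 'time_stamp' entry from extra_layouts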
MAVENSDC/PyTplot | pytplot/QtPlotter/CustomImage/UpdatingImage.py | makeARGBwithNaNs | def makeARGBwithNaNs(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
This is the same as pyqtgraph.makeARGB, except that all NaN's in the data are set to transparent pixels
"""
nanlocations = np.isnan(data)
profile = debug.Profiler()
if data.ndim not in (2, 3):
raise TypeError("data must be 2D or 3D")
if data.ndim == 3 and data.shape[2] > 4:
raise TypeError("data.shape[2] must be <= 4")
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is None:
# automatically decide levels based on data dtype
if data.dtype.kind == 'u':
levels = np.array([0, 2**(data.itemsize*8)-1])
elif data.dtype.kind == 'i':
s = 2**(data.itemsize*8 - 1)
levels = np.array([-s, s-1])
elif data.dtype.kind == 'b':
levels = np.array([0,1])
else:
raise Exception('levels argument is required for float input types')
if not isinstance(levels, np.ndarray):
levels = np.array(levels)
if levels.ndim == 1:
if levels.shape[0] != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
raise Exception("levels argument must be 1D or 2D (got shape=%s)." % repr(levels.shape))
profile()
# Decide on maximum scaled value
if scale is None:
if lut is not None:
scale = lut.shape[0] - 1
else:
scale = 255.
# Decide on the dtype we want after scaling
if lut is None:
dtype = np.ubyte
else:
dtype = np.min_scalar_type(lut.shape[0]-1)
# Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
# we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal += 1e-16
newData[...,i] = fn.rescaleData(data[...,i], scale/(maxVal-minVal), minVal, dtype=dtype)
data = newData
else:
# Apply level scaling unless it would have no effect on the data
minVal, maxVal = levels
if minVal != 0 or maxVal != scale:
if minVal == maxVal:
maxVal += 1e-16
data = fn.rescaleData(data, scale/(maxVal-minVal), minVal, dtype=dtype)
profile()
# apply LUT if given
if lut is not None:
data = fn.applyLookupTable(data, lut)
else:
if data.dtype != np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
#Set NaNs to transparent
data[nanlocations] = [0,0,0,0]
profile()
# this will be the final image array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
# decide channel order
if useRGBA:
order = [0,1,2,3] # array comes out RGBA
else:
order = [2,1,0,3] # for some reason, the colors line up as BGR in the final image.
# copy data into image array
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
# add opaque alpha channel if needed
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
profile()
return imgData, alpha | python | def makeARGBwithNaNs(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
This is the same as pyqtgraph.makeARGB, except that all NaN's in the data are set to transparent pixels
"""
nanlocations = np.isnan(data)
profile = debug.Profiler()
if data.ndim not in (2, 3):
raise TypeError("data must be 2D or 3D")
if data.ndim == 3 and data.shape[2] > 4:
raise TypeError("data.shape[2] must be <= 4")
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is None:
# automatically decide levels based on data dtype
if data.dtype.kind == 'u':
levels = np.array([0, 2**(data.itemsize*8)-1])
elif data.dtype.kind == 'i':
s = 2**(data.itemsize*8 - 1)
levels = np.array([-s, s-1])
elif data.dtype.kind == 'b':
levels = np.array([0,1])
else:
raise Exception('levels argument is required for float input types')
if not isinstance(levels, np.ndarray):
levels = np.array(levels)
if levels.ndim == 1:
if levels.shape[0] != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
raise Exception("levels argument must be 1D or 2D (got shape=%s)." % repr(levels.shape))
profile()
# Decide on maximum scaled value
if scale is None:
if lut is not None:
scale = lut.shape[0] - 1
else:
scale = 255.
# Decide on the dtype we want after scaling
if lut is None:
dtype = np.ubyte
else:
dtype = np.min_scalar_type(lut.shape[0]-1)
# Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
# we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal += 1e-16
newData[...,i] = fn.rescaleData(data[...,i], scale/(maxVal-minVal), minVal, dtype=dtype)
data = newData
else:
# Apply level scaling unless it would have no effect on the data
minVal, maxVal = levels
if minVal != 0 or maxVal != scale:
if minVal == maxVal:
maxVal += 1e-16
data = fn.rescaleData(data, scale/(maxVal-minVal), minVal, dtype=dtype)
profile()
# apply LUT if given
if lut is not None:
data = fn.applyLookupTable(data, lut)
else:
if data.dtype != np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
#Set NaNs to transparent
data[nanlocations] = [0,0,0,0]
profile()
# this will be the final image array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
# decide channel order
if useRGBA:
order = [0,1,2,3] # array comes out RGBA
else:
order = [2,1,0,3] # for some reason, the colors line up as BGR in the final image.
# copy data into image array
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
# add opaque alpha channel if needed
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
profile()
return imgData, alpha | This is the same as pyqtgraph.makeARGB, except that all NaN's in the data are set to transparent pixels | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/QtPlotter/CustomImage/UpdatingImage.py#L225-L351 |
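A minimal sketch of the NaN-to-transparent behavior described above; float input requires explicit levels, and the lookup table and array values here are hypothetical:
>>> import numpy as np
>>> lut = np.tile(np.arange(256, dtype=np.ubyte)[:, None], (1, 4))  # simple grayscale RGBA lookup table
>>> data = np.array([[0.0, 0.5], [np.nan, 1.0]])
>>> img, alpha = makeARGBwithNaNs(data, lut=lut, levels=[0.0, 1.0])
>>> img[1, 0]  # the NaN pixel comes back fully transparent: [0, 0, 0, 0]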
MAVENSDC/PyTplot | pytplot/QtPlotter/CustomImage/UpdatingImage.py | UpdatingImage.paint | def paint(self, p, *args):
'''
I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly.
'''
if self.picturenotgened:
self.generatePicture(self.getBoundingParents()[0].rect())
self.picturenotgened = False
pg.ImageItem.paint(self, p, *args)
self.generatePicture(self.getBoundingParents()[0].rect()) | python | def paint(self, p, *args):
'''
I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly.
'''
if self.picturenotgened:
self.generatePicture(self.getBoundingParents()[0].rect())
self.picturenotgened = False
pg.ImageItem.paint(self, p, *args)
self.generatePicture(self.getBoundingParents()[0].rect()) | I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly. | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/QtPlotter/CustomImage/UpdatingImage.py#L94-L103 |
MAVENSDC/PyTplot | pytplot/QtPlotter/CustomImage/UpdatingImage.py | UpdatingImage.setImage | def setImage(self, image=None, autoLevels=None, **kargs):
"""
Same as ImageItem.setImage, but we don't update the drawing
"""
profile = debug.Profiler()
gotNewData = False
if image is None:
if self.image is None:
return
else:
gotNewData = True
shapeChanged = (self.image is None or image.shape != self.image.shape)
image = image.view(np.ndarray)
if self.image is None or image.dtype != self.image.dtype:
self._effectiveLut = None
self.image = image
if self.image.shape[0] > 2**15-1 or self.image.shape[1] > 2**15-1:
if 'autoDownsample' not in kargs:
kargs['autoDownsample'] = True
if shapeChanged:
self.prepareGeometryChange()
self.informViewBoundsChanged()
profile()
if autoLevels is None:
if 'levels' in kargs:
autoLevels = False
else:
autoLevels = True
if autoLevels:
img = self.image
while img.size > 2**16:
img = img[::2, ::2]
mn, mx = img.min(), img.max()
if mn == mx:
mn = 0
mx = 255
kargs['levels'] = [mn,mx]
profile()
self.setOpts(update=False, **kargs)
profile()
self.qimage = None
self.update()
profile()
if gotNewData:
self.sigImageChanged.emit() | python | def setImage(self, image=None, autoLevels=None, **kargs):
"""
Same as ImageItem.setImage, but we don't update the drawing
"""
profile = debug.Profiler()
gotNewData = False
if image is None:
if self.image is None:
return
else:
gotNewData = True
shapeChanged = (self.image is None or image.shape != self.image.shape)
image = image.view(np.ndarray)
if self.image is None or image.dtype != self.image.dtype:
self._effectiveLut = None
self.image = image
if self.image.shape[0] > 2**15-1 or self.image.shape[1] > 2**15-1:
if 'autoDownsample' not in kargs:
kargs['autoDownsample'] = True
if shapeChanged:
self.prepareGeometryChange()
self.informViewBoundsChanged()
profile()
if autoLevels is None:
if 'levels' in kargs:
autoLevels = False
else:
autoLevels = True
if autoLevels:
img = self.image
while img.size > 2**16:
img = img[::2, ::2]
mn, mx = img.min(), img.max()
if mn == mx:
mn = 0
mx = 255
kargs['levels'] = [mn,mx]
profile()
self.setOpts(update=False, **kargs)
profile()
self.qimage = None
self.update()
profile()
if gotNewData:
self.sigImageChanged.emit() | Same as ImageItem.setImage, but we don't update the drawing | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/QtPlotter/CustomImage/UpdatingImage.py#L168-L222 |
MAVENSDC/PyTplot | pytplot/QtPlotter/PyTPlot_Exporter.py | PytplotExporter.getPaintItems | def getPaintItems(self, root=None):
"""Return a list of all items that should be painted in the correct order."""
if root is None:
root = self.item
preItems = []
postItems = []
if isinstance(root, QtGui.QGraphicsScene):
childs = [i for i in root.items() if i.parentItem() is None]
rootItem = []
else:
# CHANGE: For GraphicsLayouts, there is no function for childItems(), so I just
# replaced it with .items()
try:
childs = root.childItems()
except:
childs = root.items()
rootItem = [root]
childs.sort(key=lambda a: a.zValue())
while len(childs) > 0:
ch = childs.pop(0)
tree = self.getPaintItems(ch)
if int(ch.flags() & ch.ItemStacksBehindParent) > 0 or (
ch.zValue() < 0 and int(ch.flags() & ch.ItemNegativeZStacksBehindParent) > 0):
preItems.extend(tree)
else:
postItems.extend(tree)
return preItems + rootItem + postItems | python | def getPaintItems(self, root=None):
"""Return a list of all items that should be painted in the correct order."""
if root is None:
root = self.item
preItems = []
postItems = []
if isinstance(root, QtGui.QGraphicsScene):
childs = [i for i in root.items() if i.parentItem() is None]
rootItem = []
else:
# CHANGE: For GraphicsLayouts, there is no function for childItems(), so I just
# replaced it with .items()
try:
childs = root.childItems()
except:
childs = root.items()
rootItem = [root]
childs.sort(key=lambda a: a.zValue())
while len(childs) > 0:
ch = childs.pop(0)
tree = self.getPaintItems(ch)
if int(ch.flags() & ch.ItemStacksBehindParent) > 0 or (
ch.zValue() < 0 and int(ch.flags() & ch.ItemNegativeZStacksBehindParent) > 0):
preItems.extend(tree)
else:
postItems.extend(tree)
return preItems + rootItem + postItems | Return a list of all items that should be painted in the correct order. | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/QtPlotter/PyTPlot_Exporter.py#L92-L119 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/normalize_fun.py | run_norm | def run_norm(net, df=None, norm_type='zscore', axis='row', keep_orig=False):
'''
A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns
'''
# df here is actually a dictionary of several dataframes, 'mat', 'mat_orig',
# etc
if df is None:
df = net.dat_to_df()
if norm_type == 'zscore':
df = zscore_df(df, axis, keep_orig)
if norm_type == 'qn':
df = qn_df(df, axis, keep_orig)
net.df_to_dat(df) | python | def run_norm(net, df=None, norm_type='zscore', axis='row', keep_orig=False):
'''
A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns
'''
# df here is actually a dictionary of several dataframes, 'mat', 'mat_orig',
# etc
if df is None:
df = net.dat_to_df()
if norm_type == 'zscore':
df = zscore_df(df, axis, keep_orig)
if norm_type == 'qn':
df = qn_df(df, axis, keep_orig)
net.df_to_dat(df) | A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/normalize_fun.py#L5-L23 |
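A detail worth illustrating from the docstring above: df is a dictionary of DataFrames keyed by matrix type ('mat', 'mat_up', ...), not a bare DataFrame. A minimal sketch of the zscore branch applied to such a dictionary, on made-up data (the comprehension mirrors the transpose trick in zscore_df below):

import numpy as np
import pandas as pd

# toy dictionary-of-DataFrames, keyed the way run_norm expects
df_dict = {'mat': pd.DataFrame(np.arange(6.).reshape(3, 2),
                               index=['r1', 'r2', 'r3'], columns=['c1', 'c2'])}

# row z-score via the transpose trick used by zscore_df
normed = {key: ((val.T - val.T.mean()) / val.T.std()).T
          for key, val in df_dict.items()}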
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/normalize_fun.py | qn_df | def qn_df(df, axis='row', keep_orig=False):
'''
do quantile normalization of a dataframe dictionary, does not write to net
'''
df_qn = {}
for mat_type in df:
inst_df = df[mat_type]
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
missing_values = inst_df.isnull().values.any()
# make mask of missing values
if missing_values:
# get nan mask
missing_mask = pd.isnull(inst_df)
# tmp fill in na with zero, will not affect qn
inst_df = inst_df.fillna(value=0)
# calc common distribution
common_dist = calc_common_dist(inst_df)
# swap in common distribution
inst_df = swap_in_common_dist(inst_df, common_dist)
# swap back in missing values
if missing_values:
inst_df = inst_df.mask(missing_mask, other=np.nan)
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
df_qn[mat_type] = inst_df
return df_qn | python | def qn_df(df, axis='row', keep_orig=False):
'''
do quantile normalization of a dataframe dictionary, does not write to net
'''
df_qn = {}
for mat_type in df:
inst_df = df[mat_type]
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
missing_values = inst_df.isnull().values.any()
# make mask of missing values
if missing_values:
# get nan mask
missing_mask = pd.isnull(inst_df)
# tmp fill in na with zero, will not affect qn
inst_df = inst_df.fillna(value=0)
# calc common distribution
common_dist = calc_common_dist(inst_df)
# swap in common distribution
inst_df = swap_in_common_dist(inst_df, common_dist)
# swap back in missing values
if missing_values:
inst_df = inst_df.mask(missing_mask, other=np.nan)
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
df_qn[mat_type] = inst_df
return df_qn | do quantile normalization of a dataframe dictionary, does not write to net | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/normalize_fun.py#L25-L65 |
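The column branch above reduces to: sort each column in descending order, average across columns at each rank (calc_common_dist, the next row), then write the averaged values back in rank order (swap_in_common_dist, not shown in this section). A NumPy sketch of that idea on a tie-free toy matrix:

import numpy as np

mat = np.array([[5., 4., 3.],
                [2., 1., 4.],
                [3., 2., 6.]])

# rank of every entry within its column (0 = largest); assumes no ties
rank = np.argsort(np.argsort(-mat, axis=0), axis=0)

# common distribution: mean across columns of the descending-sorted values
common = np.sort(mat, axis=0)[::-1].mean(axis=1)   # -> [5., 3., 2.]

qn = common[rank]   # each entry replaced by the common value at its rank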
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/normalize_fun.py | calc_common_dist | def calc_common_dist(df):
'''
calculate a common distribution (for col qn only) that will be used to qn
'''
# axis is col
tmp_arr = np.array([])
col_names = df.columns.tolist()
for inst_col in col_names:
# sort column
tmp_vect = df[inst_col].sort_values(ascending=False).values
# stacking rows vertically (will transpose)
if tmp_arr.shape[0] == 0:
tmp_arr = tmp_vect
else:
tmp_arr = np.vstack((tmp_arr, tmp_vect))
tmp_arr = tmp_arr.transpose()
common_dist = tmp_arr.mean(axis=1)
return common_dist | python | def calc_common_dist(df):
'''
calculate a common distribution (for col qn only) that will be used to qn
'''
# axis is col
tmp_arr = np.array([])
col_names = df.columns.tolist()
for inst_col in col_names:
# sort column
tmp_vect = df[inst_col].sort_values(ascending=False).values
# stacking rows vertically (will transpose)
if tmp_arr.shape[0] == 0:
tmp_arr = tmp_vect
else:
tmp_arr = np.vstack((tmp_arr, tmp_vect))
tmp_arr = tmp_arr.transpose()
common_dist = tmp_arr.mean(axis=1)
return common_dist | calculate a common distribution (for col qn only) that will be used to qn | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/normalize_fun.py#L100-L125 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/normalize_fun.py | zscore_df | def zscore_df(df, axis='row', keep_orig=False):
'''
take the zscore of a dataframe dictionary, does not write to net (self)
'''
df_z = {}
for mat_type in df:
if keep_orig and mat_type == 'mat':
mat_orig = deepcopy(df[mat_type])
inst_df = df[mat_type]
if axis == 'row':
inst_df = inst_df.transpose()
df_z[mat_type] = (inst_df - inst_df.mean())/inst_df.std()
if axis == 'row':
df_z[mat_type] = df_z[mat_type].transpose()
if keep_orig:
df_z['mat_orig'] = mat_orig
return df_z | python | def zscore_df(df, axis='row', keep_orig=False):
'''
take the zscore of a dataframe dictionary, does not write to net (self)
'''
df_z = {}
for mat_type in df:
if keep_orig and mat_type == 'mat':
mat_orig = deepcopy(df[mat_type])
inst_df = df[mat_type]
if axis == 'row':
inst_df = inst_df.transpose()
df_z[mat_type] = (inst_df - inst_df.mean())/inst_df.std()
if axis == 'row':
df_z[mat_type] = df_z[mat_type].transpose()
if keep_orig:
df_z['mat_orig'] = mat_orig
return df_z | take the zscore of a dataframe dictionary, does not write to net (self) | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/normalize_fun.py#L127-L150 |
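The transpose trick above works because pandas arithmetic broadcasts column-wise: transposing turns rows into columns, (x - mean) / std standardizes each column, and transposing back restores the shape. The same operation on a bare DataFrame:

import pandas as pd

df = pd.DataFrame([[1., 2., 3.], [2., 4., 6.]],
                  index=['r1', 'r2'], columns=['c1', 'c2', 'c3'])

# row z-score, matching zscore_df(..., axis='row'); pandas .std() is the
# sample standard deviation (ddof=1), so the numbers match the function above
z = ((df.T - df.T.mean()) / df.T.std()).T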
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/cat_pval.py | main | def main(net):
'''
calculate pvalue of category closeness
'''
# calculate the distance between the data points within the same category and
# compare to null distribution
for inst_rc in ['row', 'col']:
inst_nodes = deepcopy(net.dat['nodes'][inst_rc])
inst_index = deepcopy(net.dat['node_info'][inst_rc]['clust'])
# reorder based on clustered order
inst_nodes = [ inst_nodes[i] for i in inst_index]
# make distance matrix dataframe
dm = dist_matrix_lattice(inst_nodes)
node_infos = list(net.dat['node_info'][inst_rc].keys())
all_cats = []
for inst_info in node_infos:
if 'dict_cat_' in inst_info:
all_cats.append(inst_info)
for cat_dict in all_cats:
tmp_dict = net.dat['node_info'][inst_rc][cat_dict]
pval_name = cat_dict.replace('dict_','pval_')
net.dat['node_info'][inst_rc][pval_name] = {}
for cat_name in tmp_dict:
subset = tmp_dict[cat_name]
inst_median = calc_median_dist_subset(dm, subset)
hist = calc_hist_distances(dm, subset, inst_nodes)
pval = 0
for i in range(len(hist['prob'])):
if i == 0:
pval = hist['prob'][i]
if i >= 1:
if inst_median >= hist['bins'][i]:
pval = pval + hist['prob'][i]
net.dat['node_info'][inst_rc][pval_name][cat_name] = pval | python | def main(net):
'''
calculate pvalue of category closeness
'''
# calculate the distance between the data points within the same category and
# compare to null distribution
for inst_rc in ['row', 'col']:
inst_nodes = deepcopy(net.dat['nodes'][inst_rc])
inst_index = deepcopy(net.dat['node_info'][inst_rc]['clust'])
# reorder based on clustered order
inst_nodes = [ inst_nodes[i] for i in inst_index]
# make distance matrix dataframe
dm = dist_matrix_lattice(inst_nodes)
node_infos = list(net.dat['node_info'][inst_rc].keys())
all_cats = []
for inst_info in node_infos:
if 'dict_cat_' in inst_info:
all_cats.append(inst_info)
for cat_dict in all_cats:
tmp_dict = net.dat['node_info'][inst_rc][cat_dict]
pval_name = cat_dict.replace('dict_','pval_')
net.dat['node_info'][inst_rc][pval_name] = {}
for cat_name in tmp_dict:
subset = tmp_dict[cat_name]
inst_median = calc_median_dist_subset(dm, subset)
hist = calc_hist_distances(dm, subset, inst_nodes)
pval = 0
for i in range(len(hist['prob'])):
if i == 0:
pval = hist['prob'][i]
if i >= 1:
if inst_median >= hist['bins'][i]:
pval = pval + hist['prob'][i]
net.dat['node_info'][inst_rc][pval_name][cat_name] = pval | calculate pvalue of category closeness | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/cat_pval.py#L5-L54 |
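The loop at the end of main() accumulates null-histogram probability mass for every bin at or below the observed median within-category distance, so a small total means the category's members sit closer together on the clustered lattice than the null expects. A toy trace of just that accumulation, with made-up histogram numbers:

# hist would come from calc_hist_distances; the numbers here are illustrative
hist = {'bins': [0.0, 1.0, 2.0, 3.0], 'prob': [0.1, 0.3, 0.4, 0.2]}
inst_median = 2.2   # observed median within-category distance

pval = hist['prob'][0]
for i in range(1, len(hist['prob'])):
    if inst_median >= hist['bins'][i]:
        pval += hist['prob'][i]

print(pval)   # 0.1 + 0.3 + 0.4 -> 0.8 (up to float rounding)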
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/proc_df_labels.py | main | def main(df):
'''
1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories
'''
import numpy as np
from ast import literal_eval as make_tuple
test = {}
test['row'] = df['mat'].index.tolist()
test['col'] = df['mat'].columns.tolist()
# if type( test_row ) is not str and type( test_row ) is not tuple:
found_tuple = {}
found_number = {}
for inst_rc in ['row','col']:
inst_name = test[inst_rc][0]
found_tuple[inst_rc] = False
found_number[inst_rc] = False
if type(inst_name) != tuple:
if type(inst_name) is int or type(inst_name) is float or type(inst_name) is np.int64:
found_number[inst_rc] = True
else:
check_open = inst_name[0]
check_comma = inst_name.find(',')
check_close = inst_name[-1]
if check_open == '(' and check_close == ')' and check_comma > 0 \
and check_comma < len(inst_name):
found_tuple[inst_rc] = True
# convert to tuple if necessary
#################################################
if found_tuple['row']:
row_names = df['mat'].index.tolist()
row_names = [make_tuple(x) for x in row_names]
df['mat'].index = row_names
if found_tuple['col']:
col_names = df['mat'].columns.tolist()
col_names = [make_tuple(x) for x in col_names]
df['mat'].columns = col_names
# convert numbers to string if necessary
#################################################
if found_number['row']:
row_names = df['mat'].index.tolist()
row_names = [str(x) for x in row_names]
df['mat'].index = row_names
if found_number['col']:
col_names = df['mat'].columns.tolist()
col_names = [str(x) for x in col_names]
df['mat'].columns = col_names
return df | python | def main(df):
'''
1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories
'''
import numpy as np
from ast import literal_eval as make_tuple
test = {}
test['row'] = df['mat'].index.tolist()
test['col'] = df['mat'].columns.tolist()
# if type( test_row ) is not str and type( test_row ) is not tuple:
found_tuple = {}
found_number = {}
for inst_rc in ['row','col']:
inst_name = test[inst_rc][0]
found_tuple[inst_rc] = False
found_number[inst_rc] = False
if type(inst_name) != tuple:
if type(inst_name) is int or type(inst_name) is float or type(inst_name) is np.int64:
found_number[inst_rc] = True
else:
check_open = inst_name[0]
check_comma = inst_name.find(',')
check_close = inst_name[-1]
if check_open == '(' and check_close == ')' and check_comma > 0 \
and check_comma < len(inst_name):
found_tuple[inst_rc] = True
# convert to tuple if necessary
#################################################
if found_tuple['row']:
row_names = df['mat'].index.tolist()
row_names = [make_tuple(x) for x in row_names]
df['mat'].index = row_names
if found_tuple['col']:
col_names = df['mat'].columns.tolist()
col_names = [make_tuple(x) for x in col_names]
df['mat'].columns = col_names
# convert numbers to string if necessary
#################################################
if found_number['row']:
row_names = df['mat'].index.tolist()
row_names = [str(x) for x in row_names]
df['mat'].index = row_names
if found_number['col']:
col_names = df['mat'].columns.tolist()
col_names = [str(x) for x in col_names]
df['mat'].columns = col_names
return df | 1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/proc_df_labels.py#L1-L62 |
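The tuple check in main() is purely textual: a label that starts with '(', ends with ')', and has a comma strictly inside is assumed to be a stringified tuple and parsed back with ast.literal_eval. For example:

from ast import literal_eval as make_tuple

label = "('TP53', 'type: tumor suppressor')"
parsed = make_tuple(label)

print(parsed[0])   # 'TP53' -- the node name
print(parsed[1])   # 'type: tumor suppressor' -- becomes a category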
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/data_formats.py | df_to_dat | def df_to_dat(net, df, define_cat_colors=False):
'''
This is always run when data is loaded.
'''
from . import categories
# check if df has unique values
df['mat'] = make_unique_labels.main(net, df['mat'])
net.dat['mat'] = df['mat'].values
net.dat['nodes']['row'] = df['mat'].index.tolist()
net.dat['nodes']['col'] = df['mat'].columns.tolist()
for inst_rc in ['row', 'col']:
if type(net.dat['nodes'][inst_rc][0]) is tuple:
# get the number of categories from the length of the tuple
# subtract 1 because the name is the first element of the tuple
num_cat = len(net.dat['nodes'][inst_rc][0]) - 1
net.dat['node_info'][inst_rc]['full_names'] = net.dat['nodes']\
[inst_rc]
for inst_rcat in range(num_cat):
net.dat['node_info'][inst_rc]['cat-' + str(inst_rcat)] = \
[i[inst_rcat + 1] for i in net.dat['nodes'][inst_rc]]
net.dat['nodes'][inst_rc] = [i[0] for i in net.dat['nodes'][inst_rc]]
if 'mat_up' in df:
net.dat['mat_up'] = df['mat_up'].values
net.dat['mat_dn'] = df['mat_dn'].values
if 'mat_orig' in df:
net.dat['mat_orig'] = df['mat_orig'].values
categories.dict_cat(net, define_cat_colors=define_cat_colors) | python | def df_to_dat(net, df, define_cat_colors=False):
'''
This is always run when data is loaded.
'''
from . import categories
# check if df has unique values
df['mat'] = make_unique_labels.main(net, df['mat'])
net.dat['mat'] = df['mat'].values
net.dat['nodes']['row'] = df['mat'].index.tolist()
net.dat['nodes']['col'] = df['mat'].columns.tolist()
for inst_rc in ['row', 'col']:
if type(net.dat['nodes'][inst_rc][0]) is tuple:
# get the number of categories from the length of the tuple
# subtract 1 because the name is the first element of the tuple
num_cat = len(net.dat['nodes'][inst_rc][0]) - 1
net.dat['node_info'][inst_rc]['full_names'] = net.dat['nodes']\
[inst_rc]
for inst_rcat in range(num_cat):
net.dat['node_info'][inst_rc]['cat-' + str(inst_rcat)] = \
[i[inst_rcat + 1] for i in net.dat['nodes'][inst_rc]]
net.dat['nodes'][inst_rc] = [i[0] for i in net.dat['nodes'][inst_rc]]
if 'mat_up' in df:
net.dat['mat_up'] = df['mat_up'].values
net.dat['mat_dn'] = df['mat_dn'].values
if 'mat_orig' in df:
net.dat['mat_orig'] = df['mat_orig'].values
categories.dict_cat(net, define_cat_colors=define_cat_colors) | This is always run when data is loaded. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/data_formats.py#L3-L39 |
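The tuple convention df_to_dat consumes: the first element of each index tuple is the node name, and every remaining element becomes a category list ('cat-0', 'cat-1', ...). The unpacking on toy labels:

rows = [('g1', 'type: kinase'), ('g2', 'type: phosphatase')]

num_cat = len(rows[0]) - 1      # 1 category; the name is the first element
names = [r[0] for r in rows]    # ['g1', 'g2'] -> net.dat['nodes']['row']
cat_0 = [r[1] for r in rows]    # -> net.dat['node_info']['row']['cat-0']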
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/data_formats.py | mat_to_numpy_arr | def mat_to_numpy_arr(self):
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray(self.dat['mat']) | python | def mat_to_numpy_arr(self):
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray(self.dat['mat']) | convert list to numpy array - numpy arrays can not be saved as json | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/data_formats.py#L69-L72 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/calc_clust.py | cluster_row_and_col | def cluster_row_and_col(net, dist_type='cosine', linkage_type='average',
dendro=True, run_clustering=True, run_rank=True,
ignore_cat=False, calc_cat_pval=False, links=False):
''' cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument '''
import scipy
from copy import deepcopy
from scipy.spatial.distance import pdist
from . import categories, make_viz, cat_pval
dm = {}
for inst_rc in ['row', 'col']:
tmp_mat = deepcopy(net.dat['mat'])
dm[inst_rc] = calc_distance_matrix(tmp_mat, inst_rc, dist_type)
# save directly to dat structure
node_info = net.dat['node_info'][inst_rc]
node_info['ini'] = list(range( len(net.dat['nodes'][inst_rc]), -1, -1))
# cluster
if run_clustering is True:
node_info['clust'], node_info['group'] = \
clust_and_group(net, dm[inst_rc], linkage_type=linkage_type)
else:
dendro = False
node_info['clust'] = node_info['ini']
# sorting
if run_rank is True:
node_info['rank'] = sort_rank_nodes(net, inst_rc, 'sum')
node_info['rankvar'] = sort_rank_nodes(net, inst_rc, 'var')
else:
node_info['rank'] = node_info['ini']
node_info['rankvar'] = node_info['ini']
##################################
if ignore_cat is False:
categories.calc_cat_clust_order(net, inst_rc)
if calc_cat_pval is True:
cat_pval.main(net)
# make the visualization json
make_viz.viz_json(net, dendro, links)
return dm | python | def cluster_row_and_col(net, dist_type='cosine', linkage_type='average',
dendro=True, run_clustering=True, run_rank=True,
ignore_cat=False, calc_cat_pval=False, links=False):
''' cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument '''
import scipy
from copy import deepcopy
from scipy.spatial.distance import pdist
from . import categories, make_viz, cat_pval
dm = {}
for inst_rc in ['row', 'col']:
tmp_mat = deepcopy(net.dat['mat'])
dm[inst_rc] = calc_distance_matrix(tmp_mat, inst_rc, dist_type)
# save directly to dat structure
node_info = net.dat['node_info'][inst_rc]
node_info['ini'] = list(range( len(net.dat['nodes'][inst_rc]), -1, -1))
# cluster
if run_clustering is True:
node_info['clust'], node_info['group'] = \
clust_and_group(net, dm[inst_rc], linkage_type=linkage_type)
else:
dendro = False
node_info['clust'] = node_info['ini']
# sorting
if run_rank is True:
node_info['rank'] = sort_rank_nodes(net, inst_rc, 'sum')
node_info['rankvar'] = sort_rank_nodes(net, inst_rc, 'var')
else:
node_info['rank'] = node_info['ini']
node_info['rankvar'] = node_info['ini']
##################################
if ignore_cat is False:
categories.calc_cat_clust_order(net, inst_rc)
if calc_cat_pval is True:
cat_pval.main(net)
# make the visualization json
make_viz.viz_json(net, dendro, links)
return dm | cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/calc_clust.py#L1-L49 |
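calc_distance_matrix and clust_and_group are not shown in this section, so the following is only a hedged sketch of the SciPy calls a pipeline like this typically rests on: pairwise distances, average linkage, and a dendrogram leaf order used to reorder the rows.

import numpy as np
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, leaves_list

mat = np.random.rand(6, 4)

dm = pdist(mat, metric='cosine')    # row-row distances (dist_type='cosine')
Z = linkage(dm, method='average')   # linkage_type='average'
clust_order = leaves_list(Z)        # leaf order for reordering the rows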
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/categories.py | check_categories | def check_categories(lines):
'''
find out how many row and col categories are available
'''
# count the number of row categories
rcat_line = lines[0].split('\t')
# calc the number of row names and categories
num_rc = 0
found_end = False
# skip first tab
for inst_string in rcat_line[1:]:
if inst_string == '':
if found_end is False:
num_rc = num_rc + 1
else:
found_end = True
max_rcat = 15
if max_rcat > len(lines):
max_rcat = len(lines) - 1
num_cc = 0
for i in range(max_rcat):
ccat_line = lines[i + 1].split('\t')
# make sure that line has length greater than one to prevent false cats from
# trailing new lines at end of matrix
if ccat_line[0] == '' and len(ccat_line) > 1:
num_cc = num_cc + 1
num_labels = {}
num_labels['row'] = num_rc + 1
num_labels['col'] = num_cc + 1
return num_labels | python | def check_categories(lines):
'''
find out how many row and col categories are available
'''
# count the number of row categories
rcat_line = lines[0].split('\t')
# calc the number of row names and categories
num_rc = 0
found_end = False
# skip first tab
for inst_string in rcat_line[1:]:
if inst_string == '':
if found_end is False:
num_rc = num_rc + 1
else:
found_end = True
max_rcat = 15
if max_rcat > len(lines):
max_rcat = len(lines) - 1
num_cc = 0
for i in range(max_rcat):
ccat_line = lines[i + 1].split('\t')
# make sure that line has length greater than one to prevent false cats from
# trailing new lines at end of matrix
if ccat_line[0] == '' and len(ccat_line) > 1:
num_cc = num_cc + 1
num_labels = {}
num_labels['row'] = num_rc + 1
num_labels['col'] = num_cc + 1
return num_labels | find out how many row and col categories are available | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/categories.py#L1-L37 |
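A worked toy input for the counting rules above: empty cells after the first tab on the first line are extra row-label columns, and subsequent lines that begin with an empty cell are column-category rows.

lines = ['\t\tcol1\tcol2',      # one leading empty cell after the first tab -> num_rc = 1
         '\tgrp: A\tgrp: B',    # line starts with '' -> num_cc = 1
         'row1\ttype: X\t1\t2']

# check_categories(lines) -> {'row': 2, 'col': 2}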
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/categories.py | dict_cat | def dict_cat(net, define_cat_colors=False):
'''
make a dictionary of node-category associations
'''
# print('---------------------------------')
# print('---- dict_cat: before setting cat colors')
# print('---------------------------------\n')
# print(define_cat_colors)
# print(net.viz['cat_colors'])
net.persistent_cat = True
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for inst_name_cat in all_cats:
dict_cat = {}
tmp_cats = net.dat['node_info'][inst_rc][inst_name_cat]
tmp_nodes = net.dat['nodes'][inst_rc]
for i in range(len(tmp_cats)):
inst_cat = tmp_cats[i]
inst_node = tmp_nodes[i]
if inst_cat not in dict_cat:
dict_cat[inst_cat] = []
dict_cat[inst_cat].append(inst_node)
tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
net.dat['node_info'][inst_rc][tmp_name] = dict_cat
# merge with old cat_colors by default
cat_colors = net.viz['cat_colors']
if define_cat_colors == True:
cat_number = 0
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for cat_index in all_cats:
if cat_index not in cat_colors[inst_rc]:
cat_colors[inst_rc][cat_index] = {}
cat_names = sorted(list(set(net.dat['node_info'][inst_rc][cat_index])))
# loop through each category name and assign a color
for tmp_name in cat_names:
# using the same rules as the front-end to define cat_colors
inst_color = get_cat_color(cat_number + cat_names.index(tmp_name))
check_name = tmp_name
# check if category is string type and non-numeric
try:
float(check_name)
is_string_cat = False
except:
is_string_cat = True
if is_string_cat == True:
# check for default non-color
if ': ' in check_name:
check_name = check_name.split(': ')[1]
# if check_name == 'False' or check_name == 'false':
if 'False' in check_name or 'false' in check_name:
inst_color = '#eee'
if 'Not ' in check_name:
inst_color = '#eee'
# print('cat_colors')
# print('----------')
# print(cat_colors[inst_rc][cat_index])
# do not overwrite old colors
if tmp_name not in cat_colors[inst_rc][cat_index] and is_string_cat:
cat_colors[inst_rc][cat_index][tmp_name] = inst_color
# print('overwrite: ' + tmp_name + ' -> ' + str(inst_color))
cat_number = cat_number + 1
net.viz['cat_colors'] = cat_colors | python | def dict_cat(net, define_cat_colors=False):
'''
make a dictionary of node-category associations
'''
# print('---------------------------------')
# print('---- dict_cat: before setting cat colors')
# print('---------------------------------\n')
# print(define_cat_colors)
# print(net.viz['cat_colors'])
net.persistent_cat = True
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for inst_name_cat in all_cats:
dict_cat = {}
tmp_cats = net.dat['node_info'][inst_rc][inst_name_cat]
tmp_nodes = net.dat['nodes'][inst_rc]
for i in range(len(tmp_cats)):
inst_cat = tmp_cats[i]
inst_node = tmp_nodes[i]
if inst_cat not in dict_cat:
dict_cat[inst_cat] = []
dict_cat[inst_cat].append(inst_node)
tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
net.dat['node_info'][inst_rc][tmp_name] = dict_cat
# merge with old cat_colors by default
cat_colors = net.viz['cat_colors']
if define_cat_colors == True:
cat_number = 0
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for cat_index in all_cats:
if cat_index not in cat_colors[inst_rc]:
cat_colors[inst_rc][cat_index] = {}
cat_names = sorted(list(set(net.dat['node_info'][inst_rc][cat_index])))
# loop through each category name and assign a color
for tmp_name in cat_names:
# using the same rules as the front-end to define cat_colors
inst_color = get_cat_color(cat_number + cat_names.index(tmp_name))
check_name = tmp_name
# check if category is string type and non-numeric
try:
float(check_name)
is_string_cat = False
except:
is_string_cat = True
if is_string_cat == True:
# check for default non-color
if ': ' in check_name:
check_name = check_name.split(': ')[1]
# if check_name == 'False' or check_name == 'false':
if 'False' in check_name or 'false' in check_name:
inst_color = '#eee'
if 'Not ' in check_name:
inst_color = '#eee'
# print('cat_colors')
# print('----------')
# print(cat_colors[inst_rc][cat_index])
# do not overwrite old colors
if tmp_name not in cat_colors[inst_rc][cat_index] and is_string_cat:
cat_colors[inst_rc][cat_index][tmp_name] = inst_color
# print('overwrite: ' + tmp_name + ' -> ' + str(inst_color))
cat_number = cat_number + 1
net.viz['cat_colors'] = cat_colors | make a dictionary of node-category associations | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/categories.py#L39-L131 |
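The first half of dict_cat is a plain inversion from two parallel lists to a category-to-nodes mapping; the same operation in miniature:

tmp_cats = ['type: A', 'type: B', 'type: A']
tmp_nodes = ['g1', 'g2', 'g3']

dict_cat = {}
for cat, node in zip(tmp_cats, tmp_nodes):
    dict_cat.setdefault(cat, []).append(node)

# -> {'type: A': ['g1', 'g3'], 'type: B': ['g2']}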
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/categories.py | calc_cat_clust_order | def calc_cat_clust_order(net, inst_rc):
'''
cluster category subset of data
'''
from .__init__ import Network
from copy import deepcopy
from . import calc_clust, run_filter
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
if len(all_cats) > 0:
for inst_name_cat in all_cats:
tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
dict_cat = net.dat['node_info'][inst_rc][tmp_name]
unordered_cats = dict_cat.keys()
ordered_cats = order_categories(unordered_cats)
# this is the ordering of the columns based on their category, not
# including their clustering ordering within category
all_cat_orders = []
tmp_names_list = []
for inst_cat in ordered_cats:
inst_nodes = dict_cat[inst_cat]
tmp_names_list.extend(inst_nodes)
# cat_net = deepcopy(Network())
# cat_net.dat['mat'] = deepcopy(net.dat['mat'])
# cat_net.dat['nodes'] = deepcopy(net.dat['nodes'])
# cat_df = cat_net.dat_to_df()
# sub_df = {}
# if inst_rc == 'col':
# sub_df['mat'] = cat_df['mat'][inst_nodes]
# elif inst_rc == 'row':
# # need to transpose df
# cat_df['mat'] = cat_df['mat'].transpose()
# sub_df['mat'] = cat_df['mat'][inst_nodes]
# sub_df['mat'] = sub_df['mat'].transpose()
# # filter matrix before clustering
# ###################################
# threshold = 0.0001
# sub_df = run_filter.df_filter_row_sum(sub_df, threshold)
# sub_df = run_filter.df_filter_col_sum(sub_df, threshold)
# # load back to dat
# cat_net.df_to_dat(sub_df)
# cat_mat_shape = cat_net.dat['mat'].shape
# print('***************')
# try:
# if cat_mat_shape[0]>1 and cat_mat_shape[1] > 1 and all_are_numbers == False:
# calc_clust.cluster_row_and_col(cat_net, 'cos')
# inst_cat_order = cat_net.dat['node_info'][inst_rc]['clust']
# else:
# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
# except:
# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
# prev_order_len = len(all_cat_orders)
# # add prev order length to the current order number
# inst_cat_order = [i + prev_order_len for i in inst_cat_order]
# all_cat_orders.extend(inst_cat_order)
# # generate ordered list of row/col names, which will be used to
# # assign the order to specific nodes
# names_clust_list = [x for (y, x) in sorted(zip(all_cat_orders,
# tmp_names_list))]
names_clust_list = tmp_names_list
# calc category-cluster order
final_order = []
for i in range(len(net.dat['nodes'][inst_rc])):
inst_node_name = net.dat['nodes'][inst_rc][i]
inst_node_num = names_clust_list.index(inst_node_name)
final_order.append(inst_node_num)
inst_index_cat = inst_name_cat.replace('-', '_') + '_index'
net.dat['node_info'][inst_rc][inst_index_cat] = final_order | python | def calc_cat_clust_order(net, inst_rc):
'''
cluster category subset of data
'''
from .__init__ import Network
from copy import deepcopy
from . import calc_clust, run_filter
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
if len(all_cats) > 0:
for inst_name_cat in all_cats:
tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
dict_cat = net.dat['node_info'][inst_rc][tmp_name]
unordered_cats = dict_cat.keys()
ordered_cats = order_categories(unordered_cats)
# this is the ordering of the columns based on their category, not
# including their clustering ordering within category
all_cat_orders = []
tmp_names_list = []
for inst_cat in ordered_cats:
inst_nodes = dict_cat[inst_cat]
tmp_names_list.extend(inst_nodes)
# cat_net = deepcopy(Network())
# cat_net.dat['mat'] = deepcopy(net.dat['mat'])
# cat_net.dat['nodes'] = deepcopy(net.dat['nodes'])
# cat_df = cat_net.dat_to_df()
# sub_df = {}
# if inst_rc == 'col':
# sub_df['mat'] = cat_df['mat'][inst_nodes]
# elif inst_rc == 'row':
# # need to transpose df
# cat_df['mat'] = cat_df['mat'].transpose()
# sub_df['mat'] = cat_df['mat'][inst_nodes]
# sub_df['mat'] = sub_df['mat'].transpose()
# # filter matrix before clustering
# ###################################
# threshold = 0.0001
# sub_df = run_filter.df_filter_row_sum(sub_df, threshold)
# sub_df = run_filter.df_filter_col_sum(sub_df, threshold)
# # load back to dat
# cat_net.df_to_dat(sub_df)
# cat_mat_shape = cat_net.dat['mat'].shape
# print('***************')
# try:
# if cat_mat_shape[0]>1 and cat_mat_shape[1] > 1 and all_are_numbers == False:
# calc_clust.cluster_row_and_col(cat_net, 'cos')
# inst_cat_order = cat_net.dat['node_info'][inst_rc]['clust']
# else:
# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
# except:
# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
# prev_order_len = len(all_cat_orders)
# # add prev order length to the current order number
# inst_cat_order = [i + prev_order_len for i in inst_cat_order]
# all_cat_orders.extend(inst_cat_order)
# # generate ordered list of row/col names, which will be used to
# # assign the order to specific nodes
# names_clust_list = [x for (y, x) in sorted(zip(all_cat_orders,
# tmp_names_list))]
names_clust_list = tmp_names_list
# calc category-cluster order
final_order = []
for i in range(len(net.dat['nodes'][inst_rc])):
inst_node_name = net.dat['nodes'][inst_rc][i]
inst_node_num = names_clust_list.index(inst_node_name)
final_order.append(inst_node_num)
inst_index_cat = inst_name_cat.replace('-', '_') + '_index'
net.dat['node_info'][inst_rc][inst_index_cat] = final_order | cluster category subset of data | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/categories.py#L137-L234 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/categories.py | order_categories | def order_categories(unordered_cats):
'''
If categories are strings, then simple ordering is fine.
If categories are values then I'll need to order based on their values.
The final ordering is given as the original categories (including titles) in an
ordered list.
'''
no_titles = remove_titles(unordered_cats)
all_are_numbers = check_all_numbers(no_titles)
if all_are_numbers:
ordered_cats = order_cats_based_on_values(unordered_cats, no_titles)
else:
ordered_cats = sorted(unordered_cats)
return ordered_cats | python | def order_categories(unordered_cats):
'''
If categories are strings, then simple ordering is fine.
If categories are values then I'll need to order based on their values.
The final ordering is given as the original categories (including titles) in an
ordered list.
'''
no_titles = remove_titles(unordered_cats)
all_are_numbers = check_all_numbers(no_titles)
if all_are_numbers:
ordered_cats = order_cats_based_on_values(unordered_cats, no_titles)
else:
ordered_cats = sorted(unordered_cats)
return ordered_cats | If categories are strings, then simple ordering is fine.
If categories are values then I'll need to order based on their values.
The final ordering is given as the original categories (including titles) in an
ordered list. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/categories.py#L237-L254 |
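remove_titles and order_cats_based_on_values are not shown in this section, so the 'title: value' split below is an assumption about their behavior; the point is the numeric branch: when every stripped value parses as a number, categories sort by value rather than lexically.

cats = ['score: 10', 'score: 2', 'score: 1']

values = [float(c.split(': ')[1]) for c in cats]    # all parse -> numeric branch
ordered = [c for _, c in sorted(zip(values, cats))]

# -> ['score: 1', 'score: 2', 'score: 10'] (a lexical sort would put '10' first)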
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.load_file_as_string | def load_file_as_string(self, file_string, filename=''):
'''
Load file as a string.
'''
load_data.load_file_as_string(self, file_string, filename=filename) | python | def load_file_as_string(self, file_string, filename=''):
'''
Load file as a string.
'''
load_data.load_file_as_string(self, file_string, filename=filename) | Load file as a string. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L56-L60 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.load_data_file_to_net | def load_data_file_to_net(self, filename):
'''
Load Clustergrammer's dat format (saved as JSON).
'''
inst_dat = self.load_json_to_dict(filename)
load_data.load_data_to_net(self, inst_dat) | python | def load_data_file_to_net(self, filename):
'''
Load Clustergrammer's dat format (saved as JSON).
'''
inst_dat = self.load_json_to_dict(filename)
load_data.load_data_to_net(self, inst_dat) | Load Clustergrammer's dat format (saved as JSON). | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L82-L87 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.cluster | def cluster(self, dist_type='cosine', run_clustering=True,
dendro=True, views=['N_row_sum', 'N_row_var'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, run_enrichr=None, enrichrgram=None):
'''
The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``.
'''
initialize_net.viz(self)
make_clust_fun.make_clust(self, dist_type=dist_type, run_clustering=run_clustering,
dendro=dendro,
requested_views=views,
linkage_type=linkage_type,
sim_mat=sim_mat,
filter_sim=filter_sim,
calc_cat_pval=calc_cat_pval,
run_enrichr=run_enrichr,
enrichrgram=enrichrgram) | python | def cluster(self, dist_type='cosine', run_clustering=True,
dendro=True, views=['N_row_sum', 'N_row_var'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, run_enrichr=None, enrichrgram=None):
'''
The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``.
'''
initialize_net.viz(self)
make_clust_fun.make_clust(self, dist_type=dist_type, run_clustering=run_clustering,
dendro=dendro,
requested_views=views,
linkage_type=linkage_type,
sim_mat=sim_mat,
filter_sim=filter_sim,
calc_cat_pval=calc_cat_pval,
run_enrichr=run_enrichr,
enrichrgram=enrichrgram) | The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L89-L106
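Typical end-to-end usage, as a sketch. The import line is an assumption based on the instantiation message printed by widget() below; the random matrix is only for illustration.

import numpy as np
import pandas as pd
from clustergrammer_widget import *   # assumed to expose Network and clustergrammer_widget

net = Network(clustergrammer_widget)  # instantiation per the widget() message below

df = pd.DataFrame(np.random.randn(10, 5),
                  index=['row-%d' % i for i in range(10)],
                  columns=['col-%d' % i for i in range(5)])
net.load_df(df)
net.cluster(dist_type='cosine', views=['N_row_sum', 'N_row_var'])
net.widget()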
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.load_df | def load_df(self, df):
'''
Load Pandas DataFrame.
'''
# self.__init__()
self.reset()
df_dict = {}
df_dict['mat'] = deepcopy(df)
# always define category colors if applicable when loading a df
data_formats.df_to_dat(self, df_dict, define_cat_colors=True) | python | def load_df(self, df):
'''
Load Pandas DataFrame.
'''
# self.__init__()
self.reset()
df_dict = {}
df_dict['mat'] = deepcopy(df)
# always define category colors if applicable when loading a df
data_formats.df_to_dat(self, df_dict, define_cat_colors=True) | Load Pandas DataFrame. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L148-L158 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.df_to_dat | def df_to_dat(self, df, define_cat_colors=False):
'''
Load Pandas DataFrame (will be deprecated).
'''
data_formats.df_to_dat(self, df, define_cat_colors) | python | def df_to_dat(self, df, define_cat_colors=False):
'''
Load Pandas DataFrame (will be deprecated).
'''
data_formats.df_to_dat(self, df, define_cat_colors) | Load Pandas DataFrame (will be deprecated). | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L167-L171 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.widget | def widget(self, which_viz='viz'):
'''
Generate a widget visualization. The export_viz_to_widget
method passes the visualization JSON to the instantiated widget, which is
returned and visualized on the front-end.
'''
if hasattr(self, 'widget_class') == True:
# run clustering if necessary
if len(self.viz['row_nodes']) == 0:
self.cluster()
self.widget_instance = self.widget_class(network = self.export_viz_to_widget(which_viz))
return self.widget_instance
else:
print('Can not make widget because Network has no attribute widget_class')
print('Please instantiate Network with clustergrammer_widget using: Network(clustergrammer_widget)') | python | def widget(self, which_viz='viz'):
'''
Generate a widget visualization. The export_viz_to_widget
method passes the visualization JSON to the instantiated widget, which is
returned and visualized on the front-end.
'''
if hasattr(self, 'widget_class') == True:
# run clustering if necessary
if len(self.viz['row_nodes']) == 0:
self.cluster()
self.widget_instance = self.widget_class(network = self.export_viz_to_widget(which_viz))
return self.widget_instance
else:
print('Can not make widget because Network has no attribute widget_class')
print('Please instantiate Network with clustergrammer_widget using: Network(clustergrammer_widget)') | Generate a widget visualization. The export_viz_to_widget
method passes the visualization JSON to the instantiated widget, which is
returned and visualized on the front-end. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L210-L227 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.widget_df | def widget_df(self):
'''
Export a DataFrame from the front-end visualization. For instance, a user
can filter to show only a single cluster using the dendrogram and then
get a dataframe of this cluster using the widget_df method.
'''
if hasattr(self, 'widget_instance') == True:
if self.widget_instance.mat_string != '':
tmp_net = deepcopy(Network())
df_string = self.widget_instance.mat_string
tmp_net.load_file_as_string(df_string)
df = tmp_net.export_df()
return df
else:
return self.export_df()
else:
if hasattr(self, 'widget_class') == True:
print('Please make the widget before exporting the widget DataFrame.')
print('Do this using the widget method: net.widget()')
else:
print('Can not make widget because Network has no attribute widget_class')
print('Please instantiate Network with clustergrammer_widget using: Network(clustergrammer_widget)') | python | def widget_df(self):
'''
Export a DataFrame from the front-end visualization. For instance, a user
can filter to show only a single cluster using the dendrogram and then
get a dataframe of this cluster using the widget_df method.
'''
if hasattr(self, 'widget_instance') == True:
if self.widget_instance.mat_string != '':
tmp_net = deepcopy(Network())
df_string = self.widget_instance.mat_string
tmp_net.load_file_as_string(df_string)
df = tmp_net.export_df()
return df
else:
return self.export_df()
else:
if hasattr(self, 'widget_class') == True:
print('Please make the widget before exporting the widget DataFrame.')
print('Do this using the widget method: net.widget()')
else:
print('Can not make widget because Network has no attribute widget_class')
print('Please instantiate Network with clustergrammer_widget using: Network(clustergrammer_widget)') | Export a DataFrame from the front-end visualization. For instance, a user
can filter to show only a single cluster using the dendrogram and then
get a dataframe of this cluster using the widget_df method. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L230-L261 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.write_json_to_file | def write_json_to_file(self, net_type, filename, indent='no-indent'):
'''
Save dat or viz as a JSON to file.
'''
export_data.write_json_to_file(self, net_type, filename, indent) | python | def write_json_to_file(self, net_type, filename, indent='no-indent'):
'''
Save dat or viz as a JSON to file.
'''
export_data.write_json_to_file(self, net_type, filename, indent) | Save dat or viz as a JSON to file. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L263-L267 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.filter_sum | def filter_sum(self, inst_rc, threshold, take_abs=True):
'''
Filter a network's rows or columns based on the sum across rows or columns.
'''
inst_df = self.dat_to_df()
if inst_rc == 'row':
inst_df = run_filter.df_filter_row_sum(inst_df, threshold, take_abs)
elif inst_rc == 'col':
inst_df = run_filter.df_filter_col_sum(inst_df, threshold, take_abs)
self.df_to_dat(inst_df) | python | def filter_sum(self, inst_rc, threshold, take_abs=True):
'''
Filter a network's rows or columns based on the sum across rows or columns.
'''
inst_df = self.dat_to_df()
if inst_rc == 'row':
inst_df = run_filter.df_filter_row_sum(inst_df, threshold, take_abs)
elif inst_rc == 'col':
inst_df = run_filter.df_filter_col_sum(inst_df, threshold, take_abs)
self.df_to_dat(inst_df) | Filter a network's rows or columns based on the sum across rows or columns. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L275-L284 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.filter_N_top | def filter_N_top(self, inst_rc, N_top, rank_type='sum'):
'''
Filter the matrix rows or columns based on sum/variance, and only keep the top
N.
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_N_top(inst_rc, inst_df, N_top, rank_type)
self.df_to_dat(inst_df) | python | def filter_N_top(self, inst_rc, N_top, rank_type='sum'):
'''
Filter the matrix rows or columns based on sum/variance, and only keep the top
N.
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_N_top(inst_rc, inst_df, N_top, rank_type)
self.df_to_dat(inst_df) | Filter the matrix rows or columns based on sum/variance, and only keep the top
N. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L286-L295 |
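The three filter methods (filter_sum above, filter_N_top here, filter_threshold next) share one pattern: export to a DataFrame, filter with run_filter, reload. Chaining them on an already-loaded net:

net.filter_sum('row', threshold=50)             # drop rows whose absolute sum is below 50
net.filter_N_top('row', 250)                    # keep the 250 top-ranked rows (rank_type='sum')
net.filter_threshold('row', 3.0, num_occur=4)   # keep rows with >= 4 values above 3.0 in absolute value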
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.filter_threshold | def filter_threshold(self, inst_rc, threshold, num_occur=1):
'''
Filter the matrix rows or columns based on num_occur values being above a
threshold (in absolute value).
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_threshold(inst_df, inst_rc, threshold,
num_occur)
self.df_to_dat(inst_df) | python | def filter_threshold(self, inst_rc, threshold, num_occur=1):
'''
Filter the matrix rows or columns based on num_occur values being above a
threshold (in absolute value).
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_threshold(inst_df, inst_rc, threshold,
num_occur)
self.df_to_dat(inst_df) | Filter the matrix rows or columns based on num_occur values being above a
threshold (in absolute value). | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L297-L307 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.filter_cat | def filter_cat(self, axis, cat_index, cat_name):
'''
Filter the matrix rows or columns based on their category. cat_index is the index of the category; the first category has index=1.
'''
run_filter.filter_cat(self, axis, cat_index, cat_name) | python | def filter_cat(self, axis, cat_index, cat_name):
'''
Filter the matrix rows or columns based on their category. cat_index is the index of the category; the first category has index=1.
'''
run_filter.filter_cat(self, axis, cat_index, cat_name) | Filter the matrix rows or columns based on their category. cat_index is the index of the category; the first category has index=1. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L309-L313
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.clip | def clip(self, lower=None, upper=None):
'''
Trim values at input thresholds using pandas function
'''
df = self.export_df()
df = df.clip(lower=lower, upper=upper)
self.load_df(df) | python | def clip(self, lower=None, upper=None):
'''
Trim values at input thresholds using pandas function
'''
df = self.export_df()
df = df.clip(lower=lower, upper=upper)
self.load_df(df) | Trim values at input thresholds using pandas function | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L321-L327 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.normalize | def normalize(self, df=None, norm_type='zscore', axis='row', keep_orig=False):
'''
Normalize the matrix rows or columns using Z-score (zscore) or Quantile Normalization (qn). Users can optionally pass in a DataFrame to be normalized (and this will be incorporated into the Network object).
'''
normalize_fun.run_norm(self, df, norm_type, axis, keep_orig) | python | def normalize(self, df=None, norm_type='zscore', axis='row', keep_orig=False):
'''
Normalize the matrix rows or columns using Z-score (zscore) or Quantile Normalization (qn). Users can optionally pass in a DataFrame to be normalized (and this will be incorporated into the Network object).
'''
normalize_fun.run_norm(self, df, norm_type, axis, keep_orig) | Normalize the matrix rows or columns using Z-score (zscore) or Quantile Normalization (qn). Users can optionally pass in a DataFrame to be normalized (and this will be incorporated into the Network object). | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L329-L333 |
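Usage of the two supported norm_type values on an already-loaded net:

net.normalize(axis='row', norm_type='zscore')   # row z-score
net.normalize(axis='col', norm_type='qn')       # column quantile normalization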
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.downsample | def downsample(self, df=None, ds_type='kmeans', axis='row', num_samples=100, random_state=1000):
'''
Downsample the matrix rows or columns (currently supporting kmeans only). Users can optionally pass in a DataFrame to be downsampled (and this will be incorporated into the network object).
'''
return downsample_fun.main(self, df, ds_type, axis, num_samples, random_state) | python | def downsample(self, df=None, ds_type='kmeans', axis='row', num_samples=100, random_state=1000):
'''
Downsample the matrix rows or columns (currently supporting kmeans only). Users can optionally pass in a DataFrame to be downsampled (and this will be incorporated into the network object).
'''
return downsample_fun.main(self, df, ds_type, axis, num_samples, random_state) | Downsample the matrix rows or columns (currently supporting kmeans only). Users can optionally pass in a DataFrame to be downsampled (and this will be incorporated into the network object). | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L335-L340 |
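downsample_fun.main is not shown in this section, so the following is only a hedged sketch of what K-means downsampling of columns amounts to: cluster the columns and keep one centroid per cluster.

import numpy as np
from sklearn.cluster import KMeans

mat = np.random.rand(20, 500)    # e.g. genes x cells

km = KMeans(n_clusters=100, random_state=1000).fit(mat.T)
ds_mat = km.cluster_centers_.T   # genes x 100 downsampled columns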
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.random_sample | def random_sample(self, num_samples, df=None, replace=False, weights=None, random_state=100, axis='row'):
'''
Randomly sample the matrix rows or columns and load the result back into the network.
'''
# use the caller's DataFrame if provided, otherwise export the current matrix
if df is None:
df = self.export_df()
if axis == 'row':
axis = 0
if axis == 'col':
axis = 1
df = df.sample(n=num_samples, replace=replace, weights=weights, random_state=random_state, axis=axis)
self.load_df(df) | python | def random_sample(self, num_samples, df=None, replace=False, weights=None, random_state=100, axis='row'):
'''
Randomly sample the matrix rows or columns and load the result back into the network.
'''
# use the caller's DataFrame if provided, otherwise export the current matrix
if df is None:
df = self.export_df()
if axis == 'row':
axis = 0
if axis == 'col':
axis = 1
df = df.sample(n=num_samples, replace=replace, weights=weights, random_state=random_state, axis=axis)
self.load_df(df) | Randomly sample the matrix rows or columns and load the result back into the network. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L342-L358
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.add_cats | def add_cats(self, axis, cat_data):
'''
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
]
'''
for inst_data in cat_data:
categories.add_cats(self, axis, inst_data) | python | def add_cats(self, axis, cat_data):
'''
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
]
'''
for inst_data in cat_data:
categories.add_cats(self, axis, inst_data) | Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
] | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L360-L390 |
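Applying the documented cat_data structure to the rows of an already-loaded net:

cat_data = [{'title': 'First Category',
             'cats': {'true': ['ROS1', 'AAK1']}}]
net.add_cats('row', cat_data)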
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.enrichrgram | def enrichrgram(self, lib, axis='row'):
'''
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down
'''
df = self.export_df()
df, bar_info = enr_fun.add_enrichr_cats(df, axis, lib)
self.load_df(df)
self.dat['enrichrgram_lib'] = lib
self.dat['row_cat_bars'] = bar_info | python | def enrichrgram(self, lib, axis='row'):
'''
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down
'''
df = self.export_df()
df, bar_info = enr_fun.add_enrichr_cats(df, axis, lib)
self.load_df(df)
self.dat['enrichrgram_lib'] = lib
self.dat['row_cat_bars'] = bar_info | Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L406-L438 |
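Typical usage with one of the listed libraries, run before clustering so the enrichment results come through as row categories:

net.enrichrgram('KEGG_2016')
net.cluster()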
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.load_gene_exp_to_df | def load_gene_exp_to_df(inst_path):
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matrix.todense()
# genes
filename = inst_path + 'genes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
# # add unique id to all genes
# genes = []
# unique_id = 0
# for inst_line in lines:
# inst_line = inst_line.strip().split()
# if len(inst_line) > 1:
# inst_gene = inst_line[1]
# else:
# inst_gene = inst_line[0]
# genes.append(inst_gene + '_' + str(unique_id))
# unique_id = unique_id + 1
# add unique id only to duplicate genes
ini_genes = []
for inst_line in lines:
inst_line = inst_line.strip().split()
if len(inst_line) > 1:
inst_gene = inst_line[1]
else:
inst_gene = inst_line[0]
ini_genes.append(inst_gene)
gene_name_count = pd.Series(ini_genes).value_counts()
duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()
dup_index = {}
genes = []
for inst_row in ini_genes:
# add index to non-unique genes
if inst_row in duplicate_genes:
# calc non-unique index
if inst_row not in dup_index:
dup_index[inst_row] = 1
else:
dup_index[inst_row] = dup_index[inst_row] + 1
new_row = inst_row + '_' + str(dup_index[inst_row])
else:
new_row = inst_row
genes.append(new_row)
# barcodes
filename = inst_path + 'barcodes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
cell_barcodes = []
for inst_bc in lines:
inst_bc = inst_bc.strip().split('\t')
# remove dash from barcodes if necessary
if '-' in inst_bc[0]:
inst_bc[0] = inst_bc[0].split('-')[0]
cell_barcodes.append(inst_bc[0])
# parse tuples if necessary
try:
cell_barcodes = [make_tuple(x) for x in cell_barcodes]
except:
pass
try:
genes = [make_tuple(x) for x in genes]
except:
pass
# make dataframe
df = pd.DataFrame(mat, index=genes, columns=cell_barcodes)
return df | python | def load_gene_exp_to_df(inst_path):
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matrix.todense()
# genes
filename = inst_path + 'genes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
# # add unique id to all genes
# genes = []
# unique_id = 0
# for inst_line in lines:
# inst_line = inst_line.strip().split()
# if len(inst_line) > 1:
# inst_gene = inst_line[1]
# else:
# inst_gene = inst_line[0]
# genes.append(inst_gene + '_' + str(unique_id))
# unique_id = unique_id + 1
# add unique id only to duplicate genes
ini_genes = []
for inst_line in lines:
inst_line = inst_line.strip().split()
if len(inst_line) > 1:
inst_gene = inst_line[1]
else:
inst_gene = inst_line[0]
ini_genes.append(inst_gene)
gene_name_count = pd.Series(ini_genes).value_counts()
duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()
dup_index = {}
genes = []
for inst_row in ini_genes:
# add index to non-unique genes
if inst_row in duplicate_genes:
# calc non-unique index
if inst_row not in dup_index:
dup_index[inst_row] = 1
else:
dup_index[inst_row] = dup_index[inst_row] + 1
new_row = inst_row + '_' + str(dup_index[inst_row])
else:
new_row = inst_row
genes.append(new_row)
# barcodes
filename = inst_path + 'barcodes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
cell_barcodes = []
for inst_bc in lines:
inst_bc = inst_bc.strip().split('\t')
# remove dash from barcodes if necessary
if '-' in inst_bc[0]:
inst_bc[0] = inst_bc[0].split('-')[0]
cell_barcodes.append(inst_bc[0])
# parse tuples if necessary
try:
cell_barcodes = [make_tuple(x) for x in cell_barcodes]
except:
pass
try:
genes = [make_tuple(x) for x in genes]
except:
pass
# make dataframe
df = pd.DataFrame(mat, index=genes, columns=cell_barcodes)
return df | Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L453-L551 |
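A compact sketch of the same 10x loading pattern, minus the duplicate-gene suffixing; the matrix.mtx / genes.tsv / barcodes.tsv triplet is the standard layout the function above assumes:

import pandas as pd
from scipy import io

def load_10x(inst_path):
    mat = io.mmread(inst_path + 'matrix.mtx').todense()
    # genes.tsv may have one or two columns; take the symbol when present
    with open(inst_path + 'genes.tsv') as f:
        genes = [line.strip().split()[-1] for line in f]
    with open(inst_path + 'barcodes.tsv') as f:
        barcodes = [line.strip().split('\t')[0] for line in f]
    return pd.DataFrame(mat, index=genes, columns=barcodes)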
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.sim_same_and_diff_category_samples | def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine',
equal_var=False, plot_roc=True,
precalc_dist=False, calc_roc=True):
'''
Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 is the first category
'''
cols = df.columns.tolist()
if type(precalc_dist) == bool:
# compute similarity between samples (transpose so columns become rows)
dist_arr = 1 - pdist(df.transpose(), metric=dist_type)
else:
dist_arr = precalc_dist
# generate sample names with categories
sample_combos = list(combinations(range(df.shape[1]),2))
sample_names = [str(ind) + '_same' if cols[x[0]][cat_index] == cols[x[1]][cat_index] else str(ind) + '_different' for ind, x in enumerate(sample_combos)]
ser_dist = pd.Series(data=dist_arr, index=sample_names)
# find same-cat sample comparisons
same_cat = [x for x in sample_names if x.split('_')[1] == 'same']
# find diff-cat sample comparisons
diff_cat = [x for x in sample_names if x.split('_')[1] == 'different']
# make series of same and diff category sample comparisons
ser_same = ser_dist[same_cat]
ser_same.name = 'Same Category'
ser_diff = ser_dist[diff_cat]
ser_diff.name = 'Different Category'
sim_dict = {}
roc_data = {}
sim_data = {}
sim_dict['same'] = ser_same
sim_dict['diff'] = ser_diff
pval_dict = {}
ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same, equal_var=equal_var)
ttest_stat, pval_dict['mannwhitney'] = mannwhitneyu(ser_diff, ser_same)
if calc_roc:
# calc AUC
true_index = list(np.ones(sim_dict['same'].shape[0]))
false_index = list(np.zeros(sim_dict['diff'].shape[0]))
y_true = true_index + false_index
true_val = list(sim_dict['same'].get_values())
false_val = list(sim_dict['diff'].get_values())
y_score = true_val + false_val
fpr, tpr, thresholds = roc_curve(y_true, y_score)
inst_auc = auc(fpr, tpr)
if plot_roc:
plt.figure()
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.figure(figsize=(10,10))
print('AUC', inst_auc)
roc_data['true'] = y_true
roc_data['score'] = y_score
roc_data['fpr'] = fpr
roc_data['tpr'] = tpr
roc_data['thresholds'] = thresholds
roc_data['auc'] = inst_auc
sim_data['sim_dict'] = sim_dict
sim_data['pval_dict'] = pval_dict
sim_data['roc_data'] = roc_data
return sim_data | python | def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine',
equal_var=False, plot_roc=True,
precalc_dist=False, calc_roc=True):
'''
Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 is the first category
'''
cols = df.columns.tolist()
if type(precalc_dist) == bool:
# compute similarity between samples (transpose so columns become rows)
dist_arr = 1 - pdist(df.transpose(), metric=dist_type)
else:
dist_arr = precalc_dist
# generate sample names with categories
sample_combos = list(combinations(range(df.shape[1]),2))
sample_names = [str(ind) + '_same' if cols[x[0]][cat_index] == cols[x[1]][cat_index] else str(ind) + '_different' for ind, x in enumerate(sample_combos)]
ser_dist = pd.Series(data=dist_arr, index=sample_names)
# find same-cat sample comparisons
same_cat = [x for x in sample_names if x.split('_')[1] == 'same']
# find diff-cat sample comparisons
diff_cat = [x for x in sample_names if x.split('_')[1] == 'different']
# make series of same and diff category sample comparisons
ser_same = ser_dist[same_cat]
ser_same.name = 'Same Category'
ser_diff = ser_dist[diff_cat]
ser_diff.name = 'Different Category'
sim_dict = {}
roc_data = {}
sim_data = {}
sim_dict['same'] = ser_same
sim_dict['diff'] = ser_diff
pval_dict = {}
ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same, equal_var=equal_var)
ttest_stat, pval_dict['mannwhitney'] = mannwhitneyu(ser_diff, ser_same)
if calc_roc:
# calc AUC
true_index = list(np.ones(sim_dict['same'].shape[0]))
false_index = list(np.zeros(sim_dict['diff'].shape[0]))
y_true = true_index + false_index
true_val = list(sim_dict['same'].get_values())
false_val = list(sim_dict['diff'].get_values())
y_score = true_val + false_val
fpr, tpr, thresholds = roc_curve(y_true, y_score)
inst_auc = auc(fpr, tpr)
if plot_roc:
plt.figure()
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.figure(figsize=(10,10))
print('AUC', inst_auc)
roc_data['true'] = y_true
roc_data['score'] = y_score
roc_data['fpr'] = fpr
roc_data['tpr'] = tpr
roc_data['thresholds'] = thresholds
roc_data['auc'] = inst_auc
sim_data['sim_dict'] = sim_dict
sim_data['pval_dict'] = pval_dict
sim_data['roc_data'] = roc_data
return sim_data | Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 is the first category | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L583-L663 |
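A standalone toy of the ROC step above: when a category is well separated, same-category similarities score higher than different-category ones and the AUC approaches 1:

import numpy as np
from sklearn.metrics import roc_curve, auc

same = np.array([0.9, 0.85, 0.8, 0.7])   # toy same-category similarities
diff = np.array([0.6, 0.5, 0.4, 0.3])    # toy different-category similarities
y_true = np.concatenate([np.ones(len(same)), np.zeros(len(diff))])
y_score = np.concatenate([same, diff])
fpr, tpr, _ = roc_curve(y_true, y_score)
print('AUC', auc(fpr, tpr))              # 1.0 here: perfect separation in this toy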
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.generate_signatures | def generate_signatures(self, df_ini, category_level, pval_cutoff=0.05,
num_top_dims=False, verbose=True, equal_var=False):
''' Generate signatures for column categories '''
df_t = df_ini.transpose()
# remove columns with constant values
df_t = df_t.loc[:, (df_t != df_t.iloc[0]).any()]
df = self.row_tuple_to_multiindex(df_t)
cell_types = sorted(list(set(df.index.get_level_values(category_level).tolist())))
keep_genes = []
keep_genes_dict = {}
gene_pval_dict = {}
all_fold_info = {}
for inst_ct in cell_types:
inst_ct_mat = df.xs(key=inst_ct, level=category_level)
inst_other_mat = df.drop(inst_ct, level=category_level)
# save mean values and fold change
fold_info = {}
fold_info['cluster_mean'] = inst_ct_mat.mean()
fold_info['other_mean'] = inst_other_mat.mean()
fold_info['log2_fold'] = fold_info['cluster_mean']/fold_info['other_mean']
fold_info['log2_fold'] = fold_info['log2_fold'].apply(np.log2)
all_fold_info[inst_ct] = fold_info
inst_stats, inst_pvals = ttest_ind(inst_ct_mat, inst_other_mat, axis=0, equal_var=equal_var)
ser_pval = pd.Series(data=inst_pvals, index=df.columns.tolist()).sort_values()
if num_top_dims == False:
ser_pval_keep = ser_pval[ser_pval < pval_cutoff]
else:
ser_pval_keep = ser_pval[:num_top_dims]
gene_pval_dict[inst_ct] = ser_pval_keep
inst_keep = ser_pval_keep.index.tolist()
keep_genes.extend(inst_keep)
keep_genes_dict[inst_ct] = inst_keep
keep_genes = sorted(list(set(keep_genes)))
df_gbm = df.groupby(level=category_level).mean().transpose()
cols = df_gbm.columns.tolist()
new_cols = []
for inst_col in cols:
new_col = (inst_col, category_level + ': ' + inst_col)
new_cols.append(new_col)
df_gbm.columns = new_cols
df_sig = df_gbm.ix[keep_genes]
if len(keep_genes) == 0 and verbose:
print('found no informative dimensions')
df_gene_pval = pd.concat(gene_pval_dict, axis=1, sort=False)
return df_sig, keep_genes_dict, df_gene_pval, all_fold_info | python | def generate_signatures(self, df_ini, category_level, pval_cutoff=0.05,
num_top_dims=False, verbose=True, equal_var=False):
''' Generate signatures for column categories '''
df_t = df_ini.transpose()
# remove columns with constant values
df_t = df_t.loc[:, (df_t != df_t.iloc[0]).any()]
df = self.row_tuple_to_multiindex(df_t)
cell_types = sorted(list(set(df.index.get_level_values(category_level).tolist())))
keep_genes = []
keep_genes_dict = {}
gene_pval_dict = {}
all_fold_info = {}
for inst_ct in cell_types:
inst_ct_mat = df.xs(key=inst_ct, level=category_level)
inst_other_mat = df.drop(inst_ct, level=category_level)
# save mean values and fold change
fold_info = {}
fold_info['cluster_mean'] = inst_ct_mat.mean()
fold_info['other_mean'] = inst_other_mat.mean()
fold_info['log2_fold'] = fold_info['cluster_mean']/fold_info['other_mean']
fold_info['log2_fold'] = fold_info['log2_fold'].apply(np.log2)
all_fold_info[inst_ct] = fold_info
inst_stats, inst_pvals = ttest_ind(inst_ct_mat, inst_other_mat, axis=0, equal_var=equal_var)
ser_pval = pd.Series(data=inst_pvals, index=df.columns.tolist()).sort_values()
if num_top_dims == False:
ser_pval_keep = ser_pval[ser_pval < pval_cutoff]
else:
ser_pval_keep = ser_pval[:num_top_dims]
gene_pval_dict[inst_ct] = ser_pval_keep
inst_keep = ser_pval_keep.index.tolist()
keep_genes.extend(inst_keep)
keep_genes_dict[inst_ct] = inst_keep
keep_genes = sorted(list(set(keep_genes)))
df_gbm = df.groupby(level=category_level).mean().transpose()
cols = df_gbm.columns.tolist()
new_cols = []
for inst_col in cols:
new_col = (inst_col, category_level + ': ' + inst_col)
new_cols.append(new_col)
df_gbm.columns = new_cols
df_sig = df_gbm.ix[keep_genes]
if len(keep_genes) == 0 and verbose:
print('found no informative dimensions')
df_gene_pval = pd.concat(gene_pval_dict, axis=1, sort=False)
return df_sig, keep_genes_dict, df_gene_pval, all_fold_info | Generate signatures for column categories | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L665-L729 |
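A toy of the one-vs-rest Welch t-test driving the signature selection above; gene indices with p below the cutoff would form the cluster's signature:

import numpy as np
from scipy.stats import ttest_ind

rng = np.random.default_rng(0)
cluster = rng.normal(2.0, 1.0, size=(20, 5))   # 20 cells x 5 genes, shifted mean
rest = rng.normal(0.0, 1.0, size=(40, 5))      # all other cells
stats, pvals = ttest_ind(cluster, rest, axis=0, equal_var=False)
keep = np.where(pvals < 0.05)[0]
print('signature gene indices:', keep)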
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.predict_cats_from_sigs | def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category',
truth_level=1, unknown_thresh=-1):
''' Predict category using signature '''
keep_rows = df_sig_ini.index.tolist()
data_rows = df_data_ini.index.tolist()
common_rows = list(set(data_rows).intersection(keep_rows))
df_data = deepcopy(df_data_ini.ix[common_rows])
df_sig = deepcopy(df_sig_ini.ix[common_rows])
# calculate sim_mat of df_data and df_sig
cell_types = df_sig.columns.tolist()
barcodes = df_data.columns.tolist()
sim_mat = 1 - pairwise_distances(df_sig.transpose(), df_data.transpose(), metric=dist_type)
df_sim = pd.DataFrame(data=sim_mat, index=cell_types, columns=barcodes).transpose()
# get the top column value (most similar signature)
df_sim_top = df_sim.idxmax(axis=1)
# get the maximum similarity of a cell to a cell type definition
max_sim = df_sim.max(axis=1)
unknown_cells = max_sim[max_sim < unknown_thresh].index.tolist()
# assign unknown cells (need category of same name)
df_sim_top[unknown_cells] = 'Unknown'
# add predicted category name to top list
top_list = df_sim_top.get_values()
top_list = [ predict_level + ': ' + x[0] if type(x) is tuple else predict_level + ': ' + x for x in top_list]
# add cell type category to input data
df_cat = deepcopy(df_data)
cols = df_cat.columns.tolist()
new_cols = []
# check whether the columns have the true category available
has_truth = False
if type(cols[0]) is tuple:
has_truth = True
if has_truth:
new_cols = [tuple(list(a) + [b]) for a,b in zip(cols, top_list)]
else:
new_cols = [tuple([a] + [b]) for a,b in zip(cols, top_list)]
# transfer new categories
df_cat.columns = new_cols
# keep track of true and predicted labels
y_info = {}
y_info['true'] = []
y_info['pred'] = []
if has_truth:
y_info['true'] = [x[truth_level].split(': ')[1] for x in cols]
y_info['pred'] = [x.split(': ')[1] for x in top_list]
return df_cat, df_sim.transpose(), y_info | python | def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category',
truth_level=1, unknown_thresh=-1):
''' Predict category using signature '''
keep_rows = df_sig_ini.index.tolist()
data_rows = df_data_ini.index.tolist()
common_rows = list(set(data_rows).intersection(keep_rows))
df_data = deepcopy(df_data_ini.ix[common_rows])
df_sig = deepcopy(df_sig_ini.ix[common_rows])
# calculate sim_mat of df_data and df_sig
cell_types = df_sig.columns.tolist()
barcodes = df_data.columns.tolist()
sim_mat = 1 - pairwise_distances(df_sig.transpose(), df_data.transpose(), metric=dist_type)
df_sim = pd.DataFrame(data=sim_mat, index=cell_types, columns=barcodes).transpose()
# get the top column value (most similar signature)
df_sim_top = df_sim.idxmax(axis=1)
# get the maximum similarity of a cell to a cell type definition
max_sim = df_sim.max(axis=1)
unknown_cells = max_sim[max_sim < unknown_thresh].index.tolist()
# assign unknown cells (need category of same name)
df_sim_top[unknown_cells] = 'Unknown'
# add predicted category name to top list
top_list = df_sim_top.get_values()
top_list = [ predict_level + ': ' + x[0] if type(x) is tuple else predict_level + ': ' + x for x in top_list]
# add cell type category to input data
df_cat = deepcopy(df_data)
cols = df_cat.columns.tolist()
new_cols = []
# check whether the columns have the true category available
has_truth = False
if type(cols[0]) is tuple:
has_truth = True
if has_truth:
new_cols = [tuple(list(a) + [b]) for a,b in zip(cols, top_list)]
else:
new_cols = [tuple([a] + [b]) for a,b in zip(cols, top_list)]
# transfer new categories
df_cat.columns = new_cols
# keep track of true and predicted labels
y_info = {}
y_info['true'] = []
y_info['pred'] = []
if has_truth:
y_info['true'] = [x[truth_level].split(': ')[1] for x in cols]
y_info['pred'] = [x.split(': ')[1] for x in top_list]
return df_cat, df_sim.transpose(), y_info | Predict category using signature | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L731-L791 |
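The core of the prediction step above in isolation: cosine similarity of every cell to every signature, then an argmax over signatures (toy data; the cell-type names are hypothetical):

import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances

rng = np.random.default_rng(1)
df_sig = pd.DataFrame(rng.random((50, 3)), columns=['T cell', 'B cell', 'NK'])
df_data = pd.DataFrame(rng.random((50, 10)))             # 50 genes x 10 cells
sim = 1 - pairwise_distances(df_sig.T, df_data.T, metric='cosine')
df_sim = pd.DataFrame(sim, index=df_sig.columns, columns=df_data.columns)
print(df_sim.idxmax(axis=0))                             # predicted type per cell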
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/__init__.py | Network.confusion_matrix_and_correct_series | def confusion_matrix_and_correct_series(self, y_info):
''' Generate confusion matrix from y_info '''
a = deepcopy(y_info['true'])
true_count = dict((i, a.count(i)) for i in set(a))
a = deepcopy(y_info['pred'])
pred_count = dict((i, a.count(i)) for i in set(a))
sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))
conf_mat = confusion_matrix(y_info['true'], y_info['pred'], sorted_cats)
df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)
total_correct = np.trace(df_conf)
total_pred = df_conf.sum().sum()
fraction_correct = total_correct/float(total_pred)
# calculate ser_correct
correct_list = []
cat_counts = df_conf.sum(axis=1)
all_cols = df_conf.columns.tolist()
for inst_cat in all_cols:
inst_correct = df_conf[inst_cat].loc[inst_cat] / cat_counts[inst_cat]
correct_list.append(inst_correct)
ser_correct = pd.Series(data=correct_list, index=all_cols)
populations = {}
populations['true'] = true_count
populations['pred'] = pred_count
return df_conf, populations, ser_correct, fraction_correct | python | def confusion_matrix_and_correct_series(self, y_info):
''' Generate confusion matrix from y_info '''
a = deepcopy(y_info['true'])
true_count = dict((i, a.count(i)) for i in set(a))
a = deepcopy(y_info['pred'])
pred_count = dict((i, a.count(i)) for i in set(a))
sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))
conf_mat = confusion_matrix(y_info['true'], y_info['pred'], sorted_cats)
df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)
total_correct = np.trace(df_conf)
total_pred = df_conf.sum().sum()
fraction_correct = total_correct/float(total_pred)
# calculate ser_correct
correct_list = []
cat_counts = df_conf.sum(axis=1)
all_cols = df_conf.columns.tolist()
for inst_cat in all_cols:
inst_correct = df_conf[inst_cat].loc[inst_cat] / cat_counts[inst_cat]
correct_list.append(inst_correct)
ser_correct = pd.Series(data=correct_list, index=all_cols)
populations = {}
populations['true'] = true_count
populations['pred'] = pred_count
return df_conf, populations, ser_correct, fraction_correct | Generate confusion matrix from y_info | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L793-L825 |
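The same bookkeeping with current scikit-learn, where the category order is passed via the labels keyword:

import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix

y_true = ['A', 'A', 'B', 'B', 'B']
y_pred = ['A', 'B', 'B', 'B', 'A']
cats = sorted(set(y_true + y_pred))
df_conf = pd.DataFrame(confusion_matrix(y_true, y_pred, labels=cats), index=cats, columns=cats)
print('fraction correct:', np.trace(df_conf) / df_conf.values.sum())
print(np.diag(df_conf) / df_conf.sum(axis=1))   # per-class accuracy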
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/load_data.py | load_data_to_net | def load_data_to_net(net, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
net.dat['nodes'] = inst_net['nodes']
net.dat['mat'] = inst_net['mat']
data_formats.mat_to_numpy_arr(net) | python | def load_data_to_net(net, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
net.dat['nodes'] = inst_net['nodes']
net.dat['mat'] = inst_net['mat']
data_formats.mat_to_numpy_arr(net) | load data into nodes and mat, also convert mat to numpy array | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/load_data.py#L96-L100 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/export_data.py | export_net_json | def export_net_json(net, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(net.dat)
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
if 'mat_orig' in exp_dict:
exp_dict['mat_orig'] = exp_dict['mat_orig'].tolist()
elif net_type == 'viz':
exp_dict = net.viz
elif net_type == 'sim_row':
exp_dict = net.sim['row']
elif net_type == 'sim_col':
exp_dict = net.sim['col']
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json | python | def export_net_json(net, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(net.dat)
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
if 'mat_orig' in exp_dict:
exp_dict['mat_orig'] = exp_dict['mat_orig'].tolist()
elif net_type == 'viz':
exp_dict = net.viz
elif net_type == 'sim_row':
exp_dict = net.sim['row']
elif net_type == 'sim_col':
exp_dict = net.sim['col']
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json | export json string of dat | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/export_data.py#L1-L29 |
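A hypothetical round trip for the exporter above; it assumes `net` is a Network object already holding the dat/viz dictionaries the function reads:

viz_json = export_net_json(net, 'viz')                    # compact string for the front-end
dat_json = export_net_json(net, 'dat', indent='indent')   # pretty-printed copy of the data
with open('mult_view.json', 'w') as f:
    f.write(dat_json)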
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/export_data.py | write_matrix_to_tsv | def write_matrix_to_tsv(net, filename=None, df=None):
'''
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
'''
import pandas as pd
if df is None:
df = net.dat_to_df()
return df['mat'].to_csv(filename, sep='\t') | python | def write_matrix_to_tsv(net, filename=None, df=None):
'''
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
'''
import pandas as pd
if df is None:
df = net.dat_to_df()
return df['mat'].to_csv(filename, sep='\t') | This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/export_data.py#L31-L42 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/make_unique_labels.py | main | def main(net, df=None):
'''
Run in the load_data module (which runs when a file or a dataframe is loaded);
check for duplicate row/col names, and add an index to names if necessary
'''
if df is None:
df = net.export_df()
# rows
#############
rows = df.index.tolist()
if type(rows[0]) is str:
if len(rows) != len(list(set(rows))):
new_rows = add_index_list(rows)
df.index = new_rows
elif type(rows[0]) is tuple:
row_names = []
for inst_row in rows:
row_names.append(inst_row[0])
if len(row_names) != len(list(set(row_names))):
row_names = add_index_list(row_names)
# add back to tuple
new_rows = []
for inst_index in range(len(rows)):
inst_row = rows[inst_index]
new_row = list(inst_row)
new_row[0] = row_names[inst_index]
new_row = tuple(new_row)
new_rows.append(new_row)
df.index = new_rows
# cols
#############
cols = df.columns.tolist()
if type(cols[0]) is str:
# list column names
if len(cols) != len(list(set(cols))):
new_cols = add_index_list(cols)
df.columns = new_cols
elif type(cols[0]) is tuple:
col_names = []
for inst_col in cols:
col_names.append(inst_col[0])
if len(col_names) != len(list(set(col_names))):
col_names = add_index_list(col_names)
# add back to tuple
new_cols = []
for inst_index in range(len(cols)):
inst_col = cols[inst_index]
new_col = list(inst_col)
new_col[0] = col_names[inst_index]
new_col = tuple(new_col)
new_cols.append(new_col)
df.columns = new_cols
# return dataframe with unique names
return df | python | def main(net, df=None):
'''
Run in the load_data module (which runs when a file or a dataframe is loaded);
check for duplicate row/col names, and add an index to names if necessary
'''
if df is None:
df = net.export_df()
# rows
#############
rows = df.index.tolist()
if type(rows[0]) is str:
if len(rows) != len(list(set(rows))):
new_rows = add_index_list(rows)
df.index = new_rows
elif type(rows[0]) is tuple:
row_names = []
for inst_row in rows:
row_names.append(inst_row[0])
if len(row_names) != len(list(set(row_names))):
row_names = add_index_list(row_names)
# add back to tuple
new_rows = []
for inst_index in range(len(rows)):
inst_row = rows[inst_index]
new_row = list(inst_row)
new_row[0] = row_names[inst_index]
new_row = tuple(new_row)
new_rows.append(new_row)
df.index = new_rows
# cols
#############
cols = df.columns.tolist()
if type(cols[0]) is str:
# list column names
if len(cols) != len(list(set(cols))):
new_cols = add_index_list(cols)
df.columns = new_cols
elif type(cols[0]) is tuple:
col_names = []
for inst_col in cols:
col_names.append(inst_col[0])
if len(col_names) != len(list(set(col_names))):
col_names = add_index_list(col_names)
# add back to tuple
new_cols = []
for inst_index in range(len(cols)):
inst_col = cols[inst_index]
new_col = list(inst_col)
new_col[0] = col_names[inst_index]
new_col = tuple(new_col)
new_cols.append(new_col)
df.columns = new_cols
# return dataframe with unique names
return df | Run in load_data module (which runs when file is loaded or dataframe is loaded),
check for duplicate row/col names, and add index to names if necesary | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/make_unique_labels.py#L3-L71 |
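add_index_list itself is not shown in this entry, so here is an illustrative stand-in with the same contract: append a running count to every occurrence of a duplicated name (the suffix format is an assumption):

def add_index_list(names):
    seen = {}
    out = []
    for name in names:
        seen[name] = seen.get(name, 0) + 1
        out.append(name + '-' + str(seen[name]))
    return out

print(add_index_list(['CD4', 'CD8', 'CD4']))   # ['CD4-1', 'CD8-1', 'CD4-2']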
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/make_viz.py | viz_json | def viz_json(net, dendro=True, links=False):
''' make the dictionary for the clustergram.js visualization '''
from . import calc_clust
import numpy as np
all_dist = calc_clust.group_cutoffs()
for inst_rc in net.dat['nodes']:
inst_keys = net.dat['node_info'][inst_rc]
all_cats = [x for x in inst_keys if 'cat-' in x]
for i in range(len(net.dat['nodes'][inst_rc])):
inst_dict = {}
inst_dict['name'] = net.dat['nodes'][inst_rc][i]
inst_dict['ini'] = net.dat['node_info'][inst_rc]['ini'][i]
inst_dict['clust'] = net.dat['node_info'][inst_rc]['clust'].index(i)
inst_dict['rank'] = net.dat['node_info'][inst_rc]['rank'][i]
if 'rankvar' in inst_keys:
inst_dict['rankvar'] = net.dat['node_info'][inst_rc]['rankvar'][i]
# fix for similarity matrix
if len(all_cats) > 0:
for inst_name_cat in all_cats:
actual_cat_name = net.dat['node_info'][inst_rc][inst_name_cat][i]
inst_dict[inst_name_cat] = actual_cat_name
check_pval = 'pval_'+inst_name_cat.replace('-','_')
if check_pval in net.dat['node_info'][inst_rc]:
tmp_pval_name = inst_name_cat.replace('-','_') + '_pval'
inst_dict[tmp_pval_name] = net.dat['node_info'][inst_rc][check_pval][actual_cat_name]
tmp_index_name = inst_name_cat.replace('-', '_') + '_index'
inst_dict[tmp_index_name] = net.dat['node_info'][inst_rc] \
[tmp_index_name][i]
if len(net.dat['node_info'][inst_rc]['value']) > 0:
inst_dict['value'] = net.dat['node_info'][inst_rc]['value'][i]
if len(net.dat['node_info'][inst_rc]['info']) > 0:
inst_dict['info'] = net.dat['node_info'][inst_rc]['info'][i]
if dendro is True:
inst_dict['group'] = []
for tmp_dist in all_dist:
tmp_dist = str(tmp_dist).replace('.', '')
tmp_append = float(
net.dat['node_info'][inst_rc]['group'][tmp_dist][i])
inst_dict['group'].append(tmp_append)
net.viz[inst_rc + '_nodes'].append(inst_dict)
mat_types = ['mat', 'mat_orig', 'mat_info', 'mat_hl', 'mat_up', 'mat_dn']
# save data as links or mat
###########################
if links is True:
for i in range(len(net.dat['nodes']['row'])):
for j in range(len(net.dat['nodes']['col'])):
inst_dict = {}
inst_dict['source'] = i
inst_dict['target'] = j
inst_dict['value'] = float(net.dat['mat'][i, j])
if 'mat_up' in net.dat:
inst_dict['value_up'] = net.dat['mat_up'][i, j]
inst_dict['value_dn'] = net.dat['mat_dn'][i, j]
if 'mat_orig' in net.dat:
inst_dict['value_orig'] = net.dat['mat_orig'][i, j]
if np.isnan(inst_dict['value_orig']):
inst_dict['value_orig'] = 'NaN'
if 'mat_info' in net.dat:
inst_dict['info'] = net.dat['mat_info'][str((i, j))]
if 'mat_hl' in net.dat:
inst_dict['highlight'] = net.dat['mat_hl'][i, j]
net.viz['links'].append(inst_dict)
else:
for inst_mat in mat_types:
if inst_mat in net.dat:
net.viz[inst_mat] = net.dat[inst_mat].tolist() | python | def viz_json(net, dendro=True, links=False):
''' make the dictionary for the clustergram.js visualization '''
from . import calc_clust
import numpy as np
all_dist = calc_clust.group_cutoffs()
for inst_rc in net.dat['nodes']:
inst_keys = net.dat['node_info'][inst_rc]
all_cats = [x for x in inst_keys if 'cat-' in x]
for i in range(len(net.dat['nodes'][inst_rc])):
inst_dict = {}
inst_dict['name'] = net.dat['nodes'][inst_rc][i]
inst_dict['ini'] = net.dat['node_info'][inst_rc]['ini'][i]
inst_dict['clust'] = net.dat['node_info'][inst_rc]['clust'].index(i)
inst_dict['rank'] = net.dat['node_info'][inst_rc]['rank'][i]
if 'rankvar' in inst_keys:
inst_dict['rankvar'] = net.dat['node_info'][inst_rc]['rankvar'][i]
# fix for similarity matrix
if len(all_cats) > 0:
for inst_name_cat in all_cats:
actual_cat_name = net.dat['node_info'][inst_rc][inst_name_cat][i]
inst_dict[inst_name_cat] = actual_cat_name
check_pval = 'pval_'+inst_name_cat.replace('-','_')
if check_pval in net.dat['node_info'][inst_rc]:
tmp_pval_name = inst_name_cat.replace('-','_') + '_pval'
inst_dict[tmp_pval_name] = net.dat['node_info'][inst_rc][check_pval][actual_cat_name]
tmp_index_name = inst_name_cat.replace('-', '_') + '_index'
inst_dict[tmp_index_name] = net.dat['node_info'][inst_rc] \
[tmp_index_name][i]
if len(net.dat['node_info'][inst_rc]['value']) > 0:
inst_dict['value'] = net.dat['node_info'][inst_rc]['value'][i]
if len(net.dat['node_info'][inst_rc]['info']) > 0:
inst_dict['info'] = net.dat['node_info'][inst_rc]['info'][i]
if dendro is True:
inst_dict['group'] = []
for tmp_dist in all_dist:
tmp_dist = str(tmp_dist).replace('.', '')
tmp_append = float(
net.dat['node_info'][inst_rc]['group'][tmp_dist][i])
inst_dict['group'].append(tmp_append)
net.viz[inst_rc + '_nodes'].append(inst_dict)
mat_types = ['mat', 'mat_orig', 'mat_info', 'mat_hl', 'mat_up', 'mat_dn']
# save data as links or mat
###########################
if links is True:
for i in range(len(net.dat['nodes']['row'])):
for j in range(len(net.dat['nodes']['col'])):
inst_dict = {}
inst_dict['source'] = i
inst_dict['target'] = j
inst_dict['value'] = float(net.dat['mat'][i, j])
if 'mat_up' in net.dat:
inst_dict['value_up'] = net.dat['mat_up'][i, j]
inst_dict['value_dn'] = net.dat['mat_dn'][i, j]
if 'mat_orig' in net.dat:
inst_dict['value_orig'] = net.dat['mat_orig'][i, j]
if np.isnan(inst_dict['value_orig']):
inst_dict['value_orig'] = 'NaN'
if 'mat_info' in net.dat:
inst_dict['info'] = net.dat['mat_info'][str((i, j))]
if 'mat_hl' in net.dat:
inst_dict['highlight'] = net.dat['mat_hl'][i, j]
net.viz['links'].append(inst_dict)
else:
for inst_mat in mat_types:
if inst_mat in net.dat:
net.viz[inst_mat] = net.dat[inst_mat].tolist() | make the dictionary for the clustergram.js visualization | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/make_viz.py#L1-L94 |
ismms-himc/clustergrammer2 | setupbase.py | install_npm | def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False, npm=None):
"""Return a Command for managing an npm installation.
Note: The command is skipped if the `--skip-npm` flag is used.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
build_dir: str, optional
The target build directory. If this and source_dir are given,
the JavaScript will only be built if necessary.
source_dir: str, optional
The source code directory.
build_cmd: str, optional
The npm command to build assets to the build_dir.
npm: str or list, optional.
The npm executable name, or a tuple of ['node', executable].
"""
class NPM(BaseCommand):
description = 'install package.json dependencies using npm'
def run(self):
if skip_npm:
log.info('Skipping npm-installation')
return
node_package = path or HERE
node_modules = pjoin(node_package, 'node_modules')
is_yarn = os.path.exists(pjoin(node_package, 'yarn.lock'))
npm_cmd = npm
if npm is None:
if is_yarn:
npm_cmd = ['yarn']
else:
npm_cmd = ['npm']
if not which(npm_cmd[0]):
log.error("`{0}` unavailable. If you're running this command "
"using sudo, make sure `{0}` is availble to sudo"
.format(npm_cmd[0]))
return
if force or is_stale(node_modules, pjoin(node_package, 'package.json')):
log.info('Installing build dependencies with npm. This may '
'take a while...')
run(npm_cmd + ['install'], cwd=node_package)
if build_dir and source_dir and not force:
should_build = is_stale(build_dir, source_dir)
else:
should_build = True
if should_build:
run(npm_cmd + ['run', build_cmd], cwd=node_package)
return NPM | python | def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False, npm=None):
"""Return a Command for managing an npm installation.
Note: The command is skipped if the `--skip-npm` flag is used.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
build_dir: str, optional
The target build directory. If this and source_dir are given,
the JavaScript will only be build if necessary.
source_dir: str, optional
The source code directory.
build_cmd: str, optional
The npm command to build assets to the build_dir.
npm: str or list, optional.
The npm executable name, or a tuple of ['node', executable].
"""
class NPM(BaseCommand):
description = 'install package.json dependencies using npm'
def run(self):
if skip_npm:
log.info('Skipping npm-installation')
return
node_package = path or HERE
node_modules = pjoin(node_package, 'node_modules')
is_yarn = os.path.exists(pjoin(node_package, 'yarn.lock'))
npm_cmd = npm
if npm is None:
if is_yarn:
npm_cmd = ['yarn']
else:
npm_cmd = ['npm']
if not which(npm_cmd[0]):
log.error("`{0}` unavailable. If you're running this command "
"using sudo, make sure `{0}` is availble to sudo"
.format(npm_cmd[0]))
return
if force or is_stale(node_modules, pjoin(node_package, 'package.json')):
log.info('Installing build dependencies with npm. This may '
'take a while...')
run(npm_cmd + ['install'], cwd=node_package)
if build_dir and source_dir and not force:
should_build = is_stale(build_dir, source_dir)
else:
should_build = True
if should_build:
run(npm_cmd + ['run', build_cmd], cwd=node_package)
return NPM | Return a Command for managing an npm installation.
Note: The command is skipped if the `--skip-npm` flag is used.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
build_dir: str, optional
The target build directory. If this and source_dir are given,
the JavaScript will only be built if necessary.
source_dir: str, optional
The source code directory.
build_cmd: str, optional
The npm command to build assets to the build_dir.
npm: str or list, optional.
The npm executable name, or a tuple of ['node', executable]. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L321-L377 |
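A hypothetical setup.py wiring for the command above; create_cmdclass and the 'jsdeps' hook name follow the usual jupyter-packaging pattern and are assumptions here, not taken from this entry:

from setuptools import setup
from setupbase import create_cmdclass, install_npm

cmdclass = create_cmdclass('jsdeps')                           # assumed helper from this module
cmdclass['jsdeps'] = install_npm('js', build_cmd='build:all')  # hypothetical paths/commands
setup(name='mywidget', cmdclass=cmdclass)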
ismms-himc/clustergrammer2 | setupbase.py | _glob_pjoin | def _glob_pjoin(*parts):
"""Join paths for glob processing"""
if parts[0] in ('.', ''):
parts = parts[1:]
return pjoin(*parts).replace(os.sep, '/') | python | def _glob_pjoin(*parts):
"""Join paths for glob processing"""
if parts[0] in ('.', ''):
parts = parts[1:]
return pjoin(*parts).replace(os.sep, '/') | Join paths for glob processing | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L507-L511 |
ismms-himc/clustergrammer2 | setupbase.py | _get_data_files | def _get_data_files(data_specs, existing, top=HERE):
"""Expand data file specs into valid data files metadata.
Parameters
----------
data_specs: list of tuples
See [create_cmdclass] for description.
existing: list of tuples
The existing distribution data_files metadata.
Returns
-------
A valid list of data_files items.
"""
# Extract the existing data files into a staging object.
file_data = defaultdict(list)
for (path, files) in existing or []:
file_data[path] = files
# Extract the files and assign them to the proper data
# files path.
for (path, dname, pattern) in data_specs or []:
if os.path.isabs(dname):
dname = os.path.relpath(dname, top)
dname = dname.replace(os.sep, '/')
offset = 0 if dname in ('.', '') else len(dname) + 1
files = _get_files(_glob_pjoin(dname, pattern), top=top)
for fname in files:
# Normalize the path.
root = os.path.dirname(fname)
full_path = _glob_pjoin(path, root[offset:])
print(dname, root, full_path, offset)
if full_path.endswith('/'):
full_path = full_path[:-1]
file_data[full_path].append(fname)
# Construct the data files spec.
data_files = []
for (path, files) in file_data.items():
data_files.append((path, files))
return data_files | python | def _get_data_files(data_specs, existing, top=HERE):
"""Expand data file specs into valid data files metadata.
Parameters
----------
data_specs: list of tuples
See [create_cmdclass] for description.
existing: list of tuples
The existing distribution data_files metadata.
Returns
-------
A valid list of data_files items.
"""
# Extract the existing data files into a staging object.
file_data = defaultdict(list)
for (path, files) in existing or []:
file_data[path] = files
# Extract the files and assign them to the proper data
# files path.
for (path, dname, pattern) in data_specs or []:
if os.path.isabs(dname):
dname = os.path.relpath(dname, top)
dname = dname.replace(os.sep, '/')
offset = 0 if dname in ('.', '') else len(dname) + 1
files = _get_files(_glob_pjoin(dname, pattern), top=top)
for fname in files:
# Normalize the path.
root = os.path.dirname(fname)
full_path = _glob_pjoin(path, root[offset:])
print(dname, root, full_path, offset)
if full_path.endswith('/'):
full_path = full_path[:-1]
file_data[full_path].append(fname)
# Construct the data files spec.
data_files = []
for (path, files) in file_data.items():
data_files.append((path, files))
return data_files | Expand data file specs into valid data files metadata.
Parameters
----------
data_specs: list of tuples
See [create_cmdclass] for description.
existing: list of tuples
The existing distribution data_files metadata.
Returns
-------
A valid list of data_files items. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L514-L554 |
ismms-himc/clustergrammer2 | setupbase.py | _get_files | def _get_files(file_patterns, top=HERE):
"""Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored.
"""
if not isinstance(file_patterns, (list, tuple)):
file_patterns = [file_patterns]
for i, p in enumerate(file_patterns):
if os.path.isabs(p):
file_patterns[i] = os.path.relpath(p, top)
matchers = [_compile_pattern(p) for p in file_patterns]
files = set()
for root, dirnames, filenames in os.walk(top):
# Don't recurse into node_modules
if 'node_modules' in dirnames:
dirnames.remove('node_modules')
for m in matchers:
for filename in filenames:
fn = os.path.relpath(_glob_pjoin(root, filename), top)
fn = fn.replace(os.sep, '/')
if m(fn):
files.add(fn.replace(os.sep, '/'))
return list(files) | python | def _get_files(file_patterns, top=HERE):
"""Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored.
"""
if not isinstance(file_patterns, (list, tuple)):
file_patterns = [file_patterns]
for i, p in enumerate(file_patterns):
if os.path.isabs(p):
file_patterns[i] = os.path.relpath(p, top)
matchers = [_compile_pattern(p) for p in file_patterns]
files = set()
for root, dirnames, filenames in os.walk(top):
# Don't recurse into node_modules
if 'node_modules' in dirnames:
dirnames.remove('node_modules')
for m in matchers:
for filename in filenames:
fn = os.path.relpath(_glob_pjoin(root, filename), top)
fn = fn.replace(os.sep, '/')
if m(fn):
files.add(fn.replace(os.sep, '/'))
return list(files) | Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L557-L595 |
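A usage sketch with hypothetical patterns; recursive globs work because each pattern is compiled by _compile_pattern before the directory walk:

# collect built assets relative to the repo root, skipping node_modules
assets = _get_files(['static/**/*.js', 'static/**/*.css'])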
ismms-himc/clustergrammer2 | setupbase.py | _get_package_data | def _get_package_data(root, file_patterns=None):
"""Expand file patterns to a list of `package_data` paths.
Parameters
-----------
root: str
The relative path to the package root from `HERE`.
file_patterns: list or str, optional
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the root or
absolute paths. If not given, all files will be used.
Note:
Files in `node_modules` are ignored.
"""
if file_patterns is None:
file_patterns = ['*']
return _get_files(file_patterns, _glob_pjoin(HERE, root)) | python | def _get_package_data(root, file_patterns=None):
"""Expand file patterns to a list of `package_data` paths.
Parameters
-----------
root: str
The relative path to the package root from `HERE`.
file_patterns: list or str, optional
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the root or
absolute paths. If not given, all files will be used.
Note:
Files in `node_modules` are ignored.
"""
if file_patterns is None:
file_patterns = ['*']
return _get_files(file_patterns, _glob_pjoin(HERE, root)) | Expand file patterns to a list of `package_data` paths.
Parameters
-----------
root: str
The relative path to the package root from `HERE`.
file_patterns: list or str, optional
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the root or
absolute paths. If not given, all files will be used.
Note:
Files in `node_modules` are ignored. | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L598-L616 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/run_filter.py | df_filter_row_sum | def df_filter_row_sum(df, threshold, take_abs=True):
''' filter rows in matrix at some threshold
and remove rows that have a sum below this threshold '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
ini_rows = df_copy.index.values.tolist()
df_copy = df_copy.transpose()
tmp_sum = df_copy.sum(axis=0)
tmp_sum = tmp_sum.abs()
tmp_sum.sort_values(inplace=True, ascending=False)
tmp_sum = tmp_sum[tmp_sum > threshold]
keep_rows = sorted(tmp_sum.index.values.tolist())
if len(keep_rows) < len(ini_rows):
df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_rows)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_rows)
df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_rows)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_rows)
return df | python | def df_filter_row_sum(df, threshold, take_abs=True):
''' filter rows in matrix at some threshold
and remove rows that have a sum below this threshold '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
ini_rows = df_copy.index.values.tolist()
df_copy = df_copy.transpose()
tmp_sum = df_copy.sum(axis=0)
tmp_sum = tmp_sum.abs()
tmp_sum.sort_values(inplace=True, ascending=False)
tmp_sum = tmp_sum[tmp_sum > threshold]
keep_rows = sorted(tmp_sum.index.values.tolist())
if len(keep_rows) < len(ini_rows):
df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_rows)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_rows)
df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_rows)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_rows)
return df | filter rows in matrix at some threshold
and remove rows that have a sum below this threshold | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/run_filter.py#L1-L33 |
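The row filter above reduces to two lines of plain pandas; a toy check, using the same 0.0001 threshold this module applies elsewhere:

import pandas as pd

mat = pd.DataFrame([[1, -2], [0, 0.00001], [3, 4]], index=['a', 'b', 'c'])
keep = mat.abs().sum(axis=1) > 0.0001
print(mat.loc[keep])   # row 'b' (abs sum = 1e-5) is dropped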
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/run_filter.py | df_filter_col_sum | def df_filter_col_sum(df, threshold, take_abs=True):
''' filter columns in matrix at some threshold
and remove rows that have all zero values '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > threshold]
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > 0]
if take_abs is True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
else:
df['mat'] = df_copy
return df | python | def df_filter_col_sum(df, threshold, take_abs=True):
''' filter columns in matrix at some threshold
and remove rows that have all zero values '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > threshold]
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > 0]
if take_abs is True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
else:
df['mat'] = df_copy
return df | filter columns in matrix at some threshold
and remove rows that have all zero values | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/run_filter.py#L35-L68 |
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/run_filter.py | filter_threshold | def filter_threshold(df, inst_rc, threshold, num_occur=1):
'''
Filter a network's rows or cols based on num_occur values being above a
threshold (in absolute value)
'''
from copy import deepcopy
inst_df = deepcopy(df['mat'])
if inst_rc == 'col':
inst_df = inst_df.transpose()
inst_df = inst_df.abs()
ini_rows = inst_df.index.values.tolist()
inst_df[inst_df < threshold] = 0
inst_df[inst_df >= threshold] = 1
tmp_sum = inst_df.sum(axis=1)
tmp_sum = tmp_sum[tmp_sum >= num_occur]
keep_names = tmp_sum.index.values.tolist()
if inst_rc == 'row':
if len(keep_names) < len(ini_rows):
df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_names)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_names)
df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_names)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_names)
elif inst_rc == 'col':
inst_df = inst_df.transpose()
inst_rows = inst_df.index.values.tolist()
inst_cols = keep_names
df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
return df | python | def filter_threshold(df, inst_rc, threshold, num_occur=1):
'''
Filter a network's rows or cols based on num_occur values being above a
threshold (in absolute value)
'''
from copy import deepcopy
inst_df = deepcopy(df['mat'])
if inst_rc == 'col':
inst_df = inst_df.transpose()
inst_df = inst_df.abs()
ini_rows = inst_df.index.values.tolist()
inst_df[inst_df < threshold] = 0
inst_df[inst_df >= threshold] = 1
tmp_sum = inst_df.sum(axis=1)
tmp_sum = tmp_sum[tmp_sum >= num_occur]
keep_names = tmp_sum.index.values.tolist()
if inst_rc == 'row':
if len(keep_names) < len(ini_rows):
df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_names)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_names)
df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_names)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_names)
elif inst_rc == 'col':
inst_df = inst_df.transpose()
inst_rows = inst_df.index.values.tolist()
inst_cols = keep_names
df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
return df | Filter a network's rows or cols based on num_occur values being above a
threshold (in absolute value) | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/run_filter.py#L118-L169 |
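The occurrence filter above in plain pandas: count the entries at or above the threshold in absolute value and keep rows reaching num_occur:

import pandas as pd

mat = pd.DataFrame([[5, 0, 0], [2, 3, 0], [0.1, 0.2, 0.3]], index=['a', 'b', 'c'])
threshold, num_occur = 1.0, 2
keep = (mat.abs() >= threshold).sum(axis=1) >= num_occur
print(mat.loc[keep])   # only row 'b' has two entries >= 1.0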
ismms-himc/clustergrammer2 | clustergrammer2/clustergrammer_fun/make_clust_fun.py | make_clust | def make_clust(net, dist_type='cosine', run_clustering=True, dendro=True,
requested_views=['pct_row_sum', 'N_row_sum'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, sim_mat_views=['N_row_sum'],
run_enrichr=None, enrichrgram=None):
'''
This will calculate multiple views of a clustergram by filtering the
data and clustering after each filtering. This filtering will keep the top
N rows based on some quantity (sum, num-non-zero, etc).
'''
from copy import deepcopy
import scipy
from . import calc_clust, run_filter, make_views, make_sim_mat, cat_pval
from . import enrichr_functions as enr_fun
df = net.dat_to_df()
threshold = 0.0001
df = run_filter.df_filter_row_sum(df, threshold)
df = run_filter.df_filter_col_sum(df, threshold)
# default setting
define_cat_colors = False
if run_enrichr is not None:
df = enr_fun.add_enrichr_cats(df, 'row', run_enrichr)
define_cat_colors = True
# calculate initial view with no row filtering
net.df_to_dat(df, define_cat_colors=True)
inst_dm = calc_clust.cluster_row_and_col(net, dist_type=dist_type,
linkage_type=linkage_type,
run_clustering=run_clustering,
dendro=dendro, ignore_cat=False,
calc_cat_pval=calc_cat_pval)
all_views = []
send_df = deepcopy(df)
if 'N_row_sum' in requested_views:
all_views = make_views.N_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='sum')
if 'N_row_var' in requested_views:
all_views = make_views.N_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='var')
if 'pct_row_sum' in requested_views:
all_views = make_views.pct_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='sum')
if 'pct_row_var' in requested_views:
all_views = make_views.pct_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='var')
which_sim = []
if sim_mat == True:
which_sim = ['row', 'col']
elif sim_mat == 'row':
which_sim = ['row']
elif sim_mat == 'col':
which_sim = ['col']
if sim_mat is not False:
sim_net = make_sim_mat.main(net, inst_dm, which_sim, filter_sim, sim_mat_views)
net.sim = {}
for inst_rc in which_sim:
net.sim[inst_rc] = sim_net[inst_rc].viz
if inst_rc == 'row':
other_rc = 'col'
elif inst_rc == 'col':
other_rc = 'row'
# keep track of cat_colors
net.sim[inst_rc]['cat_colors'][inst_rc] = net.viz['cat_colors'][inst_rc]
net.sim[inst_rc]['cat_colors'][other_rc] = net.viz['cat_colors'][inst_rc]
else:
net.sim = {}
net.viz['views'] = all_views
if enrichrgram != None:
# toggle enrichrgram functionality from back-end
net.viz['enrichrgram'] = enrichrgram
if 'enrichrgram_lib' in net.dat:
net.viz['enrichrgram'] = True
net.viz['enrichrgram_lib'] = net.dat['enrichrgram_lib']
if 'row_cat_bars' in net.dat:
net.viz['row_cat_bars'] = net.dat['row_cat_bars'] | python | def make_clust(net, dist_type='cosine', run_clustering=True, dendro=True,
requested_views=['pct_row_sum', 'N_row_sum'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, sim_mat_views=['N_row_sum'],
run_enrichr=None, enrichrgram=None):
'''
This will calculate multiple views of a clustergram by filtering the
data and clustering after each filtering. This filtering will keep the top
N rows based on some quantity (sum, num-non-zero, etc).
'''
from copy import deepcopy
import scipy
from . import calc_clust, run_filter, make_views, make_sim_mat, cat_pval
from . import enrichr_functions as enr_fun
df = net.dat_to_df()
threshold = 0.0001
df = run_filter.df_filter_row_sum(df, threshold)
df = run_filter.df_filter_col_sum(df, threshold)
# default setting
define_cat_colors = False
if run_enrichr is not None:
df = enr_fun.add_enrichr_cats(df, 'row', run_enrichr)
define_cat_colors = True
# calculate initial view with no row filtering
net.df_to_dat(df, define_cat_colors=True)
inst_dm = calc_clust.cluster_row_and_col(net, dist_type=dist_type,
linkage_type=linkage_type,
run_clustering=run_clustering,
dendro=dendro, ignore_cat=False,
calc_cat_pval=calc_cat_pval)
all_views = []
send_df = deepcopy(df)
if 'N_row_sum' in requested_views:
all_views = make_views.N_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='sum')
if 'N_row_var' in requested_views:
all_views = make_views.N_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='var')
if 'pct_row_sum' in requested_views:
all_views = make_views.pct_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='sum')
if 'pct_row_var' in requested_views:
all_views = make_views.pct_rows(net, send_df, all_views,
dist_type=dist_type, rank_type='var')
which_sim = []
if sim_mat == True:
which_sim = ['row', 'col']
elif sim_mat == 'row':
which_sim = ['row']
elif sim_mat == 'col':
which_sim = ['col']
if sim_mat is not False:
sim_net = make_sim_mat.main(net, inst_dm, which_sim, filter_sim, sim_mat_views)
net.sim = {}
for inst_rc in which_sim:
net.sim[inst_rc] = sim_net[inst_rc].viz
if inst_rc == 'row':
other_rc = 'col'
elif inst_rc == 'col':
other_rc = 'row'
# keep track of cat_colors
net.sim[inst_rc]['cat_colors'][inst_rc] = net.viz['cat_colors'][inst_rc]
net.sim[inst_rc]['cat_colors'][other_rc] = net.viz['cat_colors'][inst_rc]
else:
net.sim = {}
net.viz['views'] = all_views
if enrichrgram != None:
# toggle enrichrgram functionality from back-end
net.viz['enrichrgram'] = enrichrgram
if 'enrichrgram_lib' in net.dat:
net.viz['enrichrgram'] = True
net.viz['enrichrgram_lib'] = net.dat['enrichrgram_lib']
if 'row_cat_bars' in net.dat:
net.viz['row_cat_bars'] = net.dat['row_cat_bars'] | This will calculate multiple views of a clustergram by filtering the
data and clustering after each filtering. This filtering will keep the top
N rows based on some quantity (sum, num-non-zero, etc). | https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/make_clust_fun.py#L1-L99 |
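A hypothetical direct call tying the pieces above together; it assumes `net` is a Network that already holds a matrix (in practice this is usually reached through a cluster()-style wrapper on the Network object):

make_clust(net, dist_type='cosine', requested_views=['N_row_sum'], sim_mat='row')
viz_str = export_net_json(net, 'viz')   # JSON consumed by the front-end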
vstinner/bytecode | bytecode/flags.py | infer_flags | def infer_flags(bytecode, is_async=False):
"""Infer the proper flags for a bytecode based on the instructions.
"""
flags = CompilerFlags(0)
if not isinstance(bytecode, (_bytecode.Bytecode,
_bytecode.ConcreteBytecode,
_bytecode.ControlFlowGraph)):
msg = ('Expected a Bytecode, ConcreteBytecode or ControlFlowGraph '
'instance, not %s')
raise ValueError(msg % bytecode)
instructions = (bytecode.get_instructions()
if isinstance(bytecode, _bytecode.ControlFlowGraph) else
bytecode)
instr_names = {i.name for i in instructions
if not isinstance(i, (_bytecode.SetLineno,
_bytecode.Label))}
if not (instr_names & {'STORE_NAME', 'LOAD_NAME', 'DELETE_NAME'}):
flags |= CompilerFlags.OPTIMIZED
flags |= bytecode.flags & (CompilerFlags.NEWLOCALS |
CompilerFlags.VARARGS |
CompilerFlags.VARKEYWORDS |
CompilerFlags.NESTED)
if instr_names & {'YIELD_VALUE', 'YIELD_FROM'}:
if not is_async and not bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
flags |= CompilerFlags.GENERATOR
else:
flags |= CompilerFlags.ASYNC_GENERATOR
if not (instr_names & {'LOAD_CLOSURE', 'LOAD_DEREF', 'STORE_DEREF',
'DELETE_DEREF', 'LOAD_CLASSDEREF'}):
flags |= CompilerFlags.NOFREE
if (not (bytecode.flags & CompilerFlags.ITERABLE_COROUTINE or
flags & CompilerFlags.ASYNC_GENERATOR) and
(instr_names & {'GET_AWAITABLE', 'GET_AITER', 'GET_ANEXT',
'BEFORE_ASYNC_WITH', 'SETUP_ASYNC_WITH'} or
bytecode.flags & CompilerFlags.COROUTINE)):
flags |= CompilerFlags.COROUTINE
flags |= bytecode.flags & CompilerFlags.ITERABLE_COROUTINE
flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
if ([bool(flags & getattr(CompilerFlags, k))
for k in ('COROUTINE', 'ITERABLE_COROUTINE', 'GENERATOR',
'ASYNC_GENERATOR')].count(True) > 1):
raise ValueError("Code should not have more than one of the "
"following flag set : generator, coroutine, "
"iterable coroutine and async generator, got:"
"%s" % flags)
return flags | python | def infer_flags(bytecode, is_async=False):
"""Infer the proper flags for a bytecode based on the instructions.
"""
flags = CompilerFlags(0)
if not isinstance(bytecode, (_bytecode.Bytecode,
_bytecode.ConcreteBytecode,
_bytecode.ControlFlowGraph)):
msg = ('Expected a Bytecode, ConcreteBytecode or ControlFlowGraph '
'instance, not %s')
raise ValueError(msg % bytecode)
instructions = (bytecode.get_instructions()
if isinstance(bytecode, _bytecode.ControlFlowGraph) else
bytecode)
instr_names = {i.name for i in instructions
if not isinstance(i, (_bytecode.SetLineno,
_bytecode.Label))}
if not (instr_names & {'STORE_NAME', 'LOAD_NAME', 'DELETE_NAME'}):
flags |= CompilerFlags.OPTIMIZED
flags |= bytecode.flags & (CompilerFlags.NEWLOCALS |
CompilerFlags.VARARGS |
CompilerFlags.VARKEYWORDS |
CompilerFlags.NESTED)
if instr_names & {'YIELD_VALUE', 'YIELD_FROM'}:
if not is_async and not bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
flags |= CompilerFlags.GENERATOR
else:
flags |= CompilerFlags.ASYNC_GENERATOR
if not (instr_names & {'LOAD_CLOSURE', 'LOAD_DEREF', 'STORE_DEREF',
'DELETE_DEREF', 'LOAD_CLASSDEREF'}):
flags |= CompilerFlags.NOFREE
if (not (bytecode.flags & CompilerFlags.ITERABLE_COROUTINE or
flags & CompilerFlags.ASYNC_GENERATOR) and
(instr_names & {'GET_AWAITABLE', 'GET_AITER', 'GET_ANEXT',
'BEFORE_ASYNC_WITH', 'SETUP_ASYNC_WITH'} or
bytecode.flags & CompilerFlags.COROUTINE)):
flags |= CompilerFlags.COROUTINE
flags |= bytecode.flags & CompilerFlags.ITERABLE_COROUTINE
flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
if ([bool(flags & getattr(CompilerFlags, k))
for k in ('COROUTINE', 'ITERABLE_COROUTINE', 'GENERATOR',
'ASYNC_GENERATOR')].count(True) > 1):
raise ValueError("Code should not have more than one of the "
"following flag set : generator, coroutine, "
"iterable coroutine and async generator, got:"
"%s" % flags)
return flags | Infer the proper flags for a bytecode based on the instructions. | https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/flags.py#L33-L89 |
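A quick way to exercise infer_flags is to round-trip a small generator through Bytecode.from_code; a sketch that assumes CompilerFlags is re-exported at the package top level:

from bytecode import Bytecode, CompilerFlags
from bytecode.flags import infer_flags

def gen():
    yield 1

flags = infer_flags(Bytecode.from_code(gen.__code__))
assert flags & CompilerFlags.GENERATOR  # YIELD_VALUE marks it as a generator
assert flags & CompilerFlags.OPTIMIZED  # no LOAD_NAME/STORE_NAME/DELETE_NAME used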
vstinner/bytecode | bytecode/cfg.py | ControlFlowGraph.to_bytecode | def to_bytecode(self):
"""Convert to Bytecode."""
used_blocks = set()
for block in self:
target_block = block.get_jump()
if target_block is not None:
used_blocks.add(id(target_block))
labels = {}
jumps = []
instructions = []
for block in self:
if id(block) in used_blocks:
new_label = Label()
labels[id(block)] = new_label
instructions.append(new_label)
for instr in block:
# don't copy SetLineno objects
if isinstance(instr, (Instr, ConcreteInstr)):
instr = instr.copy()
if isinstance(instr.arg, BasicBlock):
jumps.append(instr)
instructions.append(instr)
# Map to new labels
for instr in jumps:
instr.arg = labels[id(instr.arg)]
bytecode = _bytecode.Bytecode()
bytecode._copy_attr_from(self)
bytecode.argnames = list(self.argnames)
bytecode[:] = instructions
return bytecode | python | def to_bytecode(self):
"""Convert to Bytecode."""
used_blocks = set()
for block in self:
target_block = block.get_jump()
if target_block is not None:
used_blocks.add(id(target_block))
labels = {}
jumps = []
instructions = []
for block in self:
if id(block) in used_blocks:
new_label = Label()
labels[id(block)] = new_label
instructions.append(new_label)
for instr in block:
# don't copy SetLineno objects
if isinstance(instr, (Instr, ConcreteInstr)):
instr = instr.copy()
if isinstance(instr.arg, BasicBlock):
jumps.append(instr)
instructions.append(instr)
# Map to new labels
for instr in jumps:
instr.arg = labels[id(instr.arg)]
bytecode = _bytecode.Bytecode()
bytecode._copy_attr_from(self)
bytecode.argnames = list(self.argnames)
bytecode[:] = instructions
return bytecode | Convert to Bytecode. | https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/cfg.py#L279-L315 |
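The used_blocks/labels bookkeeping gives a Label only to blocks that are real jump targets, which keeps the round trip lossless. A sketch assuming the standard from_code/from_bytecode constructors:

from bytecode import Bytecode, ControlFlowGraph

def f(x):
    return x + 1 if x else 0

cfg = ControlFlowGraph.from_bytecode(Bytecode.from_code(f.__code__))
bc = cfg.to_bytecode()  # jump targets are now fresh Label objects
code = bc.to_code()     # and can be assembled back into a code object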
vstinner/bytecode | bytecode/cfg.py | ControlFlowGraph.to_code | def to_code(self, stacksize=None):
"""Convert to code."""
if stacksize is None:
stacksize = self.compute_stacksize()
bc = self.to_bytecode()
return bc.to_code(stacksize=stacksize) | python | def to_code(self, stacksize=None):
"""Convert to code."""
if stacksize is None:
stacksize = self.compute_stacksize()
bc = self.to_bytecode()
return bc.to_code(stacksize=stacksize) | Convert to code. | https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/cfg.py#L317-L322 |
vstinner/bytecode | bytecode/instr.py | Instr.set | def set(self, name, arg=UNSET):
"""Modify the instruction in-place.
Replace name and arg attributes. Don't modify lineno.
"""
self._set(name, arg, self._lineno) | python | def set(self, name, arg=UNSET):
"""Modify the instruction in-place.
Replace name and arg attributes. Don't modify lineno.
"""
self._set(name, arg, self._lineno) | Modify the instruction in-place.
Replace name and arg attributes. Don't modify lineno. | https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/instr.py#L248-L253 |
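In other words, set retargets an instruction while preserving its source mapping; a short doctest-style sketch assuming the public Instr constructor:

>>> from bytecode import Instr
>>> instr = Instr("LOAD_CONST", 1, lineno=7)
>>> instr.set("LOAD_CONST", 2)  # name/arg replaced, lineno untouched
>>> instr.arg, instr.lineno
(2, 7)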
riga/tfdeploy | tfdeploy.py | setup | def setup(tf, order=None):
"""
Sets up global variables (currently only the tensorflow version) to adapt to peculiarities of
different tensorflow versions. This function should only be called before :py:class:`Model`
creation, not for evaluation. Therefore, the tensorflow module *tf* must be passed:
.. code-block:: python
import tensorflow as tf
import tfdeploy as td
td.setup(tf)
# ...
Also, when *order* is not *None*, it is forwarded to :py:func:`optimize` for convenience.
"""
global _tf_version_string, _tf_version
_tf_version_string = tf.__version__
_tf_version = _parse_tf_version(_tf_version_string)
if order is not None:
optimize(order) | python | def setup(tf, order=None):
"""
Sets up global variables (currently only the tensorflow version) to adapt to peculiarities of
different tensorflow versions. This function should only be called before :py:class:`Model`
creation, not for evaluation. Therefore, the tensorflow module *tf* must be passed:
.. code-block:: python
import tensorflow as tf
import tfdeploy as td
td.setup(tf)
# ...
Also, when *order* is not *None*, it is forwarded to :py:func:`optimize` for convenience.
"""
global _tf_version_string, _tf_version
_tf_version_string = tf.__version__
_tf_version = _parse_tf_version(_tf_version_string)
if order is not None:
optimize(order) | Sets up global variables (currently only the tensorflow version) to adapt to peculiarities of
different tensorflow versions. This function should only be called before :py:class:`Model`
creation, not for evaluation. Therefore, the tensorflow module *tf* must be passed:
.. code-block:: python
import tensorflow as tf
import tfdeploy as td
td.setup(tf)
# ...
Also, when *order* is not *None*, it is forwarded to :py:func:`optimize` for convenience. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L805-L827 |
riga/tfdeploy | tfdeploy.py | optimize | def optimize(order):
""" optimize(impl)
Tries to set the implementation type of all registered :py:class:`Operation` classes to *impl*.
This has no effect when an op does not implement that type.
The behavior is equivalent to:
.. code-block:: python
for op in Operation.__subclasses__():
if impl in op.impls:
op.use_impl(impl)
*impl* can also be a list or tuple of valid implementation types representing a preferred order.
"""
if not isinstance(order, (list, tuple)):
order = [order]
for op in Operation.__subclasses__():
for impl in order:
if impl in op.impls:
op.use_impl(impl)
break | python | def optimize(order):
""" optimize(impl)
Tries to set the implementation type of all registered :py:class:`Operation` classes to *impl*.
This has no effect when an op does not implement that type.
The behavior is equivalent to:
.. code-block:: python
for op in Operation.__subclasses__():
if impl in op.impls:
op.use_impl(impl)
*impl* can also be a list or tuple of valid implementation types representing a preferred order.
"""
if not isinstance(order, (list, tuple)):
order = [order]
for op in Operation.__subclasses__():
for impl in order:
if impl in op.impls:
op.use_impl(impl)
break | optimize(impl)
Tries to set the implementation type of all registered :py:class:`Operation` classes to *impl*.
This has no effect when an op does not implement that type.
The behavior is equivalent to:
.. code-block:: python
for op in Operation.__subclasses__():
if impl in op.impls:
op.use_impl(impl)
*impl* can also be a list or tuple of valid implementation types representing a preferred order. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L838-L860 |
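For example, to prefer scipy-backed implementations and fall back to numpy elsewhere (assuming tfdeploy's IMPL_SCIPY/IMPL_NUMPY constants):

>>> import tfdeploy as td
>>> td.optimize([td.IMPL_SCIPY, td.IMPL_NUMPY])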
riga/tfdeploy | tfdeploy.py | print_tensor | def print_tensor(td_tensor, indent="| ", max_depth=-1, depth=0):
""" print_tensor(td_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is
indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where
each tensor and each op count as a level.
"""
offset = depth * indent
line = "td tensor: %s" % td_tensor.name
if td_tensor.value is not None:
line += " (%s)" % (",".join(str(i) for i in td_tensor.value.shape),)
print(offset + line)
if td_tensor.op and (max_depth < 0 or max_depth > depth):
print_op(td_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1) | python | def print_tensor(td_tensor, indent="| ", max_depth=-1, depth=0):
""" print_tensor(td_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is
indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where
each tensor and each op count as a level.
"""
offset = depth * indent
line = "td tensor: %s" % td_tensor.name
if td_tensor.value is not None:
line += " (%s)" % (",".join(str(i) for i in td_tensor.value.shape),)
print(offset + line)
if td_tensor.op and (max_depth < 0 or max_depth > depth):
print_op(td_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1) | print_tensor(td_tensor, indent="| ", max_depth=-1)
Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is
indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where
each tensor and each op count as a level. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L863-L877 |
riga/tfdeploy | tfdeploy.py | print_op | def print_op(td_op, indent="| ", max_depth=-1, depth=0):
""" print_op(td_op, indent=" ", max_depth=-1)
Prints the dependency graph of a :py:class:`Operation` *td_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
"""
offset = depth * indent
line = "td op: %s (%s)" % (td_op.name, ",".join(td_op.types))
print(offset + line)
if max_depth < 0 or max_depth > depth:
for td_tensor in td_op.inputs:
print_tensor(td_tensor, indent=indent, max_depth=max_depth, depth=depth+1) | python | def print_op(td_op, indent="| ", max_depth=-1, depth=0):
""" print_op(td_op, indent=" ", max_depth=-1)
Prints the dependency graph of a :py:class:`Operation` *td_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
"""
offset = depth * indent
line = "td op: %s (%s)" % (td_op.name, ",".join(td_op.types))
print(offset + line)
if max_depth < 0 or max_depth > depth:
for td_tensor in td_op.inputs:
print_tensor(td_tensor, indent=indent, max_depth=max_depth, depth=depth+1) | print_op(td_op, indent="| ", max_depth=-1)
Prints the dependency graph of a :py:class:`Operation` *td_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L880-L893 |
riga/tfdeploy | tfdeploy.py | print_tf_tensor | def print_tf_tensor(tf_tensor, indent="| ", max_depth=-1, depth=0):
""" print_tf_tensor(tf_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a tensorflow tensor *tf_tensor*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
"""
offset = depth * indent
shape = tuple(int(i) for i in tf_tensor.get_shape())
line = "tf tensor: %s (%s)" % (tf_tensor.name, ",".join(str(i) for i in shape))
print(offset + line)
if tf_tensor.op and (max_depth < 0 or max_depth > depth):
print_tf_op(tf_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1) | python | def print_tf_tensor(tf_tensor, indent="| ", max_depth=-1, depth=0):
""" print_tf_tensor(tf_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a tensorflow tensor *tf_tensor*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
"""
offset = depth * indent
shape = tuple(int(i) for i in tf_tensor.get_shape())
line = "tf tensor: %s (%s)" % (tf_tensor.name, ",".join(str(i) for i in shape))
print(offset + line)
if tf_tensor.op and (max_depth < 0 or max_depth > depth):
print_tf_op(tf_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1) | print_tf_tensor(tf_tensor, indent="| ", max_depth=-1)
Prints the dependency graph of a tensorflow tensor *tf_tensor*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L896-L909 |
riga/tfdeploy | tfdeploy.py | print_tf_op | def print_tf_op(tf_op, indent="| ", max_depth=-1, depth=0):
""" print_tf_op(tf_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a tensorflow operation *tf_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
"""
offset = depth * indent
line = "tf op: %s (%s)" % (tf_op.name, tf_op.type)
print(offset + line)
if max_depth < 0 or max_depth > depth:
for tf_tensor in tf_op.inputs:
print_tf_tensor(tf_tensor, indent=indent, max_depth=max_depth, depth=depth+1) | python | def print_tf_op(tf_op, indent="| ", max_depth=-1, depth=0):
""" print_tf_op(tf_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a tensorflow operation *tf_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
"""
offset = depth * indent
line = "tf op: %s (%s)" % (tf_op.name, tf_op.type)
print(offset + line)
if max_depth < 0 or max_depth > depth:
for tf_tensor in tf_op.inputs:
print_tf_tensor(tf_tensor, indent=indent, max_depth=max_depth, depth=depth+1) | print_tf_op(tf_op, indent="| ", max_depth=-1)
Prints the dependency graph of a tensorflow operation *tf_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L912-L925 |
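The four printers above form two mutually recursive pairs (tensor ↔ op), each hop incrementing depth, which is why max_depth counts tensors and ops as separate levels. A hedged usage sketch; the model path and tensor name are hypothetical:

>>> import tfdeploy as td
>>> model = td.Model("model.pkl")
>>> y = model.get("output")
>>> td.print_tensor(y, max_depth=4)  # stop after two tensor levels and two op levels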
riga/tfdeploy | tfdeploy.py | LinSpace | def LinSpace(start, stop, num):
"""
Linspace op.
"""
return np.linspace(start, stop, num=num, dtype=np.float32), | python | def LinSpace(start, stop, num):
"""
Linspace op.
"""
return np.linspace(start, stop, num=num, dtype=np.float32), | Linspace op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1003-L1007 |
riga/tfdeploy | tfdeploy.py | Range | def Range(start, limit, delta):
"""
Range op.
"""
return np.arange(start, limit, delta, dtype=np.int32), | python | def Range(start, limit, delta):
"""
Range op.
"""
return np.arange(start, limit, delta, dtype=np.int32), | Range op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1011-L1015 |
riga/tfdeploy | tfdeploy.py | RandomStandardNormal | def RandomStandardNormal(shape, dtype, seed):
"""
Standard (mu=0, sigma=1) gaussian op.
"""
if seed:
np.random.seed(seed)
return np.random.normal(size=reduce(mul, shape)).reshape(shape).astype(dtype_map[dtype]), | python | def RandomStandardNormal(shape, dtype, seed):
"""
Standard (mu=0, sigma=1) gaussian op.
"""
if seed:
np.random.seed(seed)
return np.random.normal(size=reduce(mul, shape)).reshape(shape).astype(dtype_map[dtype]), | Standard (mu=0, sigma=1) gaussian op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1023-L1029 |
riga/tfdeploy | tfdeploy.py | TruncatedNormal | def TruncatedNormal(shape, dtype, seed):
"""
Standard (mu=0, sigma=1) gaussian op with truncation above 2 sigma.
"""
if seed:
np.random.seed(seed)
n = reduce(mul, shape)
r = np.empty(n, dtype=dtype_map[dtype])
idxs = np.ones(n, dtype=bool)  # use builtin bool; np.bool was removed in modern NumPy
while n:
r[idxs] = np.random.normal(size=n)
idxs = np.abs(r) > 2
n = np.sum(idxs)
return r.reshape(shape), | python | def TruncatedNormal(shape, dtype, seed):
"""
Standard (mu=0, sigma=1) gaussian op with truncation above 2 sigma.
"""
if seed:
np.random.seed(seed)
n = reduce(mul, shape)
r = np.empty(n, dtype=dtype_map[dtype])
idxs = np.ones(n, dtype=bool)  # use builtin bool; np.bool was removed in modern NumPy
while n:
r[idxs] = np.random.normal(size=n)
idxs = np.abs(r) > 2
n = np.sum(idxs)
return r.reshape(shape), | Standard (mu=0, sigma=1) gaussian op with truncation above 2 sigma. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1033-L1046 |
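The while loop above is rejection sampling: only entries beyond 2 sigma are redrawn on each pass. The same idea as a standalone numpy sketch:

import numpy as np

def truncated_normal(n, bound=2.0):
    r = np.random.normal(size=n)
    bad = np.abs(r) > bound
    while bad.any():
        r[bad] = np.random.normal(size=int(bad.sum()))  # redraw rejected entries only
        bad = np.abs(r) > bound
    return r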
riga/tfdeploy | tfdeploy.py | RandomUniform | def RandomUniform(shape, dtype, seed):
"""
Random uniform op.
"""
if seed:
np.random.seed(seed)
return np.random.uniform(size=shape).astype(dtype_map[dtype]), | python | def RandomUniform(shape, dtype, seed):
"""
Random uniform op.
"""
if seed:
np.random.seed(seed)
return np.random.uniform(size=shape).astype(dtype_map[dtype]), | Random uniform op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1050-L1056 |
riga/tfdeploy | tfdeploy.py | RandomUniformInt | def RandomUniformInt(shape, minval, maxval, seed):
"""
Random uniform int op.
"""
if seed:
np.random.seed(seed)
return np.random.randint(minval, maxval, size=shape), | python | def RandomUniformInt(shape, minval, maxval, seed):
"""
Random uniform int op.
"""
if seed:
np.random.seed(seed)
return np.random.randint(minval, maxval, size=shape), | Random uniform int op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1060-L1066 |
riga/tfdeploy | tfdeploy.py | RandomShuffle | def RandomShuffle(a, seed):
"""
Random shuffle op.
"""
if seed:
np.random.seed(seed)
r = a.copy()
np.random.shuffle(r)
return r, | python | def RandomShuffle(a, seed):
"""
Random shuffle op.
"""
if seed:
np.random.seed(seed)
r = a.copy()
np.random.shuffle(r)
return r, | Random shuffle op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1070-L1078
riga/tfdeploy | tfdeploy.py | Rank | def Rank(a):
"""
Rank op.
"""
return np.array([len(a.shape)], dtype=np.int32), | python | def Rank(a):
"""
Rank op.
"""
return np.array([len(a.shape)], dtype=np.int32), | Rank op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1114-L1118 |
riga/tfdeploy | tfdeploy.py | Squeeze | def Squeeze(a, squeeze_dims):
"""
Squeeze op, i.e. removes singular axes.
"""
if not squeeze_dims:
squeeze_dims = list(range(len(a.shape)))
# build an index tuple: 0 drops a squeezed singleton axis, slice(None) keeps an axis
slices = tuple((0 if (dim == 1 and i in squeeze_dims) else slice(None))
for i, dim in enumerate(a.shape))
return np.copy(a)[slices], | python | def Squeeze(a, squeeze_dims):
"""
Squeeze op, i.e. removes singular axes.
"""
if not squeeze_dims:
squeeze_dims = list(range(len(a.shape)))
slices = tuple((0 if (dim == 1 and i in squeeze_dims) else slice(None))
for i, dim in enumerate(a.shape))
return np.copy(a)[slices], | Squeeze op, i.e. removes singular axes. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1130-L1138
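An empty squeeze_dims removes every singleton axis (mirroring np.squeeze); explicit dims drop only those axes. Doctest-style, using the function defined above:

>>> import numpy as np
>>> Squeeze(np.zeros((1, 3, 1)), [0])[0].shape
(3, 1)
>>> Squeeze(np.zeros((1, 3, 1)), [])[0].shape
(3,)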
riga/tfdeploy | tfdeploy.py | ExpandDims | def ExpandDims(a, dim):
"""
Expand dim op, i.e. add singular axis at dim.
"""
shape = list(a.shape)
if dim >= 0:
shape.insert(dim, 1)
else:
shape.insert(len(shape) + dim + 1, 1)
return np.copy(a).reshape(*shape), | python | def ExpandDims(a, dim):
"""
Expand dim op, i.e. add singular axis at dim.
"""
shape = list(a.shape)
if dim >= 0:
shape.insert(dim, 1)
else:
shape.insert(len(shape) + dim + 1, 1)
return np.copy(a).reshape(*shape), | Expand dim op, i.e. add singular axis at dim. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1142-L1151 |
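The negative-dim branch inserts after the existing axes, so -1 appends a trailing singleton axis, matching np.expand_dims:

>>> import numpy as np
>>> ExpandDims(np.zeros((3, 4)), 0)[0].shape
(1, 3, 4)
>>> ExpandDims(np.zeros((3, 4)), -1)[0].shape
(3, 4, 1)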
riga/tfdeploy | tfdeploy.py | Slice | def Slice(a, begin, size):
"""
Slicing op.
"""
return np.copy(a)[tuple(slice(*tpl) for tpl in zip(begin, begin + size))], | python | def Slice(a, begin, size):
"""
Slicing op.
"""
return np.copy(a)[tuple(slice(*tpl) for tpl in zip(begin, begin + size))], | Slicing op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1159-L1163
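TensorFlow slices are given as (begin, size) rather than (start, stop); zipping begin with begin + size turns them into ordinary Python slices:

>>> import numpy as np
>>> Slice(np.arange(12).reshape(3, 4), np.array([1, 0]), np.array([2, 3]))[0]
array([[ 4,  5,  6],
       [ 8,  9, 10]])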
riga/tfdeploy | tfdeploy.py | Split | def Split(axis, a, n):
"""
Split op with n splits.
"""
return tuple(np.split(np.copy(a), n, axis=axis)) | python | def Split(axis, a, n):
"""
Split op with n splits.
"""
return tuple(np.split(np.copy(a), n, axis=axis)) | Split op with n splits. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1167-L1171 |
riga/tfdeploy | tfdeploy.py | SplitV | def SplitV(a, splits, axis):
"""
Split op with multiple split sizes.
"""
return tuple(np.split(np.copy(a), np.cumsum(splits)[:-1], axis=axis)) | python | def SplitV(a, splits, axis):
"""
Split op with multiple split sizes.
"""
return tuple(np.split(np.copy(a), np.cumsum(splits)[:-1], axis=axis)) | Split op with multiple split sizes. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1175-L1179
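The cumulative sum turns per-chunk sizes into split indices (sizes [2, 3, 5] on a length-10 axis split at [2, 5]); the final total is dropped above so no empty trailing chunk is produced:

>>> import numpy as np
>>> [c.shape for c in SplitV(np.arange(10), [2, 3, 5], 0)]
[(2,), (3,), (5,)]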
riga/tfdeploy | tfdeploy.py | ConcatV2 | def ConcatV2(inputs):
"""
Concat op.
"""
axis = inputs.pop()
return np.concatenate(inputs, axis=axis), | python | def ConcatV2(inputs):
"""
Concat op.
"""
axis = inputs.pop()
return np.concatenate(inputs, axis=axis), | Concat op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1199-L1204 |
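TensorFlow's ConcatV2 passes the concatenation axis as the last input, hence the pop before concatenating:

>>> import numpy as np
>>> ConcatV2([np.ones((2, 2)), np.zeros((2, 2)), 1])[0].shape
(2, 4)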