def mmGetMetricSequencesPredictedActiveCellsPerColumn(self):
  """
  Metric for number of predicted => active cells per column for each
  sequence.

  @return (Metric) metric
  """
  self._mmComputeTransitionTraces()

  numCellsPerColumn = []

  for predictedActiveCells in (
      self._mmData['predictedActiveCellsForSequence'].values()):
    cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
    numCellsPerColumn += [len(x) for x in cellsForColumn.values()]

  return Metric(self,
                '# predicted => active cells per column for each sequence',
                numCellsPerColumn)

def mmGetMetricSequencesPredictedActiveCellsShared(self):
  """
  Metric for number of sequences each predicted => active cell appears in.

  Note: This metric is flawed when it comes to high-order sequences.

  @return (Metric) metric
  """
  self._mmComputeTransitionTraces()

  numSequencesForCell = defaultdict(lambda: 0)

  for predictedActiveCells in (
      self._mmData['predictedActiveCellsForSequence'].values()):
    for cell in predictedActiveCells:
      numSequencesForCell[cell] += 1

  return Metric(self,
                '# sequences each predicted => active cell appears in',
                numSequencesForCell.values())

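# Usage sketch (assumptions: `tm` is a temporal memory instance that mixes
# in this monitor class, e.g. a hypothetical MonitoredTemporalMemory, and
# labeled sequences have already been fed through it):
metric = tm.mmGetMetricSequencesPredictedActiveCellsPerColumn()
print metric.mean  # average # of predicted => active cells per column

shared = tm.mmGetMetricSequencesPredictedActiveCellsShared()
print shared.mean  # a mean near 1.0 means little overlap between sequences
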
def mmPrettyPrintConnections(self):
  """
  Pretty print the connections in the temporal memory.

  TODO: Use PrettyTable.

  @return (string) Pretty-printed text
  """
  text = ''

  text += 'Segments: (format => (#) [(source cell=permanence ...), ...]\n'
  text += '------------------------------------\n'

  columns = range(self.numberOfColumns())

  for column in columns:
    cells = self.cellsForColumn(column)

    for cell in cells:
      segmentDict = dict()

      for seg in self.connections.segmentsForCell(cell):
        synapseList = []

        for synapse in self.connections.synapsesForSegment(seg):
          synapseData = self.connections.dataForSynapse(synapse)
          synapseList.append((synapseData.presynapticCell,
                              synapseData.permanence))

        synapseList.sort()
        synapseStringList = ['{0:3}={1:.2f}'.format(sourceCell, permanence)
                             for sourceCell, permanence in synapseList]
        segmentDict[seg] = '({0})'.format(' '.join(synapseStringList))

      text += 'Column {0:3} / Cell {1:3}:\t({2}) {3}\n'.format(
        column, cell, len(segmentDict.values()),
        '[{0}]'.format(', '.join(segmentDict.values())))

    if column < len(columns) - 1:
      text += '\n'

  text += '------------------------------------\n'

  return text

def mmPrettyPrintSequenceCellRepresentations(self, sortby='Column'):
  """
  Pretty print the cell representations for sequences in the history.

  @param sortby (string) Column of table to sort by

  @return (string) Pretty-printed text
  """
  self._mmComputeTransitionTraces()
  table = PrettyTable(['Pattern', 'Column', 'predicted=>active cells'])

  for sequenceLabel, predictedActiveCells in (
      self._mmData['predictedActiveCellsForSequence'].iteritems()):
    cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
    for column, cells in cellsForColumn.iteritems():
      table.add_row([sequenceLabel, column, list(cells)])

  return table.get_string(sortby=sortby).encode('utf-8')

def _mmComputeTransitionTraces(self):
  """
  Computes the transition traces, if necessary.

  Transition traces are the following:

      predicted => active cells
      predicted => inactive cells
      predicted => active columns
      predicted => inactive columns
      unpredicted => active columns
  """
  if not self._mmTransitionTracesStale:
    return

  self._mmData['predictedActiveCellsForSequence'] = defaultdict(set)

  self._mmTraces['predictedActiveCells'] = IndicesTrace(
    self, 'predicted => active cells (correct)')
  self._mmTraces['predictedInactiveCells'] = IndicesTrace(
    self, 'predicted => inactive cells (extra)')
  self._mmTraces['predictedActiveColumns'] = IndicesTrace(
    self, 'predicted => active columns (correct)')
  self._mmTraces['predictedInactiveColumns'] = IndicesTrace(
    self, 'predicted => inactive columns (extra)')
  self._mmTraces['unpredictedActiveColumns'] = IndicesTrace(
    self, 'unpredicted => active columns (bursting)')

  predictedCellsTrace = self._mmTraces['predictedCells']

  for i, activeColumns in enumerate(self.mmGetTraceActiveColumns().data):
    predictedActiveCells = set()
    predictedInactiveCells = set()
    predictedActiveColumns = set()
    predictedInactiveColumns = set()

    for predictedCell in predictedCellsTrace.data[i]:
      predictedColumn = self.columnForCell(predictedCell)

      if predictedColumn in activeColumns:
        predictedActiveCells.add(predictedCell)
        predictedActiveColumns.add(predictedColumn)

        sequenceLabel = self.mmGetTraceSequenceLabels().data[i]
        if sequenceLabel is not None:
          self._mmData['predictedActiveCellsForSequence'][sequenceLabel].add(
            predictedCell)
      else:
        predictedInactiveCells.add(predictedCell)
        predictedInactiveColumns.add(predictedColumn)

    unpredictedActiveColumns = activeColumns - predictedActiveColumns

    self._mmTraces['predictedActiveCells'].data.append(predictedActiveCells)
    self._mmTraces['predictedInactiveCells'].data.append(
      predictedInactiveCells)
    self._mmTraces['predictedActiveColumns'].data.append(
      predictedActiveColumns)
    self._mmTraces['predictedInactiveColumns'].data.append(
      predictedInactiveColumns)
    self._mmTraces['unpredictedActiveColumns'].data.append(
      unpredictedActiveColumns)

  self._mmTransitionTracesStale = False

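# Toy illustration of the classification rule above, using plain sets rather
# than NuPIC objects: a predicted cell is "correct" when its column is
# active, otherwise "extra"; an active column containing no predicted cell
# is "bursting" (all values below are made up).
activeColumns = {1, 2, 3}
predictedColumnForCell = {10: 1, 11: 2, 12: 7}  # cell -> its column

predictedActiveColumns = set(
  col for col in predictedColumnForCell.values() if col in activeColumns)
unpredictedActiveColumns = activeColumns - predictedActiveColumns
assert unpredictedActiveColumns == {3}  # column 3 bursts
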
def mmGetCellActivityPlot(self, title='', showReset=False,
                          resetShading=0.25, activityType='activeCells'):
  """
  Returns plot of the cell activity.

  @param title        (string) an optional title for the figure
  @param showReset    (bool)   if true, the first set of cell activities
                               after a reset will have a gray background
  @param resetShading (float)  if showReset is true, this float specifies
                               the intensity of the reset background with
                               0.0 being white and 1.0 being black
  @param activityType (string) The type of cell activity to display. Valid
                               types include "activeCells",
                               "predictiveCells", "predictedCells", and
                               "predictedActiveCells"

  @return (Plot) plot
  """
  if activityType == 'predictedActiveCells':
    self._mmComputeTransitionTraces()

  cellTrace = copy.deepcopy(self._mmTraces[activityType].data)
  for i in xrange(len(cellTrace)):
    cellTrace[i] = self.getCellIndices(cellTrace[i])

  return self.mmGetCellTracePlot(cellTrace, self.numberOfCells(),
                                 activityType, title, showReset,
                                 resetShading)

def __init__(self, monitor, title, data):
  """
  @param monitor (MonitorMixinBase) Monitor Mixin instance that generated
                                    this trace
  @param title   (string)           Title
  @param data    (list)             List of numbers to compute metric from
  """
  self.monitor = monitor
  self.title = title

  self.min = None
  self.max = None
  self.sum = None
  self.mean = None
  self.standardDeviation = None

  self._computeStats(data)

def __init__(self, monitor, title, show=True):
  """
  @param monitor (MonitorMixinBase) Monitor Mixin instance that generated
                                    this plot
  @param title   (string)           Plot title
  """
  self._monitor = monitor
  self._title = title
  self._fig = self._initFigure()
  self._show = show
  if self._show:
    plt.ion()
    plt.show()

def addGraph(self, data, position=111, xlabel=None, ylabel=None):
  """
  Adds a graph to the plot's figure.

  @param data     See matplotlib.Axes.plot documentation.
  @param position A 3-digit number. The first two digits define a 2D grid
                  where subplots may be added. The final digit specifies
                  the nth grid location for the added subplot.
  @param xlabel   text to be displayed on the x-axis
  @param ylabel   text to be displayed on the y-axis
  """
  ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
  ax.plot(data)
  plt.draw()

def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
                 bins=None):
  """
  Adds a histogram to the plot's figure.

  @param data     See matplotlib.Axes.hist documentation.
  @param position A 3-digit number. The first two digits define a 2D grid
                  where subplots may be added. The final digit specifies
                  the nth grid location for the added subplot.
  @param xlabel   text to be displayed on the x-axis
  @param ylabel   text to be displayed on the y-axis
  @param bins     number of histogram bins (see matplotlib.Axes.hist)
  """
  ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
  ax.hist(data, bins=bins, color='green', alpha=0.8)
  plt.draw()

def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
               aspect='auto', interpolation='nearest', name=None):
  """
  Adds an image to the plot's figure.

  @param data          a 2D array. See matplotlib.Axes.imshow documentation.
  @param position      A 3-digit number. The first two digits define a 2D
                       grid where subplots may be added. The final digit
                       specifies the nth grid location for the added
                       subplot.
  @param xlabel        text to be displayed on the x-axis
  @param ylabel        text to be displayed on the y-axis
  @param cmap          color map used in the rendering
  @param aspect        how aspect ratio is handled during resize
  @param interpolation interpolation method
  @param name          if not None, the rendered figure is also saved to
                       log/<name>.png
  """
  if cmap is None:
    # Default to a grayscale colormap
    cmap = cm.Greys

  ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
  ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)

  if self._show:
    plt.draw()

  if name is not None:
    if not os.path.exists('log'):
      os.mkdir('log')
    plt.savefig('log/{name}.png'.format(name=name), bbox_inches='tight',
                figsize=(8, 6), dpi=400)

def _addBase(self, position, xlabel=None, ylabel=None):
  """
  Adds a subplot to the plot's figure at the specified position.

  @param position A 3-digit number. The first two digits define a 2D grid
                  where subplots may be added. The final digit specifies
                  the nth grid location for the added subplot.
  @param xlabel   text to be displayed on the x-axis
  @param ylabel   text to be displayed on the y-axis

  @returns (matplotlib.Axes) Axes instance
  """
  ax = self._fig.add_subplot(position)
  ax.set_xlabel(xlabel)
  ax.set_ylabel(ylabel)
  return ax

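# Sketch of how the 3-digit `position` argument maps onto matplotlib's
# add_subplot grid (assumes `plot` is an existing Plot instance):
plot.addGraph(range(10), position=211, xlabel='Time', ylabel='Value')
plot.addHistogram([1, 1, 2, 3, 5, 8], position=212,
                  xlabel='Value', ylabel='Count', bins=5)
# 211/212 declare a 2-row, 1-column grid; the graph takes the first slot,
# the histogram the second.
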
def __init__(self, *args, **kwargs):
  """
  Note: If you set the kwarg "mmName", then pretty-printing of traces and
  metrics will include the name you specify as a tag before every title.
  """
  self.mmName = kwargs.get('mmName')
  if 'mmName' in kwargs:
    del kwargs['mmName']

  super(MonitorMixinBase, self).__init__(*args, **kwargs)

  self._mmTraces = None
  self._mmData = None
  self.mmClearHistory()

def mmClearHistory(self):
  """
  Clears the stored history.
  """
  self._mmTraces = {}
  self._mmData = {}

@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
  """
  Returns pretty-printed table of traces.

  @param traces        (list)       Traces to print in table
  @param breakOnResets (BoolsTrace) Trace of resets to break table on

  @return (string) Pretty-printed table of traces.
  """
  assert len(traces) > 0, 'No traces found'

  table = PrettyTable(['#'] + [trace.prettyPrintTitle() for trace in traces])

  for i in xrange(len(traces[0].data)):
    if breakOnResets and breakOnResets.data[i]:
      table.add_row(['<reset>'] * (len(traces) + 1))
    table.add_row([i] +
                  [trace.prettyPrintDatum(trace.data[i]) for trace in traces])

  return table.get_string().encode('utf-8')

@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
  """
  Returns pretty-printed table of metrics.

  @param metrics (list) Metrics to print in table
  @param sigFigs (int)  Number of significant figures to print

  @return (string) Pretty-printed table of metrics.
  """
  assert len(metrics) > 0, 'No metrics found'

  table = PrettyTable(['Metric', 'mean', 'standard deviation',
                       'min', 'max', 'sum'])

  for metric in metrics:
    table.add_row([metric.prettyPrintTitle()] + metric.getStats())

  return table.get_string().encode('utf-8')

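# Usage sketch for the two pretty-printers (assumes `tm` mixes in
# MonitorMixinBase and overrides mmGetDefaultTraces/mmGetDefaultMetrics to
# return non-empty lists):
print tm.mmPrettyPrintTraces(tm.mmGetDefaultTraces(verbosity=2),
                             breakOnResets=tm.mmGetTraceResets())
print tm.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics())
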
def mmGetDefaultTraces(self, verbosity=1):
  """
  Returns list of default traces. (To be overridden.)

  @param verbosity (int) Verbosity level

  @return (list) Default traces
  """
  return []

def mmGetDefaultMetrics(self, verbosity=1):
  """
  Returns list of default metrics. (To be overridden.)

  @param verbosity (int) Verbosity level

  @return (list) Default metrics
  """
  return []

def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title='',
                       showReset=False, resetShading=0.25):
  """
  Returns plot of the cell activity. Note that if many timesteps of
  activities are input, matplotlib's image interpolation may omit activities
  (columns in the image).

  @param cellTrace    (list)   a temporally ordered list of sets of cell
                               activities
  @param cellCount    (int)    number of cells in the space being rendered
  @param activityType (string) type of cell activity being displayed
  @param title        (string) an optional title for the figure
  @param showReset    (bool)   if true, the first set of cell activities
                               after a reset will have a grayscale
                               background
  @param resetShading (float)  applicable if showReset is true, specifies
                               the intensity of the reset background with
                               0.0 being white and 1.0 being black

  @return (Plot) plot
  """
  plot = Plot(self, title)
  resetTrace = self.mmGetTraceResets().data
  data = numpy.zeros((cellCount, 1))
  for i in xrange(len(cellTrace)):
    if showReset and resetTrace[i]:
      activity = numpy.ones((cellCount, 1)) * resetShading
    else:
      activity = numpy.zeros((cellCount, 1))

    activeIndices = cellTrace[i]
    activity[list(activeIndices)] = 1
    data = numpy.concatenate((data, activity), 1)

  plot.add2DArray(data, xlabel='Time', ylabel=activityType, name=title)
  return plot

def __init__(self, columnDimensions=(2048,), cellsPerColumn=32,
             activationThreshold=13, initialPermanence=0.21,
             connectedPermanence=0.5, minThreshold=10, maxNewSynapseCount=20,
             permanenceIncrement=0.1, permanenceDecrement=0.1, seed=42):
  """
  Translate parameters and initialize member variables specific to
  TemporalMemory.
  """
  numberOfCols = 1
  for n in columnDimensions:
    numberOfCols *= n

  super(TemporalMemoryShim, self).__init__(
    numberOfCols=numberOfCols,
    cellsPerColumn=cellsPerColumn,
    initialPerm=initialPermanence,
    connectedPerm=connectedPermanence,
    minThreshold=minThreshold,
    newSynapseCount=maxNewSynapseCount,
    permanenceInc=permanenceIncrement,
    permanenceDec=permanenceDecrement,
    permanenceMax=1.0,
    globalDecay=0,
    activationThreshold=activationThreshold,
    seed=seed)

  self.connections = Connections(numberOfCols * cellsPerColumn)
  self.predictiveCells = set()

def compute(self, activeColumns, learn=True):
  """
  Feeds input record through TM, performing inference and learning.
  Updates member variables with new state.

  @param activeColumns (set) Indices of active columns in `t`
  """
  bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype)
  bottomUpInput[list(activeColumns)] = 1
  super(TemporalMemoryShim, self).compute(bottomUpInput,
                                          enableLearn=learn,
                                          enableInference=True)

  predictedState = self.getPredictedState()
  self.predictiveCells = set(numpy.flatnonzero(predictedState))

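# Minimal driving loop for the shim (assumed parameters; activeColumns is a
# set of column indices, as the docstring requires):
tm = TemporalMemoryShim(columnDimensions=(2048,), cellsPerColumn=32)
for activeColumns in [{0, 5, 9}, {3, 5, 7}, {0, 5, 9}]:
  tm.compute(activeColumns, learn=True)
  print len(tm.predictiveCells)  # cells predicted for the next time step
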
def compute(self, recordNum, patternNZ, classification, learn, infer):
  """
  Process one input sample.

  This method is called by outer loop code outside the nupic-engine. We
  use this instead of the nupic engine compute() because our inputs and
  outputs aren't fixed size vectors of reals.

  :param recordNum: Record number of this input pattern. Record numbers
    normally increase sequentially by 1 each time unless there are missing
    records in the dataset. Knowing this information ensures that we don't
    get confused by missing records.

  :param patternNZ: List of the active indices from the output below. When
    the input is from TemporalMemory, this list should be the indices of
    the active cells.

  :param classification: Dict of the classification information where:

    - bucketIdx: list of indices of the encoder bucket
    - actValue: list of actual values going into the encoder

    Classification could be None for inference mode.

  :param learn: (bool) if true, learn this sample
  :param infer: (bool) if true, perform inference

  :return: Dict containing inference results, there is one entry for each
    step in self.steps, where the key is the number of steps, and the
    value is an array containing the relative likelihood for each
    bucketIdx starting from bucketIdx 0. There is also an entry containing
    the average actual value to use for each bucket. The key is
    'actualValues'. For example:

    .. code-block:: python

       {1 :             [0.1, 0.3, 0.2, 0.7],
        4 :             [0.2, 0.4, 0.3, 0.5],
        'actualValues': [1.5, 3.5, 5.5, 7.6]}
  """
  if self.verbosity >= 1:
    print ' learn:', learn
    print ' recordNum:', recordNum
    print ' patternNZ (%d):' % len(patternNZ), patternNZ
    print ' classificationIn:', classification

  # the record number must increase monotonically
  if len(self._patternNZHistory) > 0:
    if recordNum < self._patternNZHistory[-1][0]:
      raise ValueError('the record number has to increase monotonically')

  # track the history of the input patterns
  if (len(self._patternNZHistory) == 0 or
      recordNum > self._patternNZHistory[-1][0]):
    self._patternNZHistory.append((recordNum, patternNZ))

  retval = {}

  # grow the weight matrices if the input dimension has grown
  if max(patternNZ) > self._maxInputIdx:
    newMaxInputIdx = max(patternNZ)
    for nSteps in self.steps:
      self._weightMatrix[nSteps] = numpy.concatenate(
        (self._weightMatrix[nSteps],
         numpy.zeros(shape=(newMaxInputIdx - self._maxInputIdx,
                            self._maxBucketIdx + 1))),
        axis=0)
    self._maxInputIdx = int(newMaxInputIdx)

  # normalize the classification input to lists
  if classification is not None:
    if type(classification['bucketIdx']) is not list:
      bucketIdxList = [classification['bucketIdx']]
      actValueList = [classification['actValue']]
      numCategory = 1
    else:
      bucketIdxList = classification['bucketIdx']
      actValueList = classification['actValue']
      numCategory = len(classification['bucketIdx'])
  else:
    if learn:
      raise ValueError('classification cannot be None when learn=True')
    actValueList = None
    bucketIdxList = None

  # inference
  if infer:
    retval = self.infer(patternNZ, actValueList)

  # learning
  if learn and classification['bucketIdx'] is not None:
    for categoryI in range(numCategory):
      bucketIdx = bucketIdxList[categoryI]
      actValue = actValueList[categoryI]

      # grow the weight matrices if a new bucket appears
      if bucketIdx > self._maxBucketIdx:
        for nSteps in self.steps:
          self._weightMatrix[nSteps] = numpy.concatenate(
            (self._weightMatrix[nSteps],
             numpy.zeros(shape=(self._maxInputIdx + 1,
                                bucketIdx - self._maxBucketIdx))),
            axis=1)
        self._maxBucketIdx = int(bucketIdx)

      # keep a rolling average of actual values for scalars; otherwise just
      # remember the most recent value for the bucket
      while self._maxBucketIdx > len(self._actualValues) - 1:
        self._actualValues.append(None)

      if self._actualValues[bucketIdx] is None:
        self._actualValues[bucketIdx] = actValue
      elif (isinstance(actValue, int) or isinstance(actValue, float) or
            isinstance(actValue, long)):
        self._actualValues[bucketIdx] = (
          (1.0 - self.actValueAlpha) * self._actualValues[bucketIdx] +
          self.actValueAlpha * actValue)
      else:
        self._actualValues[bucketIdx] = actValue

    # update the weights for every pattern in the history whose age matches
    # one of the configured step sizes
    for learnRecordNum, learnPatternNZ in self._patternNZHistory:
      error = self._calculateError(recordNum, bucketIdxList)
      nSteps = recordNum - learnRecordNum
      if nSteps in self.steps:
        for bit in learnPatternNZ:
          self._weightMatrix[nSteps][bit, :] += self.alpha * error[nSteps]

  # verbose print
  if infer and self.verbosity >= 1:
    print ' inference: combined bucket likelihoods:'
    print '   actual bucket values:', retval['actualValues']

    for nSteps, votes in retval.items():
      if nSteps == 'actualValues':
        continue
      print '   %d steps: ' % nSteps, _pFormatArray(votes)
      bestBucketIdx = votes.argmax()
      print ('     most likely bucket idx: %d, value: %s' %
             (bestBucketIdx, retval['actualValues'][bestBucketIdx]))
    print

  return retval

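# Usage sketch (assumes `classifier` is an instance of the SDR classifier
# this method belongs to, configured with steps=[1]):
result = classifier.compute(recordNum=0,
                            patternNZ=[3, 14, 27],
                            classification={'bucketIdx': 4,
                                            'actValue': 34.7},
                            learn=True, infer=True)
# result[1] is the likelihood distribution over buckets one step ahead;
# result['actualValues'][result[1].argmax()] is the most likely value.
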
def infer(self, patternNZ, actValueList):
  """
  Return the inference value from one input sample. The actual learning
  happens in compute().

  :param patternNZ: list of the active indices from the output below
  :param actValueList: list of actual values, one per category, going into
    the encoder (may be None during inference)

  :return: dict containing inference results, one entry for each step in
    self.steps. The key is the number of steps, the value is an array
    containing the relative likelihood for each bucketIdx starting from
    bucketIdx 0. For example:

    .. code-block:: python

       {'actualValues': [0.0, 1.0, 2.0, 3.0],
        1 :             [0.1, 0.3, 0.2, 0.7],
        4 :             [0.2, 0.4, 0.3, 0.5]}
  """
  if self.steps[0] == 0 or actValueList is None:
    defaultValue = 0
  else:
    defaultValue = actValueList[0]

  actValues = [x if x is not None else defaultValue
               for x in self._actualValues]
  retval = {'actualValues': actValues}

  for nSteps in self.steps:
    predictDist = self.inferSingleStep(patternNZ,
                                       self._weightMatrix[nSteps])
    retval[nSteps] = predictDist

  return retval

def inferSingleStep(self, patternNZ, weightMatrix):
  """
  Perform inference for a single step. Given an SDR input and a weight
  matrix, return a predicted distribution.

  :param patternNZ: list of the active indices from the output below
  :param weightMatrix: numpy array of the weight matrix

  :return: numpy array of the predicted class label distribution
  """
  outputActivation = weightMatrix[patternNZ].sum(axis=0)

  # softmax normalization of the summed activations
  expOutputActivation = numpy.exp(outputActivation)
  predictDist = expOutputActivation / numpy.sum(expOutputActivation)
  return predictDist

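# Worked example of the softmax above with a toy 2-input, 3-bucket weight
# matrix (plain numpy, no classifier state needed):
import numpy

toyWeights = numpy.array([[1.0, 0.0, 0.0],
                          [1.0, 1.0, 0.0]])
activation = toyWeights[[0, 1]].sum(axis=0)  # [2.0, 1.0, 0.0]
dist = numpy.exp(activation) / numpy.sum(numpy.exp(activation))
# dist sums to 1.0 and bucket 0 gets the highest likelihood.
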
def _calculateError(self, recordNum, bucketIdxList):
  """
  Calculate error signal.

  :param recordNum: current record number
  :param bucketIdxList: list of encoder buckets

  :return: dict containing error. The key is the number of steps; the
    value is a numpy array of error at the output layer
  """
  error = dict()
  targetDist = numpy.zeros(self._maxBucketIdx + 1)
  numCategories = len(bucketIdxList)
  for bucketIdx in bucketIdxList:
    targetDist[bucketIdx] = 1.0 / numCategories

  for learnRecordNum, learnPatternNZ in self._patternNZHistory:
    nSteps = recordNum - learnRecordNum
    if nSteps in self.steps:
      predictDist = self.inferSingleStep(learnPatternNZ,
                                         self._weightMatrix[nSteps])
      error[nSteps] = targetDist - predictDist

  return error

def __eq__(self, other):
  """
  Explicitly implement this for unit testing. The flatIdx is not designed
  to be consistent after serialize / deserialize, and the synapses might
  not enumerate in the same order.
  """
  return (self.cell == other.cell and
          (sorted(self._synapses, key=lambda x: x._ordinal) ==
           sorted(other._synapses, key=lambda x: x._ordinal)))

def __eq__(self, other):
  """
  Explicitly implement this for unit testing. Allow floating point
  differences for synapse permanence.
  """
  return (self.segment.cell == other.segment.cell and
          self.presynapticCell == other.presynapticCell and
          abs(self.permanence - other.permanence) < EPSILON)

def segmentsForCell(self, cell):
  """
  Returns the segments that belong to a cell.

  :param cell: (int) Cell index

  :returns: (list) Segment objects representing segments on the given cell.
  """
  return self._cells[cell]._segments

def synapsesForSegment(self, segment):
  """
  Returns the synapses on a segment.

  :param segment: (:class:`Segment`) Segment to query

  :returns: (set) Synapse objects representing synapses on the given
    segment.
  """
  return segment._synapses

def dataForSynapse(self, synapse):
  """
  Returns the data for a synapse.

  .. note:: This method exists to match the interface of the C++
     Connections. This allows tests and tools to inspect the connections
     using a common interface.

  :param synapse: (:class:`Synapse`)

  :returns: Synapse data
  """
  return synapse

def dataForSegment(self, segment):
  """
  Returns the data for a segment.

  .. note:: This method exists to match the interface of the C++
     Connections. This allows tests and tools to inspect the connections
     using a common interface.

  :param segment: (:class:`Segment`)

  :returns: segment data
  """
  return segment

def getSegment(self, cell, idx):
  """
  Returns a :class:`Segment` object of the specified segment using data
  from the ``self._cells`` array.

  :param cell: (int) cell index
  :param idx: (int) segment index on a cell

  :returns: (:class:`Segment`) Segment object with index idx on the
    specified cell
  """
  return self._cells[cell]._segments[idx]

def segmentForFlatIdx(self, flatIdx):
  """
  Get the segment with the specified flatIdx.

  :param flatIdx: (int) The segment's flattened list index.

  :returns: (:class:`Segment`)
  """
  return self._segmentForFlatIdx[flatIdx]

def segmentFlatListLength(self):
  """
  Get the needed length for a list to hold a value for every segment's
  flatIdx.

  :returns: (int) Required list length
  """
  return self._nextFlatIdx

def synapsesForPresynapticCell(self, presynapticCell):
  """
  Returns the synapses that have the given presynaptic (source) cell.

  :param presynapticCell: (int) Source cell index

  :returns: (set) :class:`Synapse` objects
  """
  return self._synapsesForPresynapticCell[presynapticCell]

def createSegment(self, cell):
  """
  Adds a new segment on a cell.

  :param cell: (int) Cell index

  :returns: (:class:`Segment`) The created segment
  """
  cellData = self._cells[cell]

  # reuse a flatIdx freed by a destroyed segment, if any
  if len(self._freeFlatIdxs) > 0:
    flatIdx = self._freeFlatIdxs.pop()
  else:
    flatIdx = self._nextFlatIdx
    self._segmentForFlatIdx.append(None)
    self._nextFlatIdx += 1

  ordinal = self._nextSegmentOrdinal
  self._nextSegmentOrdinal += 1

  segment = Segment(cell, flatIdx, ordinal)
  cellData._segments.append(segment)
  self._segmentForFlatIdx[flatIdx] = segment

  return segment

def destroySegment(self, segment):
  """
  Destroys a segment.

  :param segment: (:class:`Segment`) representing the segment to be
    destroyed.
  """
  # remove the segment's synapses from the presynaptic index
  for synapse in segment._synapses:
    self._removeSynapseFromPresynapticMap(synapse)
  self._numSynapses -= len(segment._synapses)

  # remove the segment from the cell's segment list
  segments = self._cells[segment.cell]._segments
  i = segments.index(segment)
  del segments[i]

  # free the flatIdx for reuse
  self._freeFlatIdxs.append(segment.flatIdx)
  self._segmentForFlatIdx[segment.flatIdx] = None

def createSynapse(self, segment, presynapticCell, permanence):
  """
  Creates a new synapse on a segment.

  :param segment: (:class:`Segment`) Segment to attach the synapse to.
  :param presynapticCell: (int) Source cell index.
  :param permanence: (float) Initial permanence of synapse.

  :returns: (:class:`Synapse`) created synapse
  """
  synapse = Synapse(segment, presynapticCell, permanence,
                    self._nextSynapseOrdinal)
  self._nextSynapseOrdinal += 1
  segment._synapses.add(synapse)

  self._synapsesForPresynapticCell[presynapticCell].add(synapse)

  self._numSynapses += 1

  return synapse

def destroySynapse(self, synapse):
  """
  Destroys a synapse.

  :param synapse: (:class:`Synapse`) synapse to destroy
  """
  self._numSynapses -= 1
  self._removeSynapseFromPresynapticMap(synapse)
  synapse.segment._synapses.remove(synapse)

def updateSynapsePermanence(self, synapse, permanence):
  """
  Updates the permanence for a synapse.

  :param synapse: (:class:`Synapse`) to be updated.
  :param permanence: (float) New permanence.
  """
  synapse.permanence = permanence

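# Sketch of building a tiny connectivity graph with the methods above
# (assumes a Connections instance over 100 cells):
connections = Connections(100)
segment = connections.createSegment(cell=7)
synapse = connections.createSynapse(segment, presynapticCell=42,
                                    permanence=0.21)
connections.updateSynapsePermanence(synapse, 0.35)
assert connections.numSynapses(segment) == 1
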
def computeActivity(self, activePresynapticCells, connectedPermanence):
  """
  Compute each segment's number of active synapses for a given input.
  In the returned lists, a segment's active synapse count is stored at
  index ``segment.flatIdx``.

  :param activePresynapticCells: (iter) Active cells.
  :param connectedPermanence: (float) Permanence threshold for a synapse
    to be considered connected.

  :returns: (tuple) (``numActiveConnectedSynapsesForSegment`` [list],
            ``numActivePotentialSynapsesForSegment`` [list])
  """
  numActiveConnectedSynapsesForSegment = [0] * self._nextFlatIdx
  numActivePotentialSynapsesForSegment = [0] * self._nextFlatIdx

  threshold = connectedPermanence - EPSILON

  for cell in activePresynapticCells:
    for synapse in self._synapsesForPresynapticCell[cell]:
      flatIdx = synapse.segment.flatIdx
      numActivePotentialSynapsesForSegment[flatIdx] += 1
      if synapse.permanence > threshold:
        numActiveConnectedSynapsesForSegment[flatIdx] += 1

  return (numActiveConnectedSynapsesForSegment,
          numActivePotentialSynapsesForSegment)

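# Continuing the toy `connections` sketch above: count overlaps for a set
# of active presynaptic cells.
(numActiveConnected, numActivePotential) = connections.computeActivity(
  activePresynapticCells={42, 43}, connectedPermanence=0.5)
# numActivePotential[segment.flatIdx] counts every active synapse on the
# segment; numActiveConnected[segment.flatIdx] counts only those whose
# permanence is at least connectedPermanence (within EPSILON).
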
def numSegments(self, cell=None):
  """
  Returns the number of segments.

  :param cell: (int) Optional parameter to get the number of segments on
    a cell.

  :returns: (int) Number of segments on all cells if cell is not
    specified, or on the specified cell.
  """
  if cell is not None:
    return len(self._cells[cell]._segments)

  return self._nextFlatIdx - len(self._freeFlatIdxs)

def numSynapses(self, segment=None):
  """
  Returns the number of Synapses.

  :param segment: (:class:`Segment`) Optional parameter to get the number
    of synapses on a segment.

  :returns: (int) Number of synapses on all segments if segment is not
    specified, or on a specified segment.
  """
  if segment is not None:
    return len(segment._synapses)
  return self._numSynapses

def segmentPositionSortKey(self, segment):
  """
  Return a numeric key for sorting this segment. This can be used with the
  python built-in ``sorted()`` function.

  :param segment: (:class:`Segment`) within this :class:`Connections`
    instance.

  :returns: (float) A numeric key for sorting.
  """
  return (segment.cell +
          (segment._ordinal / float(self._nextSegmentOrdinal)))

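# Deterministic ordering sketch: sort a cell's segments by cell index, then
# by creation order, as the key above encodes.
segments = connections.segmentsForCell(7)
orderedSegments = sorted(segments, key=connections.segmentPositionSortKey)
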
def write(self, proto):
  """
  Writes serialized data to proto object.

  :param proto: (DynamicStructBuilder) Proto object
  """
  protoCells = proto.init('cells', self.numCells)

  for i in xrange(self.numCells):
    segments = self._cells[i]._segments
    protoSegments = protoCells[i].init('segments', len(segments))

    for j, segment in enumerate(segments):
      synapses = segment._synapses
      protoSynapses = protoSegments[j].init('synapses', len(synapses))

      for k, synapse in enumerate(sorted(synapses,
                                         key=lambda s: s._ordinal)):
        protoSynapses[k].presynapticCell = synapse.presynapticCell
        protoSynapses[k].permanence = synapse.permanence

@classmethod
def read(cls, proto):
  """
  Reads deserialized data from proto object.

  :param proto: (DynamicStructBuilder) Proto object

  :returns: (:class:`Connections`) instance
  """
  protoCells = proto.cells
  connections = cls(len(protoCells))

  for cellIdx, protoCell in enumerate(protoCells):
    protoSegments = protoCell.segments
    connections._cells[cellIdx] = CellData()
    segments = connections._cells[cellIdx]._segments

    for segmentIdx, protoSegment in enumerate(protoSegments):
      segment = Segment(cellIdx, connections._nextFlatIdx,
                        connections._nextSegmentOrdinal)

      segments.append(segment)
      connections._segmentForFlatIdx.append(segment)
      connections._nextFlatIdx += 1
      connections._nextSegmentOrdinal += 1

      synapses = segment._synapses
      protoSynapses = protoSegment.synapses

      for synapseIdx, protoSynapse in enumerate(protoSynapses):
        presynapticCell = protoSynapse.presynapticCell
        synapse = Synapse(segment, presynapticCell,
                          protoSynapse.permanence,
                          ordinal=connections._nextSynapseOrdinal)
        connections._nextSynapseOrdinal += 1
        synapses.add(synapse)
        connections._synapsesForPresynapticCell[presynapticCell].add(synapse)
        connections._numSynapses += 1

  return connections

def __eq__(self, other):
  """
  Equality operator for Connections instances.
  Checks if two instances are functionally identical.

  :param other: (:class:`Connections`) Connections instance to compare to
  """
  for i in xrange(self.numCells):
    segments = self._cells[i]._segments
    otherSegments = other._cells[i]._segments

    if len(segments) != len(otherSegments):
      return False

    for j in xrange(len(segments)):
      segment = segments[j]
      otherSegment = otherSegments[j]
      synapses = segment._synapses
      otherSynapses = otherSegment._synapses

      if len(synapses) != len(otherSynapses):
        return False

      for synapse in synapses:
        found = False
        for candidate in otherSynapses:
          if synapse == candidate:
            found = True
            break
        if not found:
          return False

  if (len(self._synapsesForPresynapticCell) !=
      len(other._synapsesForPresynapticCell)):
    return False

  for i in self._synapsesForPresynapticCell.keys():
    synapses = self._synapsesForPresynapticCell[i]
    otherSynapses = other._synapsesForPresynapticCell[i]
    if len(synapses) != len(otherSynapses):
      return False

    for synapse in synapses:
      found = False
      for candidate in otherSynapses:
        if synapse == candidate:
          found = True
          break
      if not found:
        return False

  if self._numSynapses != other._numSynapses:
    return False

  return True

def __ne__(self, other):
  """
  Non-equality operator for Connections instances.
  Checks if two instances are not functionally identical.

  :param other: (:class:`Connections`) Connections instance to compare to
  """
  return not self.__eq__(other)

def _getEphemeralMembers(self):
  """
  List of our member variables that we don't need to be saved.
  """
  return []

def _initEphemerals(self):
  """
  Initialize all ephemeral members after being restored to a pickled state.
  """
  self.segmentUpdates = {}
  self.resetStats()

  self._prevInfPatterns = []
  self._prevLrnPatterns = []

  stateShape = (self.numberOfCols, self.cellsPerColumn)

  self.lrnActiveState = {}
  self.lrnActiveState['t'] = numpy.zeros(stateShape, dtype='int8')
  self.lrnActiveState['t-1'] = numpy.zeros(stateShape, dtype='int8')

  self.lrnPredictedState = {}
  self.lrnPredictedState['t'] = numpy.zeros(stateShape, dtype='int8')
  self.lrnPredictedState['t-1'] = numpy.zeros(stateShape, dtype='int8')

  self.infActiveState = {}
  self.infActiveState['t'] = numpy.zeros(stateShape, dtype='int8')
  self.infActiveState['t-1'] = numpy.zeros(stateShape, dtype='int8')
  self.infActiveState['backup'] = numpy.zeros(stateShape, dtype='int8')
  self.infActiveState['candidate'] = numpy.zeros(stateShape, dtype='int8')

  self.infPredictedState = {}
  self.infPredictedState['t'] = numpy.zeros(stateShape, dtype='int8')
  self.infPredictedState['t-1'] = numpy.zeros(stateShape, dtype='int8')
  self.infPredictedState['backup'] = numpy.zeros(stateShape, dtype='int8')
  self.infPredictedState['candidate'] = numpy.zeros(stateShape, dtype='int8')

  self.cellConfidence = {}
  self.cellConfidence['t'] = numpy.zeros(stateShape, dtype='float32')
  self.cellConfidence['t-1'] = numpy.zeros(stateShape, dtype='float32')
  self.cellConfidence['candidate'] = numpy.zeros(stateShape, dtype='float32')

  self.colConfidence = {}
  self.colConfidence['t'] = numpy.zeros(self.numberOfCols, dtype='float32')
  self.colConfidence['t-1'] = numpy.zeros(self.numberOfCols, dtype='float32')
  self.colConfidence['candidate'] = numpy.zeros(self.numberOfCols,
                                                dtype='float32')

def __getstate__(self):
  """
  @internal
  Return serializable state. This function will return a version of the
  __dict__ with all "ephemeral" members stripped out. "Ephemeral" members
  are defined as those that do not need to be (nor should be) stored in
  any kind of persistent file (e.g., NuPIC network XML file.)
  """
  state = self.__dict__.copy()
  for ephemeralMemberName in self._getEphemeralMembers():
    state.pop(ephemeralMemberName, None)

  state['_random'] = self._getRandomState()

  state['version'] = TM_VERSION

  return state

def __setstate__(self, state):
  """
  @internal
  Set the state of ourself from a serialized state.
  """
  self._setRandomState(state['_random'])
  del state['_random']

  version = state.pop('version')
  assert version == TM_VERSION

  self.__dict__.update(state)

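# Round-trip sketch relying on __getstate__/__setstate__ above (assumes
# `tm` is a constructed BacktrackingTM instance):
import pickle

blob = pickle.dumps(tm)   # ephemeral members are stripped, RNG state kept
tm2 = pickle.loads(blob)  # version is checked against TM_VERSION
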
def write(self, proto):
  """
  Populate serialization proto instance.

  :param proto: (BacktrackingTMProto) the proto instance to populate
  """
  proto.version = TM_VERSION
  self._random.write(proto.random)
  proto.numberOfCols = self.numberOfCols
  proto.cellsPerColumn = self.cellsPerColumn
  proto.initialPerm = float(self.initialPerm)
  proto.connectedPerm = float(self.connectedPerm)
  proto.minThreshold = self.minThreshold
  proto.newSynapseCount = self.newSynapseCount
  proto.permanenceInc = float(self.permanenceInc)
  proto.permanenceDec = float(self.permanenceDec)
  proto.permanenceMax = float(self.permanenceMax)
  proto.globalDecay = float(self.globalDecay)
  proto.activationThreshold = self.activationThreshold
  proto.doPooling = self.doPooling
  proto.segUpdateValidDuration = self.segUpdateValidDuration
  proto.burnIn = self.burnIn
  proto.collectStats = self.collectStats
  proto.verbosity = self.verbosity
  proto.pamLength = self.pamLength
  proto.maxAge = self.maxAge
  proto.maxInfBacktrack = self.maxInfBacktrack
  proto.maxLrnBacktrack = self.maxLrnBacktrack
  proto.maxSeqLength = self.maxSeqLength
  proto.maxSegmentsPerCell = self.maxSegmentsPerCell
  proto.maxSynapsesPerSegment = self.maxSynapsesPerSegment
  proto.outputType = self.outputType

  proto.activeColumns = self.activeColumns

  cellListProto = proto.init('cells', len(self.cells))
  for i, columnSegments in enumerate(self.cells):
    columnSegmentsProto = cellListProto.init(i, len(columnSegments))
    for j, cellSegments in enumerate(columnSegments):
      cellSegmentsProto = columnSegmentsProto.init(j, len(cellSegments))
      for k, segment in enumerate(cellSegments):
        segment.write(cellSegmentsProto[k])

  proto.lrnIterationIdx = self.lrnIterationIdx
  proto.iterationIdx = self.iterationIdx
  proto.segID = self.segID
  if self.currentOutput is not None:
    proto.currentOutput = self.currentOutput.tolist()
  proto.pamCounter = self.pamCounter
  proto.collectSequenceStats = self.collectSequenceStats
  proto.resetCalled = self.resetCalled
  # -1 serves as the sentinel for a missing average input density (see read)
  proto.avgInputDensity = self.avgInputDensity or -1.0
  proto.learnedSeqLength = self.learnedSeqLength
  proto.avgLearnedSeqLength = self.avgLearnedSeqLength

  proto.prevLrnPatterns = self._prevLrnPatterns
  proto.prevInfPatterns = self._prevInfPatterns

  segmentUpdatesListProto = proto.init('segmentUpdates',
                                       len(self.segmentUpdates))
  for i, (key, updates) in enumerate(self.segmentUpdates.iteritems()):
    cellSegmentUpdatesProto = segmentUpdatesListProto[i]
    cellSegmentUpdatesProto.columnIdx = key[0]
    cellSegmentUpdatesProto.cellIdx = key[1]
    segmentUpdatesProto = cellSegmentUpdatesProto.init('segmentUpdates',
                                                       len(updates))
    for j, (lrnIterationIdx, segmentUpdate) in enumerate(updates):
      segmentUpdateWrapperProto = segmentUpdatesProto[j]
      segmentUpdateWrapperProto.lrnIterationIdx = lrnIterationIdx
      segmentUpdate.write(segmentUpdateWrapperProto.segmentUpdate)

  proto.cellConfidenceT = self.cellConfidence['t'].tolist()
  proto.cellConfidenceT1 = self.cellConfidence['t-1'].tolist()
  proto.cellConfidenceCandidate = self.cellConfidence['candidate'].tolist()
  proto.colConfidenceT = self.colConfidence['t'].tolist()
  proto.colConfidenceT1 = self.colConfidence['t-1'].tolist()
  proto.colConfidenceCandidate = self.colConfidence['candidate'].tolist()
  proto.lrnActiveStateT = self.lrnActiveState['t'].tolist()
  proto.lrnActiveStateT1 = self.lrnActiveState['t-1'].tolist()
  proto.infActiveStateT = self.infActiveState['t'].tolist()
  proto.infActiveStateT1 = self.infActiveState['t-1'].tolist()
  proto.infActiveStateBackup = self.infActiveState['backup'].tolist()
  proto.infActiveStateCandidate = self.infActiveState['candidate'].tolist()
  proto.lrnPredictedStateT = self.lrnPredictedState['t'].tolist()
  proto.lrnPredictedStateT1 = self.lrnPredictedState['t-1'].tolist()
  proto.infPredictedStateT = self.infPredictedState['t'].tolist()
  proto.infPredictedStateT1 = self.infPredictedState['t-1'].tolist()
  proto.infPredictedStateBackup = self.infPredictedState['backup'].tolist()
  proto.infPredictedStateCandidate = (
    self.infPredictedState['candidate'].tolist())
  proto.consolePrinterVerbosity = self.consolePrinterVerbosity

@classmethod
def read(cls, proto):
  """
  Deserialize from proto instance.

  :param proto: (BacktrackingTMProto) the proto instance to read from
  """
  assert proto.version == TM_VERSION
  obj = object.__new__(cls)
  obj._random = Random()
  obj._random.read(proto.random)
  obj.numberOfCols = int(proto.numberOfCols)
  obj.cellsPerColumn = int(proto.cellsPerColumn)
  obj._numberOfCells = obj.numberOfCols * obj.cellsPerColumn
  obj.initialPerm = numpy.float32(proto.initialPerm)
  obj.connectedPerm = numpy.float32(proto.connectedPerm)
  obj.minThreshold = int(proto.minThreshold)
  obj.newSynapseCount = int(proto.newSynapseCount)
  obj.permanenceInc = numpy.float32(proto.permanenceInc)
  obj.permanenceDec = numpy.float32(proto.permanenceDec)
  obj.permanenceMax = numpy.float32(proto.permanenceMax)
  obj.globalDecay = numpy.float32(proto.globalDecay)
  obj.activationThreshold = int(proto.activationThreshold)
  obj.doPooling = proto.doPooling
  obj.segUpdateValidDuration = int(proto.segUpdateValidDuration)
  obj.burnIn = int(proto.burnIn)
  obj.collectStats = proto.collectStats
  obj.verbosity = int(proto.verbosity)
  obj.pamLength = int(proto.pamLength)
  obj.maxAge = int(proto.maxAge)
  obj.maxInfBacktrack = int(proto.maxInfBacktrack)
  obj.maxLrnBacktrack = int(proto.maxLrnBacktrack)
  obj.maxSeqLength = int(proto.maxSeqLength)
  obj.maxSegmentsPerCell = proto.maxSegmentsPerCell
  obj.maxSynapsesPerSegment = proto.maxSynapsesPerSegment
  obj.outputType = proto.outputType

  obj.activeColumns = [int(col) for col in proto.activeColumns]

  obj.cells = [[] for _ in xrange(len(proto.cells))]
  for columnSegments, columnSegmentsProto in zip(obj.cells, proto.cells):
    columnSegments.extend([[] for _ in xrange(len(columnSegmentsProto))])
    for cellSegments, cellSegmentsProto in zip(columnSegments,
                                               columnSegmentsProto):
      for segmentProto in cellSegmentsProto:
        segment = Segment.read(segmentProto, obj)
        cellSegments.append(segment)

  obj.lrnIterationIdx = int(proto.lrnIterationIdx)
  obj.iterationIdx = int(proto.iterationIdx)
  obj.segID = int(proto.segID)

  obj.pamCounter = int(proto.pamCounter)
  obj.collectSequenceStats = proto.collectSequenceStats
  obj.resetCalled = proto.resetCalled
  # -1 is the sentinel for a missing average input density (see write)
  avgInputDensity = proto.avgInputDensity
  if avgInputDensity < 0.0:
    obj.avgInputDensity = None
  else:
    obj.avgInputDensity = avgInputDensity
  obj.learnedSeqLength = int(proto.learnedSeqLength)
  obj.avgLearnedSeqLength = proto.avgLearnedSeqLength

  obj._initEphemerals()
  obj.currentOutput = numpy.array(proto.currentOutput, dtype='float32')

  for pattern in proto.prevLrnPatterns:
    obj._prevLrnPatterns.append([v for v in pattern])
  for pattern in proto.prevInfPatterns:
    obj._prevInfPatterns.append([v for v in pattern])

  for cellWrapperProto in proto.segmentUpdates:
    key = (cellWrapperProto.columnIdx, cellWrapperProto.cellIdx)
    value = []
    for updateWrapperProto in cellWrapperProto.segmentUpdates:
      segmentUpdate = SegmentUpdate.read(updateWrapperProto.segmentUpdate,
                                         obj)
      value.append((int(updateWrapperProto.lrnIterationIdx), segmentUpdate))
    obj.segmentUpdates[key] = value

  numpy.copyto(obj.cellConfidence['t'], proto.cellConfidenceT)
  numpy.copyto(obj.cellConfidence['t-1'], proto.cellConfidenceT1)
  numpy.copyto(obj.cellConfidence['candidate'],
               proto.cellConfidenceCandidate)
  numpy.copyto(obj.colConfidence['t'], proto.colConfidenceT)
  numpy.copyto(obj.colConfidence['t-1'], proto.colConfidenceT1)
  numpy.copyto(obj.colConfidence['candidate'], proto.colConfidenceCandidate)
  numpy.copyto(obj.lrnActiveState['t'], proto.lrnActiveStateT)
  numpy.copyto(obj.lrnActiveState['t-1'], proto.lrnActiveStateT1)
  numpy.copyto(obj.infActiveState['t'], proto.infActiveStateT)
  numpy.copyto(obj.infActiveState['t-1'], proto.infActiveStateT1)
  numpy.copyto(obj.infActiveState['backup'], proto.infActiveStateBackup)
  numpy.copyto(obj.infActiveState['candidate'],
               proto.infActiveStateCandidate)
  numpy.copyto(obj.lrnPredictedState['t'], proto.lrnPredictedStateT)
  numpy.copyto(obj.lrnPredictedState['t-1'], proto.lrnPredictedStateT1)
  numpy.copyto(obj.infPredictedState['t'], proto.infPredictedStateT)
  numpy.copyto(obj.infPredictedState['t-1'], proto.infPredictedStateT1)
  numpy.copyto(obj.infPredictedState['backup'],
               proto.infPredictedStateBackup)
  numpy.copyto(obj.infPredictedState['candidate'],
               proto.infPredictedStateCandidate)
  obj.consolePrinterVerbosity = int(proto.consolePrinterVerbosity)
  return obj

def __getattr__(self, name):
  """
  @internal
  Patch __getattr__ so that we can catch the first access to 'cells' and
  load.

  This function is only called when we try to access an attribute that
  doesn't exist. We purposely make sure that "self.cells" doesn't exist
  after unpickling so that we'll hit this, then we can load it on the first
  access.

  If this is called at any other time, it will raise an AttributeError.
  That's because:

  - If 'name' is "cells", after the first call, self._realCells won't exist
    so we'll get an implicit AttributeError.
  - If 'name' isn't "cells", I'd expect our super wouldn't have
    __getattr__, so we'll raise our own Attribute error. If the super did
    get __getattr__, we'll just return what it gives us.
  """
  try:
    return super(BacktrackingTM, self).__getattr__(name)
  except AttributeError:
    raise AttributeError("'TM' object has no attribute '%s'" % name)

def saveToFile(self, filePath):
  """
  Implemented in
  :meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.saveToFile`.
  """
  pass

def loadFromFile(self, filePath):
  """
  Implemented in
  :meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.loadFromFile`.
  """
  pass

def _getRandomState(self):
  """
  @internal
  Return the random number state. This is used during unit testing to
  generate repeatable results.
  """
  return pickle.dumps(self._random)

def _setRandomState(self, state):
  """
  @internal
  Set the random number state. This is used during unit testing to
  generate repeatable results.
  """
  self._random = pickle.loads(state)

def reset(self):
  """
  Reset the state of all cells. This is normally used between sequences
  while training. All internal states are reset to 0.
  """
  if self.verbosity >= 3:
    print '\n==== RESET ====='

  self.lrnActiveState['t-1'].fill(0)
  self.lrnActiveState['t'].fill(0)
  self.lrnPredictedState['t-1'].fill(0)
  self.lrnPredictedState['t'].fill(0)

  self.infActiveState['t-1'].fill(0)
  self.infActiveState['t'].fill(0)
  self.infPredictedState['t-1'].fill(0)
  self.infPredictedState['t'].fill(0)

  self.cellConfidence['t-1'].fill(0)
  self.cellConfidence['t'].fill(0)

  # flush any pending segment updates
  self.segmentUpdates = {}

  self._internalStats['nInfersSinceReset'] = 0

  # reset the current prediction score stats
  self._internalStats['curPredictionScore'] = 0
  self._internalStats['curPredictionScore2'] = 0
  self._internalStats['curFalseNegativeScore'] = 0
  self._internalStats['curFalsePositiveScore'] = 0

  self._internalStats['curMissing'] = 0
  self._internalStats['curExtra'] = 0

  # Save the sequence signature accumulated for the sequence that just
  # ended, then clear the histogram for the next sequence.
  self._internalStats['prevSequenceSignature'] = None
  if self.collectSequenceStats:
    if self._internalStats['confHistogram'].sum() > 0:
      sig = self._internalStats['confHistogram'].copy()
      sig = sig.reshape(self.numberOfCols * self.cellsPerColumn)
      self._internalStats['prevSequenceSignature'] = sig
    self._internalStats['confHistogram'].fill(0)

  self.resetCalled = True

  # clear the backtracking history
  self._prevInfPatterns = []
  self._prevLrnPatterns = []

def resetStats(self):
  """
  Reset the learning and inference stats. This will usually be called by
  user code at the start of each inference run (for a particular data set).
  """
  self._stats = dict()
  self._internalStats = dict()

  self._internalStats['nInfersSinceReset'] = 0
  self._internalStats['nPredictions'] = 0

  self._internalStats['curPredictionScore'] = 0
  self._internalStats['curPredictionScore2'] = 0
  self._internalStats['predictionScoreTotal2'] = 0
  self._internalStats['curFalseNegativeScore'] = 0
  self._internalStats['falseNegativeScoreTotal'] = 0
  self._internalStats['curFalsePositiveScore'] = 0
  self._internalStats['falsePositiveScoreTotal'] = 0

  self._internalStats['pctExtraTotal'] = 0
  self._internalStats['pctMissingTotal'] = 0
  self._internalStats['curMissing'] = 0
  self._internalStats['curExtra'] = 0
  self._internalStats['totalMissing'] = 0
  self._internalStats['totalExtra'] = 0

  self._internalStats['prevSequenceSignature'] = None
  if self.collectSequenceStats:
    self._internalStats['confHistogram'] = numpy.zeros(
      (self.numberOfCols, self.cellsPerColumn), dtype='float32')

def getStats(self):
  """
  Return the current learning and inference stats. This returns a dict
  containing all the learning and inference stats we have collected since
  the last :meth:`resetStats` call. If :class:`BacktrackingTM`
  ``collectStats`` parameter is False, then None is returned.

  :returns: (dict) The following keys are returned in the dict when
    ``collectStats`` is True:

    - ``nPredictions``: the number of predictions. This is the total
      number of inferences excluding burn-in and the last inference.
    - ``curPredictionScore``: the score for predicting the current input
      (predicted during the previous inference)
    - ``curMissing``: the number of bits in the current input that were
      not predicted to be on.
    - ``curExtra``: the number of bits in the predicted output that are
      not in the next input
    - ``predictionScoreTotal``: the sum of every prediction score to date
    - ``predictionScoreAvg``: ``predictionScoreTotal / nPredictions``
    - ``pctMissingTotal``: the total number of bits that were missed over
      all predictions
    - ``pctMissingAvg``: ``pctMissingTotal / nPredictions``
    - ``prevSequenceSignature``: signature for the sequence immediately
      preceding the last reset. 'None' if ``collectSequenceStats`` is
      False.
  """
  if not self.collectStats:
    return None

  self._stats['nPredictions'] = self._internalStats['nPredictions']
  self._stats['curMissing'] = self._internalStats['curMissing']
  self._stats['curExtra'] = self._internalStats['curExtra']
  self._stats['totalMissing'] = self._internalStats['totalMissing']
  self._stats['totalExtra'] = self._internalStats['totalExtra']

  nPredictions = max(1, self._stats['nPredictions'])

  self._stats['curPredictionScore2'] = (
    self._internalStats['curPredictionScore2'])
  self._stats['predictionScoreAvg2'] = (
    self._internalStats['predictionScoreTotal2'] / nPredictions)
  self._stats['curFalseNegativeScore'] = (
    self._internalStats['curFalseNegativeScore'])
  self._stats['falseNegativeAvg'] = (
    self._internalStats['falseNegativeScoreTotal'] / nPredictions)
  self._stats['curFalsePositiveScore'] = (
    self._internalStats['curFalsePositiveScore'])
  self._stats['falsePositiveAvg'] = (
    self._internalStats['falsePositiveScoreTotal'] / nPredictions)

  self._stats['pctExtraAvg'] = (
    self._internalStats['pctExtraTotal'] / nPredictions)
  self._stats['pctMissingAvg'] = (
    self._internalStats['pctMissingTotal'] / nPredictions)

  self._stats['prevSequenceSignature'] = (
    self._internalStats['prevSequenceSignature'])

  return self._stats

def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,
                         colConfidence):
  """
  Called at the end of learning and inference, this routine will update a
  number of stats in our _internalStats dictionary, including our computed
  prediction score.

  :param stats: internal stats dictionary
  :param bottomUpNZ: list of the active bottom-up inputs
  :param predictedState: The columns we predicted on the last time step
    (should match the current bottomUpNZ in the best case)
  :param colConfidence: Column confidences we determined on the last time
    step
  """
  if not self.collectStats:
    return

  stats['nInfersSinceReset'] += 1

  # score the prediction made on the previous time step against the
  # current input
  (numExtra2, numMissing2, confidences2) = self._checkPrediction(
    patternNZs=[bottomUpNZ], output=predictedState,
    colConfidence=colConfidence)

  (predictionScore, positivePredictionScore,
   negativePredictionScore) = confidences2[0]

  stats['curPredictionScore2'] = float(predictionScore)
  stats['curFalseNegativeScore'] = 1.0 - float(positivePredictionScore)
  stats['curFalsePositiveScore'] = float(negativePredictionScore)
  stats['curMissing'] = numMissing2
  stats['curExtra'] = numExtra2

  # don't accumulate totals while burning in
  if stats['nInfersSinceReset'] <= self.burnIn:
    return

  stats['nPredictions'] += 1
  numExpected = max(1.0, float(len(bottomUpNZ)))

  stats['totalMissing'] += numMissing2
  stats['totalExtra'] += numExtra2
  stats['pctExtraTotal'] += 100.0 * numExtra2 / numExpected
  stats['pctMissingTotal'] += 100.0 * numMissing2 / numExpected
  stats['predictionScoreTotal2'] += float(predictionScore)
  stats['falseNegativeScoreTotal'] += 1.0 - float(positivePredictionScore)
  stats['falsePositiveScoreTotal'] += float(negativePredictionScore)

  if self.collectSequenceStats:
    # accumulate a column-normalized confidence histogram over the
    # currently active cells
    cc = self.cellConfidence['t-1'] * self.infActiveState['t']
    sconf = cc.sum(axis=1)
    for c in range(self.numberOfCols):
      if sconf[c] > 0:
        cc[c, :] /= sconf[c]
    self._internalStats['confHistogram'] += cc

def printState(self, aState):
  """
  Print an integer array that is the same shape as activeState.

  :param aState: TODO: document
  """
  def formatRow(var, i):
    s = ''
    for c in range(self.numberOfCols):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += str(var[c, i])
    s += ' '
    return s

  for i in xrange(self.cellsPerColumn):
    print formatRow(aState, i)

def printConfidence(self, aState, maxCols=20):
  """
  Print a floating point array that is the same shape as activeState.

  :param aState: TODO: document
  :param maxCols: TODO: document
  """
  def formatFPRow(var, i):
    s = ''
    for c in range(min(maxCols, self.numberOfCols)):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += ' %5.3f' % var[c, i]
    s += ' '
    return s

  for i in xrange(self.cellsPerColumn):
    print formatFPRow(aState, i)

def printColConfidence(self, aState, maxCols=20):
  """
  Print up to maxCols number from a flat floating point array.

  :param aState: TODO: document
  :param maxCols: TODO: document
  """
  def formatFPRow(var):
    s = ''
    for c in range(min(maxCols, self.numberOfCols)):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += ' %5.3f' % var[c]
    s += ' '
    return s

  print formatFPRow(aState)

def printStates(self, printPrevious=True, printLearnState=True):
  """
  TODO: document

  :param printPrevious:
  :param printLearnState:
  :return:
  """
  def formatRow(var, i):
    s = ''
    for c in range(self.numberOfCols):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += str(var[c, i])
    s += ' '
    return s

  print '\nInference Active state'
  for i in xrange(self.cellsPerColumn):
    if printPrevious:
      print formatRow(self.infActiveState['t-1'], i),
    print formatRow(self.infActiveState['t'], i)

  print 'Inference Predicted state'
  for i in xrange(self.cellsPerColumn):
    if printPrevious:
      print formatRow(self.infPredictedState['t-1'], i),
    print formatRow(self.infPredictedState['t'], i)

  if printLearnState:
    print '\nLearn Active state'
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.lrnActiveState['t-1'], i),
      print formatRow(self.lrnActiveState['t'], i)

    print 'Learn Predicted state'
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.lrnPredictedState['t-1'], i),
      print formatRow(self.lrnPredictedState['t'], i)

def printOutput(self, y):
  """
  TODO: document

  :param y:
  :return:
  """
  print 'Output'
  for i in xrange(self.cellsPerColumn):
    for c in xrange(self.numberOfCols):
      print int(y[c, i]),
    print

def printInput(self, x):
  """
  TODO: document

  :param x:
  :return:
  """
  print 'Input'
  for c in xrange(self.numberOfCols):
    print int(x[c]),
  print

def printParameters(self):
  """
  Print the parameter settings for the TM.
  """
  print 'numberOfCols=', self.numberOfCols
  print 'cellsPerColumn=', self.cellsPerColumn
  print 'minThreshold=', self.minThreshold
  print 'newSynapseCount=', self.newSynapseCount
  print 'activationThreshold=', self.activationThreshold
  print
  print 'initialPerm=', self.initialPerm
  print 'connectedPerm=', self.connectedPerm
  print 'permanenceInc=', self.permanenceInc
  print 'permanenceDec=', self.permanenceDec
  print 'permanenceMax=', self.permanenceMax
  print 'globalDecay=', self.globalDecay
  print
  print 'doPooling=', self.doPooling
  print 'segUpdateValidDuration=', self.segUpdateValidDuration
  print 'pamLength=', self.pamLength

def printActiveIndices(self, state, andValues=False):
  """
  Print the list of ``[column, cellIdx]`` indices for each of the active
  cells in state.

  :param state: TODO: document
  :param andValues: TODO: document
  """
  if len(state.shape) == 2:
    (cols, cellIdxs) = state.nonzero()
  else:
    cols = state.nonzero()[0]
    cellIdxs = numpy.zeros(len(cols))

  if len(cols) == 0:
    print 'NONE'
    return

  prevCol = -1
  for (col, cellIdx) in zip(cols, cellIdxs):
    if col != prevCol:
      if prevCol != -1:
        print '] ',
      print 'Col %d: [' % col,
      prevCol = col

    if andValues:
      if len(state.shape) == 2:
        value = state[col, cellIdx]
      else:
        value = state[col]
      print '%d: %s,' % (cellIdx, value),
    else:
      print '%d,' % cellIdx,
  print ']'

def printComputeEnd(self, output, learn=False):
  """
  Called at the end of inference to print out various diagnostic
  information based on the current verbosity level.

  :param output: TODO: document
  :param learn: TODO: document
  """
  if self.verbosity >= 3:
    print '----- computeEnd summary: '
    print 'learn:', learn
    print 'numBurstingCols: %s, ' % (
      self.infActiveState['t'].min(axis=1).sum()),
    print 'curPredScore2: %s, ' % (
      self._internalStats['curPredictionScore2']),
    print 'curFalsePosScore: %s, ' % (
      self._internalStats['curFalsePositiveScore']),
    print '1-curFalseNegScore: %s, ' % (
      1 - self._internalStats['curFalseNegativeScore'])
    print 'numSegments: ', self.getNumSegments(),
    print 'avgLearnedSeqLength: ', self.avgLearnedSeqLength

    print '----- infActiveState (%d on) ------' % (
      self.infActiveState['t'].sum())
    self.printActiveIndices(self.infActiveState['t'])
    if self.verbosity >= 6:
      self.printState(self.infActiveState['t'])

    print '----- infPredictedState (%d on)-----' % (
      self.infPredictedState['t'].sum())
    self.printActiveIndices(self.infPredictedState['t'])
    if self.verbosity >= 6:
      self.printState(self.infPredictedState['t'])

    print '----- lrnActiveState (%d on) ------' % (
      self.lrnActiveState['t'].sum())
    self.printActiveIndices(self.lrnActiveState['t'])
    if self.verbosity >= 6:
      self.printState(self.lrnActiveState['t'])

    print '----- lrnPredictedState (%d on)-----' % (
      self.lrnPredictedState['t'].sum())
    self.printActiveIndices(self.lrnPredictedState['t'])
    if self.verbosity >= 6:
      self.printState(self.lrnPredictedState['t'])

    print '----- cellConfidence -----'
    self.printActiveIndices(self.cellConfidence['t'], andValues=True)
    if self.verbosity >= 6:
      self.printConfidence(self.cellConfidence['t'])

    print '----- colConfidence -----'
    self.printActiveIndices(self.colConfidence['t'], andValues=True)

    print '----- cellConfidence[t-1] for currently active cells -----'
    cc = self.cellConfidence['t-1'] * self.infActiveState['t']
    self.printActiveIndices(cc, andValues=True)

    if self.verbosity == 4:
      print 'Cells, predicted segments only:'
      self.printCells(predictedOnly=True)
    elif self.verbosity >= 5:
      print 'Cells, all segments:'
      self.printCells(predictedOnly=False)
    print

  elif self.verbosity >= 1:
    print 'TM: learn:', learn
    print 'TM: active outputs(%d):' % len(output.nonzero()[0]),
    self.printActiveIndices(output.reshape(self.numberOfCols,
                                           self.cellsPerColumn))

def printSegmentUpdates(self):
  """
  TODO: document

  :return:
  """
  print '=== SEGMENT UPDATES ===, Num = ', len(self.segmentUpdates)
  for (key, updateList) in self.segmentUpdates.iteritems():
    (c, i) = (key[0], key[1])
    print c, i, updateList

def printCell(self, c, i, onlyActiveSegments=False):
  """
  TODO: document

  :param c:
  :param i:
  :param onlyActiveSegments:
  :return:
  """
  if len(self.cells[c][i]) > 0:
    print 'Column', c, 'Cell', i, ':',
    print len(self.cells[c][i]), 'segment(s)'
    for (j, s) in enumerate(self.cells[c][i]):
      isActive = self._isSegmentActive(s, self.infActiveState['t'])
      if not onlyActiveSegments or isActive:
        isActiveStr = '*' if isActive else ' '
        print '  %sSeg #%-3d' % (isActiveStr, j),
        s.debugPrint()

def printCells(self, predictedOnly=False):
  """
  TODO: document

  :param predictedOnly:
  :return:
  """
  if predictedOnly:
    print '--- PREDICTED CELLS ---'
  else:
    print '--- ALL CELLS ---'
  print 'Activation threshold=', self.activationThreshold,
  print 'min threshold=', self.minThreshold,
  print 'connected perm=', self.connectedPerm

  for c in xrange(self.numberOfCols):
    for i in xrange(self.cellsPerColumn):
      if not predictedOnly or self.infPredictedState['t'][c, i]:
        self.printCell(c, i, predictedOnly)

':param c: (int) column index :param i: (int) cell index within column :returns: (int) the total number of segments in cell (c, i)'
def getNumSegmentsInCell(self, c, i):
return len(self.cells[c][i])
':returns: (int) the total number of synapses'
def getNumSynapses(self):
nSyns = self.getSegmentInfo()[1]
return nSyns
':returns: (float) the average number of synapses per segment'
def getNumSynapsesPerSegmentAvg(self):
return (float(self.getNumSynapses()) / max(1, self.getNumSegments()))
':returns: (int) the total number of segments'
def getNumSegments(self):
nSegs = self.getSegmentInfo()[0]
return nSegs
':returns: (int) the total number of cells'
def getNumCells(self):
return (self.numberOfCols * self.cellsPerColumn)
':param c: (int) column index :param i: (int) cell index in column :param segIdx: (int) segment index to match :returns: (list) representing the segment on cell (c, i) with index ``segIdx``. [ [segmentID, sequenceSegmentFlag, positiveActivations, totalActivations, lastActiveIteration, lastPosDutyCycle, lastPosDutyCycleIteration], [col1, idx1, perm1], [col2, idx2, perm2], ...'
def getSegmentOnCell(self, c, i, segIdx):
seg = self.cells[c][i][segIdx]
retlist = [[seg.segID, seg.isSequenceSeg, seg.positiveActivations,
            seg.totalActivations, seg.lastActiveIteration,
            seg._lastPosDutyCycle, seg._lastPosDutyCycleIteration]]
retlist += seg.syns
return retlist
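To make the return shape concrete, a small usage sketch follows; ``tm`` is a hypothetical populated instance and the index arguments are illustrative:

segInfo = tm.getSegmentOnCell(0, 0, 0)        # hypothetical indices
header, synapses = segInfo[0], segInfo[1:]    # header row, then synapse triples
segID, isSequenceSeg = header[0], header[1]
for (srcCol, srcCellIdx, permanence) in synapses:
    print srcCol, srcCellIdx, permanence      # one (col, cellIdx, perm) per synapse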
'Store a dated potential segment update. The "date" (iteration index) is used later to determine whether the update is too old and should be forgotten. This is controlled by parameter ``segUpdateValidDuration``. :param c: (int) column index of the cell the update applies to :param i: (int) cell index within the column :param segUpdate: (SegmentUpdate) the update to queue'
def _addToSegmentUpdates(self, c, i, segUpdate):
if segUpdate is None or len(segUpdate.activeSynapses) == 0:
    return
key = (c, i)
if key in self.segmentUpdates:
    self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]
else:
    self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]
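The (lrnIterationIdx, segUpdate) tuples make the expiry rule described in the docstring a simple subtraction. A minimal sketch of that test, assuming a TM-like instance ``tm``; ``processUpdate`` is a hypothetical consumer for still-valid updates, not part of the class:

def expireOrProcess(tm, key, processUpdate):
    # Walk one cell's queue: drop updates older than segUpdateValidDuration,
    # hand live ones to the consumer, and keep them queued.
    remaining = []
    for (createdAt, segUpdate) in tm.segmentUpdates.get(key, []):
        if tm.lrnIterationIdx - createdAt > tm.segUpdateValidDuration:
            continue  # too old: forget it
        processUpdate(segUpdate)
        remaining.append((createdAt, segUpdate))
    tm.segmentUpdates[key] = remaining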
'Remove a segment update (called when seg update expires or is processed) :param updateInfo: (tuple) (creationDate, SegmentUpdate)'
def _removeSegmentUpdate(self, updateInfo):
creationDate, segUpdate = updateInfo
key = (segUpdate.columnIdx, segUpdate.cellIdx)
self.segmentUpdates[key].remove(updateInfo)
'Computes output for both learning and inference. With the default (\'normal\') output type, the output is the boolean OR of ``activeState`` and ``predictedState`` at ``t``; the other output types are derived from the active state alone. Stores ``currentOutput`` for ``checkPrediction``. :returns: (numpy array) flattened float32 output of length ``numberOfCols * cellsPerColumn``'
def _computeOutput(self):
if self.outputType == 'activeState1CellPerCol':
    mostActiveCellPerCol = self.cellConfidence['t'].argmax(axis=1)
    self.currentOutput = numpy.zeros(self.infActiveState['t'].shape,
                                     dtype='float32')
    numCols = self.currentOutput.shape[0]
    self.currentOutput[xrange(numCols), mostActiveCellPerCol] = 1
    activeCols = self.infActiveState['t'].max(axis=1)
    inactiveCols = numpy.where(activeCols == 0)[0]
    self.currentOutput[inactiveCols, :] = 0
elif self.outputType == 'activeState':
    self.currentOutput = self.infActiveState['t']
elif self.outputType == 'normal':
    self.currentOutput = numpy.logical_or(self.infPredictedState['t'],
                                          self.infActiveState['t'])
else:
    raise RuntimeError('Unimplemented outputType')
return self.currentOutput.reshape(-1).astype('float32')
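A toy run of the 'activeState1CellPerCol' branch, showing how the per-column argmax over cell confidence picks a single winner cell and how fully inactive columns are then muted; the shapes and numbers are made up:

import numpy

cellConfidence = numpy.array([[0.1, 0.7, 0.2],   # 4 columns x 3 cells
                              [0.0, 0.0, 0.0],
                              [0.3, 0.1, 0.6],
                              [0.2, 0.2, 0.2]], dtype='float32')
infActiveState = numpy.array([[0, 1, 0],
                              [0, 0, 0],
                              [1, 0, 1],
                              [0, 0, 0]], dtype='float32')

mostActive = cellConfidence.argmax(axis=1)           # winner cell per column
output = numpy.zeros(infActiveState.shape, dtype='float32')
output[numpy.arange(output.shape[0]), mostActive] = 1
output[numpy.where(infActiveState.max(axis=1) == 0)[0], :] = 0
print output    # rows 1 and 3 are all zeros: those columns were inactive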
'Return the current active state. This is called by the node to obtain the sequence output of the TM. :returns: (numpy array) flattened float32 copy of ``infActiveState[\'t\']``'
def _getActiveState(self):
return self.infActiveState['t'].reshape((-1)).astype('float32')
':returns: numpy array of predicted cells, representing the current predicted state. ``predictedCells[c][i]`` represents the state of the i\'th cell in the c\'th column.'
def getPredictedState(self):
return self.infPredictedState['t']
'This function gives the future predictions for <nSteps> timesteps starting from the current TM state. The TM is returned to its original state at the end before returning. 1. We save the TM state. 2. Loop for nSteps: a. Turn on with lateral support from the current active cells. b. Set the predicted cells as the next step\'s active cells. In the learn and infer methods, this step uses the bottom-up input to correct the predictions; here there is no input, so the predictions are taken as-is. 3. Revert the TM state to the time before prediction. :param nSteps: (int) The number of future time steps to be predicted :returns: all the future predictions - a numpy array of type "float32" and shape (nSteps, numberOfCols). The ith row gives the TM prediction for each column at a future timestep (t+i+1).'
def predict(self, nSteps):
pristineTPDynamicState = self._getTPDynamicState()
assert nSteps > 0
multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),
                                         dtype='float32')
step = 0
while True:
    multiStepColumnPredictions[step, :] = self.topDownCompute()
    if step == nSteps - 1:
        break
    step += 1
    self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
    self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
    self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
    self.infActiveState['t'][:, :] = self.infPredictedState['t-1'][:, :]
    self.infPredictedState['t'].fill(0)
    self.cellConfidence['t'].fill(0.0)
    self._inferPhase2()
self._setTPDynamicState(pristineTPDynamicState)
return multiStepColumnPredictions
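A usage sketch for predict(), assuming ``tm`` is a trained BacktrackingTM-like instance that has already consumed some input through its compute loop:

nSteps = 3
columnPredictions = tm.predict(nSteps)
assert columnPredictions.shape == (nSteps, tm.numberOfCols)
# Row i is the column-level prediction for timestep t+i+1. Because the
# dynamic state is snapshotted and restored, calling predict() twice in
# a row yields identical results.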
'Any newly added dynamic states in the TM should be added to this list. :returns: (list) the names of the TM dynamic state variables.'
def _getTPDynamicStateVariableNames(self):
return ['infActiveState', 'infPredictedState', 'lrnActiveState', 'lrnPredictedState', 'cellConfidence', 'colConfidence']
':returns: (dict) all the dynamic state variable names as keys, with their values at this instant as values.'
def _getTPDynamicState(self):
tpDynamicState = dict()
for variableName in self._getTPDynamicStateVariableNames():
    tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])
return tpDynamicState
'Set all the dynamic state variables from the ``tpDynamicState`` dict, which has all the dynamic state variable names as keys and their values at this instant as values. Note that the dict is consumed: each entry is popped as it is applied to the TM object.'
def _setTPDynamicState(self, tpDynamicState):
for variableName in self._getTPDynamicStateVariableNames():
    self.__dict__[variableName] = tpDynamicState.pop(variableName)
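Together, _getTPDynamicState and _setTPDynamicState form the checkpoint/restore pair that predict() relies on. A sketch of the pattern as it might appear inside a method of the same class; note that the restore consumes the snapshot dict (entries are popped), so one snapshot supports one restore:

snapshot = self._getTPDynamicState()    # deep-copied checkpoint
# ... freely mutate infActiveState, cellConfidence, etc. ...
self._setTPDynamicState(snapshot)       # state is exactly as before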
'Update our moving average of learned sequence length.'
def _updateAvgLearnedSeqLength(self, prevSeqLength):
if self.lrnIterationIdx < 100:
    alpha = 0.5
else:
    alpha = 0.1
self.avgLearnedSeqLength = ((1.0 - alpha) * self.avgLearnedSeqLength +
                            alpha * prevSeqLength)
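The update is a standard exponential moving average, with a faster-moving alpha during the first 100 learning iterations. A short numeric trace with made-up sequence lengths, using the early alpha of 0.5:

avg = 0.0
for seqLength in [4, 4, 6]:
    avg = (1.0 - 0.5) * avg + 0.5 * seqLength
    print avg    # prints 2.0, then 3.0, then 4.5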
':returns: Moving average of learned sequence length'
def getAvgLearnedSeqLength(self):
return self.avgLearnedSeqLength
'This "backtracks" our inference state, trying to see if we can lock onto the current set of inputs by assuming the sequence started up to N steps ago on start cells. This will adjust @ref infActiveState[\'t\'] if it does manage to lock on to a sequence that started earlier. It will also compute infPredictedState[\'t\'] based on the possibly updated @ref infActiveState[\'t\'], so there is no need to call inferPhase2() after calling inferBacktrack(). This looks at: - ``infActiveState[\'t\']`` This updates/modifies: - ``infActiveState[\'t\']`` - ``infPredictedState[\'t\']`` - ``colConfidence[\'t\']`` - ``cellConfidence[\'t\']`` How it works: This method gets called from :meth:`updateInferenceState` when we detect either of the following two conditions: #. The current bottom-up input had too many un-expected columns #. We fail to generate a sufficient number of predicted columns for the next time step. Either of these two conditions indicate that we have fallen out of a learned sequence. Rather than simply "giving up" and bursting on the unexpected input columns, a better approach is to see if perhaps we are in a sequence that started a few steps ago. The real world analogy is that you are driving along and suddenly hit a dead-end, you will typically go back a few turns ago and pick up again from a familiar intersection. This back-tracking goes hand in hand with our learning methodology, which always tries to learn again from start cells after it loses context. This results in a network that has learned multiple, overlapping paths through the input data, each starting at different points. The lower the global decay and the more repeatability in the data, the longer each of these paths will end up being. The goal of this function is to find out which starting point in the past leads to the current input with the most context as possible. This gives us the best chance of predicting accurately going forward. Consider the following example, where you have learned the following sub-sequences which have the given frequencies: ? - Q - C - D - E 10X seq 0 ? - B - C - D - F 1X seq 1 ? - B - C - H - I 2X seq 2 ? - B - C - D - F 3X seq 3 ? - Z - A - B - C - D - J 2X seq 4 ? - Z - A - B - C - H - I 1X seq 5 ? - Y - A - B - C - D - F 3X seq 6 W - X - Z - A - B - C - D <= input history current time step Suppose, in the current time step, the input pattern is D and you have not predicted D, so you need to backtrack. Suppose we can backtrack up to 6 steps in the past, which path should we choose? From the table above, we can see that the correct answer is to assume we are in seq 4. How do we implement the backtrack to give us this right answer? The current implementation takes the following approach: #. Start from the farthest point in the past. #. For each starting point S, calculate the confidence of the current input, conf(startingPoint=S), assuming we followed that sequence. Note that we must have learned at least one sequence that starts at point S. #. If conf(startingPoint=S) is significantly different from conf(startingPoint=S-1), then choose S-1 as the starting point. The assumption here is that starting point S-1 is the starting point of a learned sub-sequence that includes the current input in it\'s path and that started the longest ago. It thus has the most context and will be the best predictor going forward. 
From the statistics in the above table, we can compute what the confidences will be for each possible starting point: startingPoint confidence of D B (t-2) 4/6 = 0.667 (seq 1,3)/(seq 1,2,3) Z (t-4) 2/3 = 0.667 (seq 4)/(seq 4,5) First of all, we do not compute any confidences at starting points t-1, t-3, t-5, t-6 because there are no learned sequences that start at those points. Notice here that Z is the starting point of the longest sub-sequence leading up to the current input. Event though starting at t-2 and starting at t-4 give the same confidence value, we choose the sequence starting at t-4 because it gives the most context, and it mirrors the way that learning extends sequences. :param activeColumns: (list) of active column indices'
def _inferBacktrack(self, activeColumns):
numPrevPatterns = len(self._prevInfPatterns)
if numPrevPatterns <= 0:
    return
currentTimeStepsOffset = numPrevPatterns - 1
self.infActiveState['backup'][:, :] = self.infActiveState['t'][:, :]
self.infPredictedState['backup'][:, :] = self.infPredictedState['t-1'][:, :]
badPatterns = []
inSequence = False
candConfidence = None
candStartOffset = None
for startOffset in range(0, numPrevPatterns):
    if startOffset == currentTimeStepsOffset and candConfidence is not None:
        break
    if self.verbosity >= 3:
        print ('Trying to lock-on using startCell state from %d steps ago:'
               % (numPrevPatterns - 1 - startOffset),
               self._prevInfPatterns[startOffset])
    inSequence = False
    for offset in range(startOffset, numPrevPatterns):
        if offset == currentTimeStepsOffset:
            totalConfidence = self.colConfidence['t'][activeColumns].sum()
        self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
        inSequence = self._inferPhase1(self._prevInfPatterns[offset],
                                       useStartCells=(offset == startOffset))
        if not inSequence:
            break
        if self.verbosity >= 3:
            print (' backtrack: computing predictions from ',
                   self._prevInfPatterns[offset])
        inSequence = self._inferPhase2()
        if not inSequence:
            break
    if not inSequence:
        badPatterns.append(startOffset)
        continue
    candConfidence = totalConfidence
    candStartOffset = startOffset
    if self.verbosity >= 3 and startOffset != currentTimeStepsOffset:
        print ((' # Prediction confidence of current input after starting '
                '%d steps ago:' % (numPrevPatterns - 1 - startOffset)),
               totalConfidence)
    if candStartOffset == currentTimeStepsOffset:
        break
    self.infActiveState['candidate'][:, :] = self.infActiveState['t'][:, :]
    self.infPredictedState['candidate'][:, :] = self.infPredictedState['t'][:, :]
    self.cellConfidence['candidate'][:, :] = self.cellConfidence['t'][:, :]
    self.colConfidence['candidate'][:] = self.colConfidence['t'][:]
    break
if candStartOffset is None:
    if self.verbosity >= 3:
        print 'Failed to lock on. Falling back to bursting all unpredicted.'
    self.infActiveState['t'][:, :] = self.infActiveState['backup'][:, :]
    self._inferPhase2()
else:
    if self.verbosity >= 3:
        print (('Locked on to current input by using start cells from %d '
                'steps ago:' % (numPrevPatterns - 1 - candStartOffset)),
               self._prevInfPatterns[candStartOffset])
    if candStartOffset != currentTimeStepsOffset:
        self.infActiveState['t'][:, :] = self.infActiveState['candidate'][:, :]
        self.infPredictedState['t'][:, :] = self.infPredictedState['candidate'][:, :]
        self.cellConfidence['t'][:, :] = self.cellConfidence['candidate'][:, :]
        self.colConfidence['t'][:] = self.colConfidence['candidate'][:]
for i in range(numPrevPatterns):
    if (i in badPatterns or
            (candStartOffset is not None and i <= candStartOffset)):
        if self.verbosity >= 3:
            print ('Removing useless pattern from history:',
                   self._prevInfPatterns[0])
        self._prevInfPatterns.pop(0)
    else:
        break
self.infPredictedState['t-1'][:, :] = self.infPredictedState['backup'][:, :]
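Stripped of the state snapshotting and history pruning, the outer loop above reduces to "take the earliest starting offset whose replay stays in-sequence all the way to the current input". A hypothetical distillation; ``replayFrom`` stands in for the phase-1/phase-2 replay performed by the inner loop:

def findLockOnOffset(numPrevPatterns, replayFrom):
    # replayFrom(startOffset) -> (lockedOn, confidence). The earliest
    # successful start wins because it carries the most context forward.
    for startOffset in range(numPrevPatterns):
        lockedOn, confidence = replayFrom(startOffset)
        if lockedOn:
            return startOffset, confidence
    return None, None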
'Update the inference active state from the last set of predictions and the current bottom-up. This looks at: - ``infPredictedState[\'t-1\']`` This modifies: - ``infActiveState[\'t\']`` :param activeColumns: (list) active bottom-up column indices :param useStartCells: (bool) If True, ignore previous predictions and simply turn on the start cells in the active columns :returns: (bool) True if the current input was sufficiently predicted, OR if we started over on startCells. False indicates that the current input was NOT predicted, and we are now bursting on most columns.'
def _inferPhase1(self, activeColumns, useStartCells):
self.infActiveState['t'].fill(0)
numPredictedColumns = 0
if useStartCells:
    for c in activeColumns:
        self.infActiveState['t'][c, 0] = 1
else:
    for c in activeColumns:
        predictingCells = numpy.where(self.infPredictedState['t-1'][c] == 1)[0]
        numPredictingCells = len(predictingCells)
        if numPredictingCells > 0:
            self.infActiveState['t'][c, predictingCells] = 1
            numPredictedColumns += 1
        else:
            self.infActiveState['t'][c, :] = 1
if useStartCells or numPredictedColumns >= 0.5 * len(activeColumns):
    return True
else:
    return False
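The in-sequence criterion is simply "at least half of the active columns had predicted cells". A toy check with hypothetical numbers:

activeColumns = [3, 7, 12, 20]
numPredictedColumns = 2    # say two of the four columns had predicted cells
print numPredictedColumns >= 0.5 * len(activeColumns)    # True: at threshold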
'Phase 2 for the inference state. This computes the predicted state, then checks to ensure that the predicted state is not over-saturated, i.e. that it does not look too much like a burst. Over-saturation indicates that so many separate paths have been learned from the current input columns to the predicted columns that bursting on the current input would most likely generate mix-and-match errors on cells in the predicted columns. If we detect this situation, we instead turn on only the start cells in the current active columns and re-generate the predicted state from those. This looks at: - ``infActiveState[\'t\']`` This modifies: - ``infPredictedState[\'t\']`` - ``colConfidence[\'t\']`` - ``cellConfidence[\'t\']`` :returns: (bool) True if we have a decent guess as to the next input. Returning False from here indicates to the caller that we have reached the end of a learned sequence.'
def _inferPhase2(self):
self.infPredictedState['t'].fill(0)
self.cellConfidence['t'].fill(0)
self.colConfidence['t'].fill(0)
for c in xrange(self.numberOfCols):
    for i in xrange(self.cellsPerColumn):
        for s in self.cells[c][i]:
            numActiveSyns = self._getSegmentActivityLevel(
                s, self.infActiveState['t'], connectedSynapsesOnly=False)
            if numActiveSyns < self.activationThreshold:
                continue
            if self.verbosity >= 6:
                print 'incorporating DC from cell[%d,%d]: ' % (c, i),
                s.debugPrint()
            dc = s.dutyCycle()
            self.cellConfidence['t'][c, i] += dc
            self.colConfidence['t'][c] += dc
            if self._isSegmentActive(s, self.infActiveState['t']):
                self.infPredictedState['t'][c, i] = 1
sumConfidences = self.colConfidence['t'].sum()
if sumConfidences > 0:
    self.colConfidence['t'] /= sumConfidences
    self.cellConfidence['t'] /= sumConfidences
numPredictedCols = self.infPredictedState['t'].max(axis=1).sum()
if numPredictedCols >= 0.5 * self.avgInputDensity:
    return True
else:
    return False
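Confidences are accumulated as raw segment duty cycles and then normalized so that the column confidences sum to 1. A minimal numeric sketch of that normalization on made-up per-column sums:

import numpy

colConfidence = numpy.array([0.0, 1.2, 0.3, 0.0, 0.5], dtype='float32')
total = colConfidence.sum()
if total > 0:
    colConfidence /= total
print colConfidence    # [ 0.    0.6   0.15  0.    0.25 ]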
'Update the inference state. Called from :meth:`compute` on every iteration. :param activeColumns: (list) active column indices.'
def _updateInferenceState(self, activeColumns):
self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
self.colConfidence['t-1'][:] = self.colConfidence['t'][:]
if self.maxInfBacktrack > 0:
    if len(self._prevInfPatterns) > self.maxInfBacktrack:
        self._prevInfPatterns.pop(0)
    self._prevInfPatterns.append(activeColumns)
inSequence = self._inferPhase1(activeColumns, self.resetCalled)
if not inSequence:
    if self.verbosity >= 3:
        print ('Too much unpredicted input, re-tracing back to try and '
               'lock on at an earlier timestep.')
    self._inferBacktrack(activeColumns)
    return
inSequence = self._inferPhase2()
if not inSequence:
    if self.verbosity >= 3:
        print ('Not enough predictions going forward, re-tracing back to '
               'try and lock on at an earlier timestep.')
    self._inferBacktrack(activeColumns)
'A utility method called from _learnBacktrack. This will backtrack starting from the given startOffset in our prevLrnPatterns queue. It returns True if the backtrack was successful and we managed to get predictions all the way up to the current time step. If readOnly, then no segments are updated or modified; otherwise, all segment updates that belong to the given path are applied. This updates/modifies: - lrnActiveState[\'t\'] This trashes: - lrnPredictedState[\'t\'] - lrnPredictedState[\'t-1\'] - lrnActiveState[\'t-1\'] :param startOffset: (int) start offset within the prevLrnPatterns input history :param readOnly: (bool) If True, only probe the path; do not update or modify any segments :return: True if we managed to lock on to a sequence that started earlier. If False, we lost predictions somewhere along the way leading up to the current time.'
def _learnBacktrackFrom(self, startOffset, readOnly=True):
numPrevPatterns = len(self._prevLrnPatterns)
currentTimeStepsOffset = numPrevPatterns - 1
if not readOnly:
    self.segmentUpdates = {}
if self.verbosity >= 3:
    if readOnly:
        print (('Trying to lock-on using startCell state from %d steps ago:'
                % ((numPrevPatterns - 1) - startOffset)),
               self._prevLrnPatterns[startOffset])
    else:
        print (('Locking on using startCell state from %d steps ago:'
                % ((numPrevPatterns - 1) - startOffset)),
               self._prevLrnPatterns[startOffset])
inSequence = True
for offset in range(startOffset, numPrevPatterns):
    self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
    self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]
    inputColumns = self._prevLrnPatterns[offset]
    if not readOnly:
        self._processSegmentUpdates(inputColumns)
    if offset == startOffset:
        self.lrnActiveState['t'].fill(0)
        for c in inputColumns:
            self.lrnActiveState['t'][c, 0] = 1
        inSequence = True
    else:
        inSequence = self._learnPhase1(inputColumns, readOnly=readOnly)
    if not inSequence or offset == currentTimeStepsOffset:
        break
    if self.verbosity >= 3:
        print ' backtrack: computing predictions from ', inputColumns
    self._learnPhase2(readOnly=readOnly)
return inSequence
'This "backtracks" our learning state, trying to see if we can lock onto the current set of inputs by assuming the sequence started up to N steps ago on start cells. This will adjust @ref lrnActiveState[\'t\'] if it does manage to lock on to a sequence that started earlier. :returns: >0 if we managed to lock on to a sequence that started earlier. The value returned is how many steps in the past we locked on. If 0 is returned, the caller needs to change active state to start on start cells. How it works: This method gets called from updateLearningState when we detect either of the following two conditions: #. Our PAM counter (@ref pamCounter) expired #. We reached the max allowed learned sequence length Either of these two conditions indicate that we want to start over on start cells. Rather than start over on start cells on the current input, we can accelerate learning by backtracking a few steps ago and seeing if perhaps a sequence we already at least partially know already started. This updates/modifies: - @ref lrnActiveState[\'t\'] This trashes: - @ref lrnActiveState[\'t-1\'] - @ref lrnPredictedState[\'t\'] - @ref lrnPredictedState[\'t-1\']'
def _learnBacktrack(self):
numPrevPatterns = len(self._prevLrnPatterns) - 1
if numPrevPatterns <= 0:
    if self.verbosity >= 3:
        print 'lrnBacktrack: No available history to backtrack from'
    return False
badPatterns = []
inSequence = False
for startOffset in range(0, numPrevPatterns):
    inSequence = self._learnBacktrackFrom(startOffset, readOnly=True)
    if inSequence:
        break
    badPatterns.append(startOffset)
if not inSequence:
    if self.verbosity >= 3:
        print ('Failed to lock on. Falling back to start cells on current '
               'time step.')
    self._prevLrnPatterns = []
    return False
if self.verbosity >= 3:
    print (('Discovered path to current input by using start cells from %d '
            'steps ago:' % (numPrevPatterns - startOffset)),
           self._prevLrnPatterns[startOffset])
self._learnBacktrackFrom(startOffset, readOnly=False)
for i in range(numPrevPatterns):
    if i in badPatterns or i <= startOffset:
        if self.verbosity >= 3:
            print ('Removing useless pattern from history:',
                   self._prevLrnPatterns[0])
        self._prevLrnPatterns.pop(0)
    else:
        break
return numPrevPatterns - startOffset
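The body above is a two-pass use of _learnBacktrackFrom: a cheap read-only probe over candidate starting offsets, then a single destructive replay of the winner. Schematically (a sketch of the control flow only, omitting the history pruning):

# Pass 1: probe candidate starting offsets without touching any segments.
winner = None
for startOffset in range(numPrevPatterns):
    if self._learnBacktrackFrom(startOffset, readOnly=True):
        winner = startOffset
        break

# Pass 2: replay the winning path for real, applying segment updates.
if winner is not None:
    self._learnBacktrackFrom(winner, readOnly=False)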
'Compute the learning active state given the predicted state and the bottom-up input. :param activeColumns: (list) active bottom-up column indices :param readOnly: (bool) True if being called from backtracking logic; tells us not to increment any segment duty cycles or queue up any updates. :returns: True if the current input was sufficiently predicted, OR if we started over on startCells. False indicates that the current input was NOT predicted well enough to be considered "inSequence". This looks at: - @ref lrnActiveState[\'t-1\'] - @ref lrnPredictedState[\'t-1\'] This modifies: - @ref lrnActiveState[\'t\'] - @ref lrnActiveState[\'t-1\']'
def _learnPhase1(self, activeColumns, readOnly=False):
self.lrnActiveState['t'].fill(0)
numUnpredictedColumns = 0
for c in activeColumns:
    predictingCells = numpy.where(self.lrnPredictedState['t-1'][c] == 1)[0]
    numPredictedCells = len(predictingCells)
    assert numPredictedCells <= 1
    if numPredictedCells == 1:
        i = predictingCells[0]
        self.lrnActiveState['t'][c, i] = 1
        continue
    numUnpredictedColumns += 1
    if readOnly:
        continue
    i, s, numActive = self._getBestMatchingCell(
        c, self.lrnActiveState['t-1'], self.minThreshold)
    if s is not None and s.isSequenceSegment():
        if self.verbosity >= 4:
            print 'Learn branch 0, found segment match. Learning on col=', c
        self.lrnActiveState['t'][c, i] = 1
        segUpdate = self._getSegmentActiveSynapses(
            c, i, s, self.lrnActiveState['t-1'], newSynapses=True)
        s.totalActivations += 1
        trimSegment = self._adaptSegment(segUpdate)
        if trimSegment:
            self._trimSegmentsInCell(c, i, [s], minPermanence=1e-05,
                                     minNumSyns=0)
    else:
        i = self._getCellForNewSegment(c)
        if self.verbosity >= 4:
            print 'Learn branch 1, no match. Learning on col=', c,
            print ', newCellIdxInCol=', i
        self.lrnActiveState['t'][c, i] = 1
        segUpdate = self._getSegmentActiveSynapses(
            c, i, None, self.lrnActiveState['t-1'], newSynapses=True)
        segUpdate.sequenceSegment = True
        self._adaptSegment(segUpdate)
numBottomUpColumns = len(activeColumns)
if numUnpredictedColumns < numBottomUpColumns / 2:
    return True
else:
    return False
'Compute the predicted segments given the current set of active cells. :param readOnly: (bool) True if being called from backtracking logic; tells us not to increment any segment duty cycles or queue up any updates. This computes lrnPredictedState[\'t\'] and queues up any segments that became active (and the list of active synapses for each segment) into the segmentUpdates queue. This looks at: - @ref lrnActiveState[\'t\'] This modifies: - @ref lrnPredictedState[\'t\'] - @ref segmentUpdates'
def _learnPhase2(self, readOnly=False):
self.lrnPredictedState['t'].fill(0)
for c in xrange(self.numberOfCols):
    i, s, numActive = self._getBestMatchingCell(
        c, self.lrnActiveState['t'], minThreshold=self.activationThreshold)
    if i is None:
        continue
    self.lrnPredictedState['t'][c, i] = 1
    if readOnly:
        continue
    segUpdate = self._getSegmentActiveSynapses(
        c, i, s, activeState=self.lrnActiveState['t'],
        newSynapses=(numActive < self.newSynapseCount))
    s.totalActivations += 1
    self._addToSegmentUpdates(c, i, segUpdate)
    if self.doPooling:
        predSegment = self._getBestMatchingSegment(c, i,
                                                   self.lrnActiveState['t-1'])
        segUpdate = self._getSegmentActiveSynapses(
            c, i, predSegment, self.lrnActiveState['t-1'], newSynapses=True)
        self._addToSegmentUpdates(c, i, segUpdate)