'Update the learning state. Called from compute() on every iteration :param activeColumns List of active column indices'
def _updateLearningState(self, activeColumns):
  self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
  self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]

  if self.maxLrnBacktrack > 0:
    if len(self._prevLrnPatterns) > self.maxLrnBacktrack:
      self._prevLrnPatterns.pop(0)
    self._prevLrnPatterns.append(activeColumns)
    if self.verbosity >= 4:
      print 'Previous learn patterns: \n'
      print self._prevLrnPatterns

  self._processSegmentUpdates(activeColumns)

  if self.pamCounter > 0:
    self.pamCounter -= 1
  self.learnedSeqLength += 1

  if not self.resetCalled:
    inSequence = self._learnPhase1(activeColumns)
    if inSequence:
      self.pamCounter = self.pamLength

  if self.verbosity >= 3:
    print 'pamCounter = ', self.pamCounter, 'seqLength = ', self.learnedSeqLength

  if (self.resetCalled or self.pamCounter == 0 or
      (self.maxSeqLength != 0 and self.learnedSeqLength >= self.maxSeqLength)):
    if self.verbosity >= 3:
      if self.resetCalled:
        print 'Starting over:', activeColumns, '(reset was called)'
      elif self.pamCounter == 0:
        print 'Starting over:', activeColumns, '(PAM counter expired)'
      else:
        print 'Starting over:', activeColumns, '(reached maxSeqLength)'

    if self.pamCounter == 0:
      seqLength = self.learnedSeqLength - self.pamLength
    else:
      seqLength = self.learnedSeqLength
    if self.verbosity >= 3:
      print ' learned sequence length was:', seqLength
    self._updateAvgLearnedSeqLength(seqLength)

    backSteps = 0
    if not self.resetCalled:
      backSteps = self._learnBacktrack()

    if self.resetCalled or backSteps is None or backSteps == 0:
      backSteps = 0
      self.lrnActiveState['t'].fill(0)
      for c in activeColumns:
        self.lrnActiveState['t'][c, 0] = 1
      self._prevLrnPatterns = []

    self.pamCounter = self.pamLength
    self.learnedSeqLength = backSteps

    self.segmentUpdates = {}

  self._learnPhase2()
'Handle one compute, possibly learning. .. note:: It is an error to have both ``enableLearn`` and ``enableInference`` set to False .. note:: By default, we don\'t compute the inference output when learning because it slows things down, but you can override this by passing in True for ``enableInference``. :param bottomUpInput: The bottom-up input as numpy list, typically from a spatial pooler. :param enableLearn: (bool) If true, perform learning :param enableInference: (bool) If None, default behavior is to disable the inference output when ``enableLearn`` is on. If true, compute the inference output. If false, do not compute the inference output. :returns: TODO: document'
def compute(self, bottomUpInput, enableLearn, enableInference=None):
  if enableInference is None:
    if enableLearn:
      enableInference = False
    else:
      enableInference = True

  assert (enableLearn or enableInference)

  activeColumns = bottomUpInput.nonzero()[0]
  if enableLearn:
    self.lrnIterationIdx += 1
  self.iterationIdx += 1

  if self.verbosity >= 3:
    print ('\n==== PY Iteration: %d =====' % self.iterationIdx)
    print 'Active cols:', activeColumns

  if enableLearn:
    if self.lrnIterationIdx in Segment.dutyCycleTiers:
      for (c, i) in itertools.product(xrange(self.numberOfCols),
                                      xrange(self.cellsPerColumn)):
        for segment in self.cells[c][i]:
          segment.dutyCycle()

    if self.avgInputDensity is None:
      self.avgInputDensity = len(activeColumns)
    else:
      self.avgInputDensity = (0.99 * self.avgInputDensity +
                              0.01 * len(activeColumns))

  if enableInference:
    self._updateInferenceState(activeColumns)

  if enableLearn:
    self._updateLearningState(activeColumns)

    if self.globalDecay > 0.0 and (self.lrnIterationIdx % self.maxAge) == 0:
      for (c, i) in itertools.product(xrange(self.numberOfCols),
                                      xrange(self.cellsPerColumn)):
        segsToDel = []
        for segment in self.cells[c][i]:
          age = self.lrnIterationIdx - segment.lastActiveIteration
          if age <= self.maxAge:
            continue

          synsToDel = []
          for synapse in segment.syns:
            synapse[2] = synapse[2] - self.globalDecay
            if synapse[2] <= 0:
              synsToDel.append(synapse)

          if len(synsToDel) == segment.getNumSynapses():
            segsToDel.append(segment)
          elif len(synsToDel) > 0:
            for syn in synsToDel:
              segment.syns.remove(syn)

        for seg in segsToDel:
          self._cleanUpdatesList(c, i, seg)
          self.cells[c][i].remove(seg)

  if self.collectStats:
    if enableInference:
      predictedState = self.infPredictedState['t-1']
    else:
      predictedState = self.lrnPredictedState['t-1']
    self._updateStatsInferEnd(self._internalStats, activeColumns,
                              predictedState, self.colConfidence['t-1'])

  output = self._computeOutput()

  self.printComputeEnd(output, learn=enableLearn)
  self.resetCalled = False
  return output
'TODO: document :param bottomUpInput: :return:'
def infer(self, bottomUpInput):
return self.compute(bottomUpInput, enableLearn=False)
'TODO: document :param bottomUpInput: :param enableInference: :return:'
def learn(self, bottomUpInput, enableInference=None):
return self.compute(bottomUpInput, enableLearn=True, enableInference=enableInference)
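The following is a minimal, hypothetical usage sketch of these entry points; the module path, constructor arguments, and input width are assumptions for illustration, not taken from this file:

# Hypothetical usage sketch; module path and constructor arguments are assumed.
import numpy
from nupic.algorithms.backtracking_tm import BacktrackingTM

tm = BacktrackingTM(numberOfCols=64, cellsPerColumn=4)

pattern = numpy.zeros(64, dtype='float32')
pattern[[3, 17, 42]] = 1                    # sparse bottom-up input

tm.learn(pattern)                           # learning only (inference off by default)
predicted = tm.infer(pattern)               # inference only
both = tm.compute(pattern, enableLearn=True, enableInference=True)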
'Returns the stored cell confidences from the last compute. :returns: Column confidence scores'
def _columnConfidences(self):
return self.colConfidence['t']
'For now, we will assume there is no one above us and that bottomUpOut is simply the output that corresponds to our currently stored column confidences. :returns: the same thing as :meth:`_columnConfidences`'
def topDownCompute(self):
return self._columnConfidences()
'This method goes through a list of segments for a given cell and deletes all synapses whose permanence is less than minPermanence and deletes any segments that have less than minNumSyns synapses remaining. :param colIdx Column index :param cellIdx Cell index within the column :param segList List of segment references :param minPermanence Any syn whose permanence is 0 or < minPermanence will be deleted. :param minNumSyns Any segment with less than minNumSyns synapses remaining in it will be deleted. :returns: tuple (numSegsRemoved, numSynsRemoved)'
def _trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence, minNumSyns):
  if minPermanence is None:
    minPermanence = self.connectedPerm
  if minNumSyns is None:
    minNumSyns = self.activationThreshold

  nSegsRemoved, nSynsRemoved = 0, 0

  segsToDel = []
  for segment in segList:
    synsToDel = [syn for syn in segment.syns if syn[2] < minPermanence]
    if len(synsToDel) == len(segment.syns):
      segsToDel.append(segment)
    else:
      if len(synsToDel) > 0:
        for syn in synsToDel:
          segment.syns.remove(syn)
          nSynsRemoved += 1
      if len(segment.syns) < minNumSyns:
        segsToDel.append(segment)

  nSegsRemoved += len(segsToDel)
  for seg in segsToDel:
    self._cleanUpdatesList(colIdx, cellIdx, seg)
    self.cells[colIdx][cellIdx].remove(seg)
    nSynsRemoved += len(seg.syns)

  return (nSegsRemoved, nSynsRemoved)
'This method deletes all synapses whose permanence is less than minPermanence and deletes any segments that have less than minNumSyns synapses remaining. :param minPermanence: (float) Any syn whose permanence is 0 or < ``minPermanence`` will be deleted. If None is passed in, then ``self.connectedPerm`` is used. :param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses remaining in it will be deleted. If None is passed in, then ``self.activationThreshold`` is used. :returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``'
def trimSegments(self, minPermanence=None, minNumSyns=None):
  if minPermanence is None:
    minPermanence = self.connectedPerm
  if minNumSyns is None:
    minNumSyns = self.activationThreshold

  totalSegsRemoved, totalSynsRemoved = 0, 0
  for (c, i) in itertools.product(xrange(self.numberOfCols),
                                  xrange(self.cellsPerColumn)):
    (segsRemoved, synsRemoved) = self._trimSegmentsInCell(
        colIdx=c, cellIdx=i, segList=self.cells[c][i],
        minPermanence=minPermanence, minNumSyns=minNumSyns)
    totalSegsRemoved += segsRemoved
    totalSynsRemoved += synsRemoved

  if self.verbosity >= 5:
    print 'Cells, all segments:'
    self.printCells(predictedOnly=False)

  return (totalSegsRemoved, totalSynsRemoved)
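As a hedged illustration of how a caller might prune a trained instance (tm, the thresholds, and the surrounding setup are assumed):

# Hypothetical cleanup pass on an assumed trained instance `tm`.
numSegsRemoved, numSynsRemoved = tm.trimSegments(minPermanence=0.1,
                                                 minNumSyns=3)
print 'Removed %d segments and %d synapses' % (numSegsRemoved, numSynsRemoved)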
'Removes any update that would be for the given col, cellIdx, segIdx. NOTE: logically, we need to do this when we delete segments, so that if an update refers to a segment that was just deleted, we also remove that update from the update list. However, I haven\'t seen it trigger in any of the unit tests yet, so it might mean that it\'s not needed and that situation doesn\'t occur, by construction.'
def _cleanUpdatesList(self, col, cellIdx, seg):
  for (key, updateList) in self.segmentUpdates.iteritems():
    (c, i) = (key[0], key[1])
    if c == col and i == cellIdx:
      for update in updateList:
        if update[1].segment == seg:
          self._removeSegmentUpdate(update)
'Called when learning has been completed. This method just calls :meth:`trimSegments` and then clears out caches.'
def finishLearning(self):
  self.trimSegments(minPermanence=0.0001)

  for (c, i) in itertools.product(xrange(self.numberOfCols),
                                  xrange(self.cellsPerColumn)):
    for segment in self.cells[c][i]:
      segment.dutyCycle()

  if self.cellsPerColumn > 1:
    for c in xrange(self.numberOfCols):
      assert self.getNumSegmentsInCell(c, 0) == 0
'This function produces goodness-of-match scores for a set of input patterns, by checking for their presence in the current and predicted output of the TM. Returns a global count of the number of extra and missing bits, the confidence scores for each input pattern, and (if requested) the bits in each input pattern that were not present in the TM\'s prediction. :param patternNZs a list of input patterns that we want to check for. Each element is a list of the non-zeros in that pattern. :param output The output of the TM. If not specified, then use the TM\'s current output. This can be specified if you are trying to check the prediction metric for an output from the past. :param colConfidence The column confidences. If not specified, then use the TM\'s current self.colConfidence. This can be specified if you are trying to check the prediction metrics for an output from the past. :param details if True, also include details of missing bits per pattern. :returns: list containing: totalExtras, totalMissing, [conf_1, conf_2, ...], [missing1, missing2, ...] @retval totalExtras a global count of the number of \'extras\', i.e. bits that are on in the current output but not in the or of all the passed in patterns @retval totalMissing a global count of all the missing bits, i.e. the bits that are on in the or of the patterns, but not in the current output @retval conf_i the confidence score for the i\'th pattern inpatternsToCheck This consists of 3 items as a tuple: (predictionScore, posPredictionScore, negPredictionScore) @retval missing_i the bits in the i\'th pattern that were missing in the output. This list is only returned if details is True.'
def _checkPrediction(self, patternNZs, output=None, colConfidence=None, details=False):
  numPatterns = len(patternNZs)

  orAll = set()
  orAll = orAll.union(*patternNZs)

  if output is None:
    assert self.currentOutput is not None
    output = self.currentOutput
  output = set(output.sum(axis=1).nonzero()[0])

  totalExtras = len(output.difference(orAll))
  totalMissing = len(orAll.difference(output))

  if colConfidence is None:
    colConfidence = self.colConfidence['t']

  confidences = []
  for i in xrange(numPatterns):
    positivePredictionSum = colConfidence[patternNZs[i]].sum()
    positiveColumnCount = len(patternNZs[i])

    totalPredictionSum = colConfidence.sum()
    totalColumnCount = len(colConfidence)

    negativePredictionSum = totalPredictionSum - positivePredictionSum
    negativeColumnCount = totalColumnCount - positiveColumnCount

    if positiveColumnCount != 0:
      positivePredictionScore = positivePredictionSum
    else:
      positivePredictionScore = 0.0

    if negativeColumnCount != 0:
      negativePredictionScore = negativePredictionSum
    else:
      negativePredictionScore = 0.0

    currentSum = negativePredictionScore + positivePredictionScore
    if currentSum > 0:
      positivePredictionScore *= 1.0 / currentSum
      negativePredictionScore *= 1.0 / currentSum

    predictionScore = positivePredictionScore - negativePredictionScore

    confidences.append((predictionScore,
                        positivePredictionScore,
                        negativePredictionScore))

  if details:
    missingPatternBits = [set(pattern).difference(output)
                          for pattern in patternNZs]
    return (totalExtras, totalMissing, confidences, missingPatternBits)
  else:
    return (totalExtras, totalMissing, confidences)
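As a worked illustration of the scoring above, with made-up column confidences: the positive and negative sums are normalized to 1.0 and their difference becomes the prediction score.

# Hypothetical confidences over 5 columns; the pattern expects columns 1 and 2.
import numpy

colConfidence = numpy.array([0.0, 0.6, 0.3, 0.0, 0.1])
patternNZ = [1, 2]

positive = colConfidence[patternNZ].sum()       # 0.9
negative = colConfidence.sum() - positive       # 0.1
total = positive + negative                     # 1.0 -> normalize
positive, negative = positive / total, negative / total
predictionScore = positive - negative           # 0.8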
'A segment is active if it has >= activationThreshold connected synapses that are active due to activeState. Notes: studied various cutoffs, none of which seem to be worthwhile list comprehension didn\'t help either :param seg TODO: document :param activeState TODO: document'
def _isSegmentActive(self, seg, activeState):
return isSegmentActive(seg.syns, activeState, self.connectedPerm, self.activationThreshold)
'This routine computes the activity level of a segment given activeState. It can tally up only connected synapses (permanence >= connectedPerm), or all the synapses of the segment, at either t or t-1. :param seg TODO: document :param activeState TODO: document :param connectedSynapsesOnly TODO: document'
def _getSegmentActivityLevel(self, seg, activeState, connectedSynapsesOnly=False):
return getSegmentActivityLevel(seg.syns, activeState, connectedSynapsesOnly, self.connectedPerm)
'Find weakly activated cell in column with at least minThreshold active synapses. :param c which column to look at :param activeState the active cells :param minThreshold minimum number of synapses required :returns: tuple (cellIdx, segment, numActiveSynapses)'
def _getBestMatchingCell(self, c, activeState, minThreshold):
  bestActivityInCol = minThreshold
  bestSegIdxInCol = -1
  bestCellInCol = -1

  for i in xrange(self.cellsPerColumn):
    maxSegActivity = 0
    maxSegIdx = 0

    for (j, s) in enumerate(self.cells[c][i]):
      activity = self._getSegmentActivityLevel(s, activeState)
      if activity > maxSegActivity:
        maxSegActivity = activity
        maxSegIdx = j

    if maxSegActivity >= bestActivityInCol:
      bestActivityInCol = maxSegActivity
      bestSegIdxInCol = maxSegIdx
      bestCellInCol = i

  if bestCellInCol == -1:
    return (None, None, None)
  else:
    return (bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol],
            bestActivityInCol)
'For the given cell, find the segment with the largest number of active synapses. This routine is aggressive in finding the best match. The permanence value of synapses is allowed to be below connectedPerm. The number of active synapses is allowed to be below activationThreshold, but must be above minThreshold. The routine returns the segment index. If no segments are found, then an index of -1 is returned. :param c TODO: document :param i TODO: document :param activeState TODO: document'
def _getBestMatchingSegment(self, c, i, activeState):
  (maxActivity, which) = (self.minThreshold, -1)

  for (j, s) in enumerate(self.cells[c][i]):
    activity = self._getSegmentActivityLevel(s, activeState,
                                             connectedSynapsesOnly=False)
    if activity >= maxActivity:
      (maxActivity, which) = (activity, j)

  if which == -1:
    return None
  else:
    return self.cells[c][i][which]
'Return the index of a cell in this column which is a good candidate for adding a new segment. When we have fixed-size resources in effect, we ensure that we pick a cell which does not already have the max number of allowed segments. If none exists, we choose the least used segment in the column to re-allocate. :param colIdx which column to look at :returns: cell index'
def _getCellForNewSegment(self, colIdx):
if (self.maxSegmentsPerCell < 0): if (self.cellsPerColumn > 1): i = (self._random.getUInt32((self.cellsPerColumn - 1)) + 1) else: i = 0 return i candidateCellIdxs = [] if (self.cellsPerColumn == 1): minIdx = 0 maxIdx = 0 else: minIdx = 1 maxIdx = (self.cellsPerColumn - 1) for i in xrange(minIdx, (maxIdx + 1)): numSegs = len(self.cells[colIdx][i]) if (numSegs < self.maxSegmentsPerCell): candidateCellIdxs.append(i) if (len(candidateCellIdxs) > 0): candidateCellIdx = candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))] if (self.verbosity >= 5): print ('Cell [%d,%d] chosen for new segment, # of segs is %d' % (colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))) return candidateCellIdx candidateSegment = None candidateSegmentDC = 1.0 for i in xrange(minIdx, (maxIdx + 1)): for s in self.cells[colIdx][i]: dc = s.dutyCycle() if (dc < candidateSegmentDC): candidateCellIdx = i candidateSegmentDC = dc candidateSegment = s if (self.verbosity >= 5): print ('Deleting segment #%d for cell[%d,%d] to make room for new segment' % (candidateSegment.segID, colIdx, candidateCellIdx)) candidateSegment.debugPrint() self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment) self.cells[colIdx][candidateCellIdx].remove(candidateSegment) return candidateCellIdx
'Return a segmentUpdate data structure containing a list of proposed changes to segment s. Let activeSynapses be the list of active synapses where the originating cells have their activeState output = 1 at time step t. (This list is empty if s is None since the segment doesn\'t exist.) newSynapses is an optional argument that defaults to false. If newSynapses is true, then newSynapseCount - len(activeSynapses) synapses are added to activeSynapses. These synapses are randomly chosen from the set of cells that have learnState = 1 at timeStep. :param c TODO: document :param i TODO: document :param s TODO: document :param activeState TODO: document :param newSynapses TODO: document'
def _getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):
  activeSynapses = []
  if s is not None:
    activeSynapses = [idx for (idx, syn) in enumerate(s.syns)
                      if activeState[syn[0], syn[1]]]

  if newSynapses:
    nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)
    activeSynapses += self._chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,
                                                   activeState)

  update = BacktrackingTM._SegmentUpdate(c, i, s, activeSynapses)
  return update
'Choose n random cells to learn from. This function is called several times while learning with timeStep = t-1, so we cache the set of candidates for that case. It\'s also called once with timeStep = t, and we cache that set of candidates. :returns: tuple (column index, cell index).'
def _chooseCellsToLearnFrom(self, c, i, s, n, activeState):
  if n <= 0:
    return []

  tmpCandidates = numpy.where(activeState == 1)

  if len(tmpCandidates[0]) == 0:
    return []

  if s is None:
    cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
  else:
    synapsesAlreadyInSegment = set((syn[0], syn[1]) for syn in s.syns)
    cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])
             if (syn[0], syn[1]) not in synapsesAlreadyInSegment]

  if len(cands) <= n:
    return cands

  if n == 1:
    idx = self._random.getUInt32(len(cands))
    return [cands[idx]]

  indices = numpy.array([j for j in range(len(cands))], dtype='uint32')
  tmp = numpy.zeros(min(n, len(indices)), dtype='uint32')
  self._random.sample(indices, tmp)
  return sorted([cands[j] for j in tmp])
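The last branch above picks n distinct candidates at random. A rough stand-in for that sampling-without-replacement step, using numpy.random instead of the TM's own RNG (values are hypothetical):

# Conceptual stand-in for self._random.sample(); not the nupic RNG used above.
import numpy

cands = [(3, 0), (7, 1), (9, 2), (12, 0)]      # hypothetical (column, cell) candidates
n = 2
picked = numpy.random.choice(len(cands), size=n, replace=False)
chosen = sorted([cands[j] for j in picked])    # n distinct cells, in sorted order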
'Go through the list of accumulated segment updates and process them as follows: if the segment update is too old, remove the update else if the cell received bottom-up, update its permanences else if it\'s still being predicted, leave it in the queue else remove it. :param activeColumns TODO: document'
def _processSegmentUpdates(self, activeColumns):
  removeKeys = []
  trimSegments = []

  for (key, updateList) in self.segmentUpdates.iteritems():
    (c, i) = (key[0], key[1])

    if c in activeColumns:
      action = 'update'
    elif self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
      action = 'keep'
    else:
      action = 'remove'

    updateListKeep = []
    if action != 'remove':
      for (createDate, segUpdate) in updateList:
        if self.verbosity >= 4:
          print '_nLrnIterations =', self.lrnIterationIdx,
          print segUpdate

        if (self.lrnIterationIdx - createDate) > self.segUpdateValidDuration:
          continue

        if action == 'update':
          trimSegment = self._adaptSegment(segUpdate)
          if trimSegment:
            trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
                                 segUpdate.segment))
        else:
          updateListKeep.append((createDate, segUpdate))

    self.segmentUpdates[key] = updateListKeep
    if len(updateListKeep) == 0:
      removeKeys.append(key)

  for key in removeKeys:
    self.segmentUpdates.pop(key)

  for (c, i, segment) in trimSegments:
    self._trimSegmentsInCell(c, i, [segment], minPermanence=1e-05,
                             minNumSyns=0)
'This function applies segment update information to a segment in a cell. Synapses on the active list get their permanence counts incremented by permanenceInc. All other synapses get their permanence counts decremented by permanenceDec. We also increment the positiveActivations count of the segment. :param segUpdate SegmentUpdate instance :returns: True if some synapses were decremented to 0 and the segment is a candidate for trimming'
def _adaptSegment(self, segUpdate):
  trimSegment = False
  (c, i, segment) = (segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment)

  activeSynapses = segUpdate.activeSynapses
  synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])

  if segment is not None:
    if self.verbosity >= 4:
      print ('Reinforcing segment #%d for cell[%d,%d]' %
             (segment.segID, c, i))
      print ' before:',
      segment.debugPrint()

    segment.lastActiveIteration = self.lrnIterationIdx
    segment.positiveActivations += 1
    segment.dutyCycle(active=True)

    lastSynIndex = len(segment.syns) - 1
    inactiveSynIndices = [s for s in xrange(0, lastSynIndex + 1)
                          if s not in synToUpdate]
    trimSegment = segment.updateSynapses(inactiveSynIndices,
                                         -self.permanenceDec)

    activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
    segment.updateSynapses(activeSynIndices, self.permanenceInc)

    synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
    if (self.maxSynapsesPerSegment > 0 and
        (len(synsToAdd) + len(segment.syns)) > self.maxSynapsesPerSegment):
      numToFree = (len(segment.syns) + len(synsToAdd) -
                   self.maxSynapsesPerSegment)
      segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
    for newSyn in synsToAdd:
      segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)

    if self.verbosity >= 4:
      print ' after:',
      segment.debugPrint()

  else:
    newSegment = Segment(tm=self, isSequenceSeg=segUpdate.sequenceSegment)
    for synapse in activeSynapses:
      newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)

    if self.verbosity >= 3:
      print ('New segment #%d for cell[%d,%d]' % ((self.segID - 1), c, i)),
      newSegment.debugPrint()

    self.cells[c][i].append(newSegment)

  return trimSegment
'Returns information about the distribution of segments, synapses and permanence values in the current TM. If requested, also returns information regarding the number of currently active segments and synapses. :returns: tuple described below: nSegments, nSynapses, nActiveSegs, nActiveSynapses, distSegSizes, distNSegsPerCell, distPermValues, distAges - ``nSegments``: (int) total number of segments - ``nSynapses``: (int) total number of synapses - ``nActiveSegs``: (int) total number of active segments (0 if ``collectActiveData`` is False) - ``nActiveSynapses``: (int) total number of active synapses 0 if ``collectActiveData`` is False - ``distSegSizes``: (dict) where d[n] = number of segments with n synapses - ``distNSegsPerCell``: (dict) where d[n] = number of cells with n segments - ``distPermValues``: (dict) where d[p] = number of synapses with perm = p/10 - ``distAges``: (list) of tuples (``ageRange``, ``numSegments``)'
def getSegmentInfo(self, collectActiveData=False):
(nSegments, nSynapses) = (0, 0) (nActiveSegs, nActiveSynapses) = (0, 0) (distSegSizes, distNSegsPerCell) = ({}, {}) distPermValues = {} numAgeBuckets = 20 distAges = [] ageBucketSize = int(((self.lrnIterationIdx + 20) / 20)) for i in range(numAgeBuckets): distAges.append([('%d-%d' % ((i * ageBucketSize), (((i + 1) * ageBucketSize) - 1))), 0]) for c in xrange(self.numberOfCols): for i in xrange(self.cellsPerColumn): if (len(self.cells[c][i]) > 0): nSegmentsThisCell = len(self.cells[c][i]) nSegments += nSegmentsThisCell if distNSegsPerCell.has_key(nSegmentsThisCell): distNSegsPerCell[nSegmentsThisCell] += 1 else: distNSegsPerCell[nSegmentsThisCell] = 1 for seg in self.cells[c][i]: nSynapsesThisSeg = seg.getNumSynapses() nSynapses += nSynapsesThisSeg if distSegSizes.has_key(nSynapsesThisSeg): distSegSizes[nSynapsesThisSeg] += 1 else: distSegSizes[nSynapsesThisSeg] = 1 for syn in seg.syns: p = int((syn[2] * 10)) if distPermValues.has_key(p): distPermValues[p] += 1 else: distPermValues[p] = 1 age = (self.lrnIterationIdx - seg.lastActiveIteration) ageBucket = int((age / ageBucketSize)) distAges[ageBucket][1] += 1 if collectActiveData: if self._isSegmentActive(seg, self.infActiveState['t']): nActiveSegs += 1 for syn in seg.syns: if (self.activeState['t'][syn[0]][syn[1]] == 1): nActiveSynapses += 1 return (nSegments, nSynapses, nActiveSegs, nActiveSynapses, distSegSizes, distNSegsPerCell, distPermValues, distAges)
'Compute/update and return the positive activations duty cycle of this segment. This is a measure of how often this segment is providing good predictions. :param active True if segment just provided a good prediction :param readOnly If True, compute the updated duty cycle, but don\'t change the cached value. This is used by debugging print statements. :returns: The duty cycle, a measure of how often this segment is providing good predictions. **NOTE:** This method relies on different schemes to compute the duty cycle based on how much history we have. In order to support this tiered approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER** (@ref dutyCycleTiers). When we don\'t have a lot of history yet (first tier), we simply return number of positive activations / total number of iterations After a certain number of iterations have accumulated, it converts into a moving average calculation, which is updated only when requested since it can be a bit expensive to compute on every iteration (it uses the pow() function). The duty cycle is computed as follows: dc[t] = (1-alpha) * dc[t-1] + alpha * value[t] If the value[t] has been 0 for a number of steps in a row, you can apply all of the updates at once using: dc[t] = (1-alpha)^(t-lastT) * dc[lastT] We use the alphas and tiers as defined in @ref dutyCycleAlphas and @ref dutyCycleTiers.'
def dutyCycle(self, active=False, readOnly=False):
  if self.tm.lrnIterationIdx <= self.dutyCycleTiers[1]:
    dutyCycle = float(self.positiveActivations) / self.tm.lrnIterationIdx
    if not readOnly:
      self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx
      self._lastPosDutyCycle = dutyCycle
    return dutyCycle

  age = self.tm.lrnIterationIdx - self._lastPosDutyCycleIteration
  if age == 0 and not active:
    return self._lastPosDutyCycle

  for tierIdx in range(len(self.dutyCycleTiers) - 1, 0, -1):
    if self.tm.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:
      alpha = self.dutyCycleAlphas[tierIdx]
      break

  dutyCycle = pow(1.0 - alpha, age) * self._lastPosDutyCycle
  if active:
    dutyCycle += alpha

  if not readOnly:
    self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx
    self._lastPosDutyCycle = dutyCycle

  return dutyCycle
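A worked example of the moving-average update described in the docstring, with hypothetical numbers (alpha, cached duty cycle, and age are made up):

# dc[t] = (1-alpha)^(t-lastT) * dc[lastT], plus alpha if the segment was active
alpha = 0.1
lastDutyCycle = 0.2
age = 3                                             # iterations since the last update

dutyCycle = pow(1.0 - alpha, age) * lastDutyCycle   # 0.9**3 * 0.2 = 0.1458
dutyCycle += alpha                                  # active this iteration -> 0.2458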
'Print segment information for verbose messaging and debugging. This uses the following format: ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75 where: 54413 - is the unique segment id True - is sequence segment 0.64801 - moving average duty cycle (24/36) - (numPositiveActivations / numTotalActivations) 101 - age, number of iterations since last activated [9,1]0.75 - synapse from column 9, cell #1, strength 0.75 [10,1]0.75 - synapse from column 10, cell #1, strength 0.75 [11,1]0.75 - synapse from column 11, cell #1, strength 0.75'
def debugPrint(self):
  print ('ID:%-5d' % self.segID),
  if self.isSequenceSeg:
    print 'True',
  else:
    print 'False',
  print ('%9.7f' % self.dutyCycle(readOnly=True)),
  print ('(%4d/%-4d)' % (self.positiveActivations, self.totalActivations)),
  print ('%4d' % (self.tm.lrnIterationIdx - self.lastActiveIteration)),

  sortedSyns = sorted(self.syns)
  for (_, synapse) in enumerate(sortedSyns):
    print ('[%d,%d]%4.2f' % (synapse[0], synapse[1], synapse[2])),
  print
'Free up some synapses in this segment. We always free up inactive synapses (lowest permanence freed up first) before we start to free up active ones. :param numToFree number of synapses to free up :param inactiveSynapseIndices list of the inactive synapse indices.'
def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity=0):
  assert numToFree <= len(self.syns)

  if verbosity >= 4:
    print '\nIn PY freeNSynapses with numToFree =', numToFree,
    print 'inactiveSynapseIndices =',
    for i in inactiveSynapseIndices:
      print self.syns[i][0:2],
    print

  if len(inactiveSynapseIndices) > 0:
    perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices])
    candidates = numpy.array(inactiveSynapseIndices)[
        perms.argsort()[0:numToFree]]
    candidates = list(candidates)
  else:
    candidates = []

  if len(candidates) < numToFree:
    activeSynIndices = [i for i in xrange(len(self.syns))
                        if i not in inactiveSynapseIndices]
    perms = numpy.array([self.syns[i][2] for i in activeSynIndices])
    moreToFree = numToFree - len(candidates)
    moreCandidates = numpy.array(activeSynIndices)[
        perms.argsort()[0:moreToFree]]
    candidates += list(moreCandidates)

  if verbosity >= 4:
    print ('Deleting %d synapses from segment to make room for new ones:' %
           len(candidates)), candidates
    print 'BEFORE:',
    self.debugPrint()

  synsToDelete = [self.syns[i] for i in candidates]
  for syn in synsToDelete:
    self.syns.remove(syn)

  if verbosity >= 4:
    print 'AFTER:',
    self.debugPrint()
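The weakest synapses are selected with an argsort over permanence values. A small self-contained illustration of that selection (synapse values are made up):

# Made-up synapses: [srcCellCol, srcCellIdx, permanence]
import numpy

syns = [[3, 0, 0.40], [7, 1, 0.05], [9, 2, 0.22]]
inactiveSynapseIndices = [1, 2]
numToFree = 1

perms = numpy.array([syns[i][2] for i in inactiveSynapseIndices])      # [0.05, 0.22]
candidates = numpy.array(inactiveSynapseIndices)[perms.argsort()[0:numToFree]]
# candidates == [1]: the 0.05-permanence synapse would be freed first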
'Add a new synapse :param srcCellCol source cell column :param srcCellIdx source cell index within the column :param perm initial permanence'
def addSynapse(self, srcCellCol, srcCellIdx, perm):
self.syns.append([int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])
'Update a set of synapses in the segment. :param tm The owner TM :param synapses List of synapse indices to update :param delta How much to add to each permanence :returns: True if synapse reached 0'
def updateSynapses(self, synapses, delta):
  reached0 = False

  if delta > 0:
    for synapse in synapses:
      self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
      if newValue > self.tm.permanenceMax:
        self.syns[synapse][2] = self.tm.permanenceMax
  else:
    for synapse in synapses:
      self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
      if newValue <= 0:
        self.syns[synapse][2] = 0
        reached0 = True

  return reached0
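For example, with hypothetical permanence values, an increment is capped at permanenceMax while a decrement that crosses zero is clamped and flags the segment for possible trimming:

# Hypothetical permanence values illustrating the clamping behaviour above.
permanenceMax = 1.0
perm = min(0.95 + 0.10, permanenceMax)   # increment capped at permanenceMax -> 1.0

perm2 = 0.05 - 0.10                      # decrement crosses zero...
reached0 = perm2 <= 0                    # ...so the caller is told the synapse hit 0
perm2 = max(perm2, 0)                    # and the permanence is clamped to 0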
'Initialize ephemeral instance variables (those that aren\'t serialized)'
def __constructEphemeralInstanceVars(self):
  assert not hasattr(self, 'ephemeral')

  self.ephemeral = DictObj()

  self.ephemeral.logPathInput = ''
  self.ephemeral.logPathOutput = ''
  self.ephemeral.logPathOutputDense = ''
  self.ephemeral._fpLogInput = None
  self.ephemeral._fpLogOutput = None
  self.ephemeral._fpLogOutputDense = None

  return
'Called by network after all links have been set up'
def initialize(self):
  self.identityPolicy.initialize(self)
  _debugOut(self.identityPolicy.getName())
'Run one iteration of the region\'s compute. The guts of the compute are contained in the _compute() call so that we can profile it if requested.'
def compute(self, inputs, outputs):
  self.identityPolicy.compute(inputs, outputs)
  _debugOut(('%s: inputs=%s; outputs=%s' %
             (self.identityPolicy.getName(), inputs, outputs)))
  return
'Return the base Spec for TestRegion.'
@classmethod
def getSpec(cls):
spec = dict(description='TestRegion', singleNodeOnly=True, inputs=dict(bottomUpIn=dict(description='The input vector.', dataType='Real32', count=0, required=False, regionLevel=True, isDefaultInput=True, requireSplitterMap=False), topDownIn=dict(description='The top-down input signal, generated from\n feedback from upper levels', dataType='Real32', count=0, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False)), outputs=dict(bottomUpOut=dict(description='The output signal generated from the bottom-up inputs\n from lower levels.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True), topDownOut=dict(description='The top-down output signal, generated from\n feedback from upper levels', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False)), parameters=dict(logPathInput=dict(description='Optional name of input log file. If set, every input vector will be logged to this file.', accessMode='ReadWrite', dataType='Byte', count=0, constraints=''), logPathOutput=dict(description='Optional name of output log file. If set, every output vector will be logged to this file.', accessMode='ReadWrite', dataType='Byte', count=0, constraints=''), logPathOutputDense=dict(description='Optional name of output log file. If set, every output vector will be logged to this file as a dense vector.', accessMode='ReadWrite', dataType='Byte', count=0, constraints=''), breakPdb=dict(description='Set to 1 to stop in the pdb debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), breakKomodo=dict(description='Set to 1 to stop in the Komodo debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite')), commands=dict(setIdentityPolicyInstance=dict(description=(('Set identity policy instance BERORE running the network. ' + "The instance MUST be derived from TestRegion's ") + 'RegionIdentityPolicyBase class.')), getIdentityPolicyInstance=dict(description=(('Returns identity policy instance that was associated with ' + 'the TestRegion instance via the setIdentityPolicyInstance ') + 'command.')))) return spec
'Get the value of a NodeSpec parameter. Most parameters are handled automatically by PyRegion\'s parameter get mechanism. The ones that need special treatment are explicitly handled here.'
def getParameter(self, parameterName, index=(-1)):
  assert not (parameterName in self.__dict__ and
              parameterName in self.ephemeral)

  if parameterName in self.ephemeral:
    assert parameterName not in self.__dict__
    return self.ephemeral[parameterName]
  else:
    return super(PyRegion, self).getParameter(parameterName, index)
'Set the value of a Spec parameter. Most parameters are handled automatically by PyRegion\'s parameter set mechanism. The ones that need special treatment are explicitly handled here.'
def setParameter(self, parameterName, index, parameterValue):
assert (not ((parameterName in self.__dict__) and (parameterName in self.ephemeral))) if (parameterName in self.ephemeral): if (parameterName == 'logPathInput'): self.ephemeral.logPathInput = parameterValue if self.ephemeral._fpLogInput: self.ephemeral._fpLogInput.close() self.ephemeral._fpLogInput = None if parameterValue: self.ephemeral._fpLogInput = open(self.ephemeral.logPathInput, 'w') elif (parameterName == 'logPathOutput'): self.ephemeral.logPathOutput = parameterValue if self.ephemeral._fpLogOutput: self.ephemeral._fpLogOutput.close() self.ephemeral._fpLogOutput = None if parameterValue: self.ephemeral._fpLogOutput = open(self.ephemeral.logPathOutput, 'w') elif (parameterName == 'logPathOutputDense'): self.ephemeral.logPathOutputDense = parameterValue if self.ephemeral._fpLogOutputDense: self.ephemeral._fpLogOutputDense.close() self.ephemeral._fpLogOutputDense = None if parameterValue: self.ephemeral._fpLogOutputDense = open(self.ephemeral.logPathOutputDense, 'w') else: raise Exception(('Unknown parameter: ' + parameterName)) return
'TestRegion command that sets identity policy instance. The instance MUST be derived from TestRegion\'s RegionIdentityPolicyBase class. Users MUST set the identity instance BEFORE running the network. Exception: AssertionError if identity policy instance has already been set or if the passed-in instance is not derived from RegionIdentityPolicyBase.'
def setIdentityPolicyInstance(self, identityPolicyObj):
  assert not self.identityPolicy
  assert isinstance(identityPolicyObj, RegionIdentityPolicyBase)

  self.identityPolicy = identityPolicyObj
  return
'TestRegion command that returns the identity policy instance that was associated with this TestRegion instance via setIdentityPolicyInstance(). Returns: a RegionIdentityPolicyBase-based instance that was associated with this TestRegion instance. Exception: AssertionError if no identity policy instance has been set.'
def getIdentityPolicyInstance(self):
  assert self.identityPolicy
  return self.identityPolicy
'Save the region\'s state. The ephemerals and identity policy are excluded from the saved state. :param proto: an instance of TestRegionProto to serialize'
def write(self, proto):
  proto.breakPdb = self.breakPdb
  proto.breakKomodo = self.breakKomodo
'Load the state from the given proto instance. The saved state does not include the identity policy so this must be constructed and set after the region is deserialized. This can be done by calling \'setIdentityPolicyInstance\'. :param proto: an instance of TestRegionProto to load state from'
def read(self, proto):
  self.breakPdb = proto.breakPdb
  self.breakKomodo = proto.breakKomodo
  self.__constructEphemeralInstanceVars()
'Return serializable state. This function will return a version of the __dict__ with all "ephemeral" members stripped out. "Ephemeral" members are defined as those that do not need to be (nor should be) stored in any kind of persistent file (e.g., NuPIC network XML file.)'
def __getstate__(self):
  state = self.__dict__.copy()
  state.pop('ephemeral')
  return state
'Set the state of ourself from a serialized state.'
def __setstate__(self, state):
  assert 'ephemeral' not in state
  self.__dict__.update(state)
  self.__constructEphemeralInstanceVars()
  return
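Together, __getstate__ and __setstate__ follow the standard pickle pattern for objects with non-serializable members: drop the ephemerals on save and rebuild them on load. A minimal sketch of the same pattern (class and attribute names are illustrative):

import pickle

class Example(object):
  def __init__(self):
    self.value = 42
    self._buildEphemerals()

  def _buildEphemerals(self):
    # Things that should never be serialized (file handles, caches, ...)
    self.ephemeral = {'logFile': None}

  def __getstate__(self):
    state = self.__dict__.copy()
    state.pop('ephemeral')
    return state

  def __setstate__(self, state):
    self.__dict__.update(state)
    self._buildEphemerals()

restored = pickle.loads(pickle.dumps(Example()))   # restored.ephemeral is rebuilt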
'Initialize all ephemeral data members, and give the derived class the opportunity to do the same by invoking the virtual member _initEphemerals(), which is intended to be overridden. NOTE: this is used by both __init__ and __setstate__ code paths.'
def _initializeEphemeralMembers(self):
  for attrName in self._getEphemeralMembersBase():
    if attrName != '_loaded':
      if hasattr(self, attrName):
        if self._loaded:
          pass
        else:
          print self.__class__.__name__, ("contains base class member '%s'" %
                                          attrName)

  if not self._loaded:
    for attrName in self._getEphemeralMembersBase():
      if attrName != '_loaded':
        assert not hasattr(self, attrName)
      else:
        assert hasattr(self, attrName)

  self._profileObj = None
  self._iterations = 0

  self._initEphemerals()
  self._checkEphemeralMembers()
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.initialize`.'
def initialize(self):
  self._spatialPoolerOutput = numpy.zeros(self.columnCount,
                                          dtype=GetNTAReal())
  self._spatialPoolerInput = numpy.zeros((1, self.inputWidth),
                                         dtype=GetNTAReal())
  self._allocateSpatialFDR(None)
'Allocate the spatial pooler instance.'
def _allocateSpatialFDR(self, rfInput):
  if self._sfdr:
    return

  autoArgs = dict((name, getattr(self, name))
                  for name in self._spatialArgNames)

  if (self.SpatialClass == CPPSpatialPooler or
      self.SpatialClass == PYSpatialPooler):
    autoArgs['columnDimensions'] = [self.columnCount]
    autoArgs['inputDimensions'] = [self.inputWidth]
    autoArgs['potentialRadius'] = self.inputWidth

    self._sfdr = self.SpatialClass(**autoArgs)
'Run one iteration, profiling it if requested. :param inputs: (dict) mapping region input names to numpy.array values :param outputs: (dict) mapping region output names to numpy.arrays that should be populated with output values by this method'
def compute(self, inputs, outputs):
if (False and self.learningMode and (self._iterations > 0) and (self._iterations <= 10)): import hotshot if (self._iterations == 10): print '\n Collecting and sorting internal node profiling stats generated by hotshot...' stats = hotshot.stats.load('hotshot.stats') stats.strip_dirs() stats.sort_stats('time', 'calls') stats.print_stats() if (self._profileObj is None): print '\n Preparing to capture profile using hotshot...' if os.path.exists('hotshot.stats'): os.remove('hotshot.stats') self._profileObj = hotshot.Profile('hotshot.stats', 1, 1) self._profileObj.runcall(self._compute, *[inputs, outputs]) else: self._compute(inputs, outputs)
'Run one iteration of SPRegion\'s compute'
def _compute(self, inputs, outputs):
  if self._sfdr is None:
    raise RuntimeError('Spatial pooler has not been initialized')

  if not self.topDownMode:
    self._iterations += 1

    buInputVector = inputs['bottomUpIn']

    resetSignal = False
    if 'resetIn' in inputs:
      assert len(inputs['resetIn']) == 1
      resetSignal = (inputs['resetIn'][0] != 0)

    rfOutput = self._doBottomUpCompute(
        rfInput=buInputVector.reshape((1, buInputVector.size)),
        resetSignal=resetSignal)

    outputs['bottomUpOut'][:] = rfOutput.flat

  else:
    topDownIn = inputs.get('topDownIn', None)
    (spatialTopDownOut, temporalTopDownOut) = self._doTopDownInfer(topDownIn)
    outputs['spatialTopDownOut'][:] = spatialTopDownOut
    if temporalTopDownOut is not None:
      outputs['temporalTopDownOut'][:] = temporalTopDownOut

  outputs['anomalyScore'][:] = 0
'Do one iteration of inference and/or learning and return the result Parameters: rfInput: Input vector. Shape is: (1, inputVectorLen). resetSignal: True if reset is asserted'
def _doBottomUpCompute(self, rfInput, resetSignal):
  self._conditionalBreak()

  self._spatialPoolerInput = rfInput.reshape(-1)
  assert rfInput.shape[0] == 1

  inputVector = numpy.array(rfInput[0]).astype('uint32')
  outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32')

  self._sfdr.compute(inputVector, self.learningMode, outputVector)

  self._spatialPoolerOutput[:] = outputVector[:]

  if self._fpLogSP:
    output = self._spatialPoolerOutput.reshape(-1)
    outputNZ = output.nonzero()[0]
    outStr = ' '.join(['%d' % int(token) for token in outputNZ])
    print >>self._fpLogSP, output.size, outStr

  if self._fpLogSPInput:
    output = rfInput.reshape(-1)
    outputNZ = output.nonzero()[0]
    outStr = ' '.join(['%d' % int(token) for token in outputNZ])
    print >>self._fpLogSPInput, output.size, outStr

  return self._spatialPoolerOutput
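Outside the region wrapper, the wrapped algorithm is driven through the same compute(input, learn, output) signature. A hedged sketch of direct use (dimensions and module path are assumptions):

# Hypothetical direct use of the algorithm this region wraps.
import numpy
from nupic.algorithms.spatial_pooler import SpatialPooler

sp = SpatialPooler(inputDimensions=(128,), columnDimensions=(64,),
                   potentialRadius=128)

inputVector = numpy.zeros(128, dtype='uint32')
inputVector[[5, 9, 77]] = 1
activeArray = numpy.zeros(64, dtype='uint32')

sp.compute(inputVector, True, activeArray)   # learn=True; active columns written in place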
'Do one iteration of top-down inference. Parameters: tdInput: Top-down input retval: (spatialTopDownOut, temporalTopDownOut) spatialTopDownOut is the top down output computed only from the SP, using its current bottom-up output. temporalTopDownOut is the top down output computed from the topDownIn input of the level above us.'
def _doTopDownInfer(self, topDownInput=None):
return (None, None)
'Doesn\'t include the spatial, temporal and other parameters :returns: (dict) The base Spec for SPRegion.'
@classmethod
def getBaseSpec(cls):
spec = dict(description=SPRegion.__doc__, singleNodeOnly=True, inputs=dict(bottomUpIn=dict(description='The input vector.', dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), resetIn=dict(description='A boolean flag that indicates whether\n or not the input vector received in this compute cycle\n represents the start of a new temporal sequence.', dataType='Real32', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), topDownIn=dict(description='The top-down input signal, generated from\n feedback from upper levels', dataType='Real32', count=0, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), sequenceIdIn=dict(description='Sequence ID', dataType='UInt64', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False)), outputs=dict(bottomUpOut=dict(description='The output signal generated from the bottom-up inputs\n from lower levels.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True), topDownOut=dict(description='The top-down output signal, generated from\n feedback from upper levels', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), spatialTopDownOut=dict(description='The top-down output, generated only from the current\n SP output. This can be used to evaluate how well the\n SP is representing the inputs independent of the TM.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), temporalTopDownOut=dict(description='The top-down output, generated only from the current\n TM output feedback down through the SP.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), anomalyScore=dict(description="The score for how 'anomalous' (i.e. rare) this spatial\n input pattern is. Higher values are increasingly rare", dataType='Real32', count=1, regionLevel=True, isDefaultOutput=False)), parameters=dict(breakPdb=dict(description='Set to 1 to stop in the pdb debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), breakKomodo=dict(description='Set to 1 to stop in the Komodo debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'))) return spec
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`. The parameters collection is constructed based on the parameters specified by the various components (spatialSpec, temporalSpec and otherSpec)'
@classmethod
def getSpec(cls):
  spec = cls.getBaseSpec()
  (s, o) = _getAdditionalSpecs(spatialImp=getDefaultSPImp())
  spec['parameters'].update(s)
  spec['parameters'].update(o)
  return spec
':returns: (:class:`~nupic.algorithms.spatial_pooler.SpatialPooler`) instance of the underlying algorithm object.'
def getAlgorithmInstance(self):
return self._sfdr
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameter`. Most parameters are handled automatically by PyRegion\'s parameter get mechanism. The ones that need special treatment are explicitly handled here.'
def getParameter(self, parameterName, index=(-1)):
if (parameterName == 'activeOutputCount'): return self.columnCount elif (parameterName == 'spatialPoolerInput'): return list(self._spatialPoolerInput.reshape((-1))) elif (parameterName == 'spatialPoolerOutput'): return list(self._spatialPoolerOutput) elif (parameterName == 'spNumActiveOutputs'): return len(self._spatialPoolerOutput.nonzero()[0]) elif (parameterName == 'spOutputNonZeros'): return ([len(self._spatialPoolerOutput)] + list(self._spatialPoolerOutput.nonzero()[0])) elif (parameterName == 'spInputNonZeros'): import pdb pdb.set_trace() return ([len(self._spatialPoolerInput)] + list(self._spatialPoolerInput.nonzero()[0])) elif (parameterName == 'spLearningStatsStr'): try: return str(self._sfdr.getLearningStats()) except: return str(dict()) else: return PyRegion.getParameter(self, parameterName, index)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`. Set the value of a Spec parameter. Most parameters are handled automatically by PyRegion\'s parameter set mechanism. The ones that need special treatment are explicitly handled here.'
def setParameter(self, parameterName, index, parameterValue):
if (parameterName in self._spatialArgNames): setattr(self._sfdr, parameterName, parameterValue) elif (parameterName == 'logPathInput'): self.logPathInput = parameterValue if self._fpLogSPInput: self._fpLogSPInput.close() self._fpLogSPInput = None if parameterValue: self._fpLogSPInput = open(self.logPathInput, 'w') elif (parameterName == 'logPathOutput'): self.logPathOutput = parameterValue if self._fpLogSP: self._fpLogSP.close() self._fpLogSP = None if parameterValue: self._fpLogSP = open(self.logPathOutput, 'w') elif (parameterName == 'logPathOutputDense'): self.logPathOutputDense = parameterValue if self._fpLogSPDense: self._fpLogSPDense.close() self._fpLogSPDense = None if parameterValue: self._fpLogSPDense = open(self.logPathOutputDense, 'w') elif hasattr(self, parameterName): setattr(self, parameterName, parameterValue) else: raise Exception(('Unknown parameter: ' + parameterName))
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.'
@staticmethod
def getSchema():
return SPRegionProto
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`. Write state to proto object. :param proto: SPRegionProto capnproto object'
def writeToProto(self, proto):
  proto.spatialImp = self.spatialImp
  proto.columnCount = self.columnCount
  proto.inputWidth = self.inputWidth
  proto.learningMode = 1 if self.learningMode else 0
  proto.inferenceMode = 1 if self.inferenceMode else 0
  proto.anomalyMode = 1 if self.anomalyMode else 0
  proto.topDownMode = 1 if self.topDownMode else 0
  self._sfdr.write(proto.spatialPooler)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`. Read state from proto object. :param proto: SPRegionProto capnproto object'
@classmethod
def readFromProto(cls, proto):
  instance = cls(proto.columnCount, proto.inputWidth)
  instance.spatialImp = proto.spatialImp
  instance.learningMode = proto.learningMode
  instance.inferenceMode = proto.inferenceMode
  instance.anomalyMode = proto.anomalyMode
  instance.topDownMode = proto.topDownMode

  spatialImp = proto.spatialImp
  instance._sfdr = getSPClass(spatialImp).read(proto.spatialPooler)

  return instance
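A hedged sketch of the serialization round trip these two methods support (the capnp message construction shown is an assumption about the surrounding setup code, not part of this file):

# Hypothetical round trip; proto construction is assumed.
protoOut = SPRegionProto.new_message()
spRegion.writeToProto(protoOut)                  # serialize an existing SPRegion

protoIn = SPRegionProto.from_bytes(protoOut.to_bytes())
restored = SPRegion.readFromProto(protoIn)       # rebuild the region from the proto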
'Return serializable state. This function will return a version of the __dict__ with all "ephemeral" members stripped out. "Ephemeral" members are defined as those that do not need to be (nor should be) stored in any kind of persistent file (e.g., NuPIC network XML file.)'
def __getstate__(self):
  state = self.__dict__.copy()
  for ephemeralMemberName in self._getEphemeralMembersAll():
    state.pop(ephemeralMemberName, None)
  return state
'Set the state of ourself from a serialized state.'
def __setstate__(self, state):
  self.__dict__.update(state)
  self._loaded = True

  if not hasattr(self, 'SpatialClass'):
    self.SpatialClass = self._sfdr.__class__

  self._initializeEphemeralMembers()
  self._allocateSpatialFDR(None)
'Initialize all ephemerals used by derived classes.'
def _initEphemerals(self):
  if hasattr(self, '_sfdr') and self._sfdr:
    self._spatialPoolerOutput = numpy.zeros(self.columnCount,
                                            dtype=GetNTAReal())
  else:
    self._spatialPoolerOutput = None

  self._fpLogSPInput = None
  self._fpLogSP = None
  self._fpLogSPDense = None
  self.logPathInput = ''
  self.logPathOutput = ''
  self.logPathOutputDense = ''
'Callback that returns a list of all "ephemeral" members (i.e., data members that should not and/or cannot be pickled.)'
def _getEphemeralMembers(self):
return ['_spatialPoolerOutput', '_fpLogSP', '_fpLogSPDense', 'logPathInput', 'logPathOutput', 'logPathOutputDense']
'Returns list of all ephemeral members.'
def _getEphemeralMembersBase(self):
return ['_loaded', '_profileObj', '_iterations']
'Returns a concatenated list of both the standard base class ephemeral members, as well as any additional ephemeral members (e.g., file handles, etc.).'
def _getEphemeralMembersAll(self):
return (self._getEphemeralMembersBase() + self._getEphemeralMembers())
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArrayCount`. TODO: as a temporary hack, getParameterArrayCount checks to see if there\'s a variable, private or not, with that name. If so, it returns the value of the variable.'
def getParameterArrayCount(self, name, index):
  p = self.getParameter(name)
  if not hasattr(p, '__len__'):
    raise Exception(("Attempt to access parameter '%s' as an array but it is not an array" % name))
  return len(p)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`. TODO: as a temporary hack, getParameterArray checks to see if there\'s a variable, private or not, with that name. If so, it returns the value of the variable.'
def getParameterArray(self, name, index, a):
  p = self.getParameter(name)
  if not hasattr(p, '__len__'):
    raise Exception(("Attempt to access parameter '%s' as an array but it is not an array" % name))

  if len(p) > 0:
    a[:] = p[:]
'Verify the validity of the node spec object. The type of each sub-object is verified and then the validity of each node spec item is verified by calling its invariant() method. It also makes sure that there is at most one default input and one default output.'
def invariant(self):
  assert isinstance(self.description, str)
  assert isinstance(self.singleNodeOnly, bool)
  assert isinstance(self.inputs, dict)
  assert isinstance(self.outputs, dict)
  assert isinstance(self.parameters, dict)
  assert isinstance(self.commands, dict)

  hasDefaultInput = False
  for (k, v) in self.inputs.items():
    assert isinstance(k, str)
    assert isinstance(v, InputSpec)
    v.invariant()
    if v.isDefaultInput:
      assert not hasDefaultInput
      hasDefaultInput = True

  hasDefaultOutput = False
  for (k, v) in self.outputs.items():
    assert isinstance(k, str)
    assert isinstance(v, OutputSpec)
    v.invariant()
    if v.isDefaultOutput:
      assert not hasDefaultOutput
      hasDefaultOutput = True

  for (k, v) in self.parameters.items():
    assert isinstance(k, str)
    assert isinstance(v, ParameterSpec)
    v.invariant()

  for (k, v) in self.commands.items():
    assert isinstance(k, str)
    assert isinstance(v, CommandSpec)
    v.invariant()
'Convert the information of the node spec to a plain dict of basic types. The description and singleNodeOnly attributes are placed directly in the result dict. The inputs, outputs, parameters and commands dicts contain Spec item objects (InputSpec, OutputSpec, etc.). Each such object is also converted to a plain dict using the internal items2dict() function (see below).'
def toDict(self):
  def items2dict(items):
    """Convert a dict of node spec items to a plain dict

    Each node spec item object will be converted to a dict of its
    attributes. The entire items dict will become a dict of dicts (same keys).
    """
    d = {}
    for (k, v) in items.items():
      d[k] = v.__dict__
    return d

  self.invariant()
  return dict(description=self.description,
              singleNodeOnly=self.singleNodeOnly,
              inputs=items2dict(self.inputs),
              outputs=items2dict(self.outputs),
              parameters=items2dict(self.parameters),
              commands=items2dict(self.commands))
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.'
@classmethod
def getSpec(cls):
ns = dict(description=KNNClassifierRegion.__doc__, singleNodeOnly=True, inputs=dict(categoryIn=dict(description='Vector of zero or more category indices for this inputsample. -1 implies no category.', dataType='Real32', count=0, required=True, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), bottomUpIn=dict(description="Belief values over children's groups", dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), partitionIn=dict(description='Partition ID of the input sample', dataType='Real32', count=0, required=True, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), auxDataIn=dict(description='Auxiliary data from the sensor', dataType='Real32', count=0, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False)), outputs=dict(categoriesOut=dict(description='A vector representing, for each category index, the likelihood that the input to the node belongs to that category based on the number of neighbors of that category that are among the nearest K.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True), bestPrototypeIndices=dict(description='A vector that lists, in descending order of the match, the positions of the prototypes that best match the input pattern.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), categoryProbabilitiesOut=dict(description='A vector representing, for each category index, the probability that the input to the node belongs to that category based on the distance to the nearest neighbor of each category.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True)), parameters=dict(learningMode=dict(description='Boolean (0/1) indicating whether or not a region is in learning mode.', dataType='UInt32', count=1, constraints='bool', defaultValue=1, accessMode='ReadWrite'), inferenceMode=dict(description='Boolean (0/1) indicating whether or not a region is in inference mode.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), acceptanceProbability=dict(description='During learning, inputs are learned with probability equal to this parameter. If set to 1.0, the default, all inputs will be considered (subject to other tests).', dataType='Real32', count=1, constraints='', defaultValue=1.0, accessMode='ReadWrite'), confusion=dict(description='Confusion matrix accumulated during inference. Reset with reset(). 
This is available to Python client code only.', dataType='Handle', count=2, constraints='', defaultValue=None, accessMode='Read'), activeOutputCount=dict(description='The number of active elements in the "categoriesOut" output.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Read'), categoryCount=dict(description='An integer indicating the number of categories that have been learned', dataType='UInt32', count=1, constraints='', defaultValue=None, accessMode='Read'), patternCount=dict(description='Number of patterns learned by the classifier.', dataType='UInt32', count=1, constraints='', defaultValue=None, accessMode='Read'), patternMatrix=dict(description='The actual patterns learned by the classifier, returned as a matrix.', dataType='Handle', count=1, constraints='', defaultValue=None, accessMode='Read'), k=dict(description='The number of nearest neighbors to use during inference.', dataType='UInt32', count=1, constraints='', defaultValue=1, accessMode='Create'), maxCategoryCount=dict(description='The maximal number of categories the classifier will distinguish between.', dataType='UInt32', count=1, constraints='', defaultValue=2, accessMode='Create'), distanceNorm=dict(description='The norm to use for a distance metric (i.e., the "p" in Lp-norm)', dataType='Real32', count=1, constraints='', defaultValue=2.0, accessMode='ReadWrite'), distanceMethod=dict(description='Method used to compute distances between inputs andprototypes. Possible options are norm, rawOverlap, pctOverlapOfLarger, and pctOverlapOfProto', dataType='Byte', count=0, constraints='enum: norm, rawOverlap, pctOverlapOfLarger, pctOverlapOfProto, pctOverlapOfInput', defaultValue='norm', accessMode='ReadWrite'), outputProbabilitiesByDist=dict(description='If True, categoryProbabilitiesOut is the probability of each category based on the distance to the nearest neighbor of each category. If False, categoryProbabilitiesOut is the percentage of neighbors among the top K that are of each category.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), distThreshold=dict(description='Distance Threshold. If a pattern that is less than distThreshold apart from the input pattern already exists in the KNN memory, then the input pattern is not added to KNN memory.', dataType='Real32', count=1, constraints='', defaultValue=0.0, accessMode='ReadWrite'), inputThresh=dict(description='Input binarization threshold, used if "doBinarization" is True.', dataType='Real32', count=1, constraints='', defaultValue=0.5, accessMode='Create'), doBinarization=dict(description='Whether or not to binarize the input vectors.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), useSparseMemory=dict(description='A boolean flag that determines whether or not the KNNClassifier will use sparse Memory', dataType='UInt32', count=1, constraints='', defaultValue=1, accessMode='Create'), minSparsity=dict(description='If useSparseMemory is set, only vectors with sparsity >= minSparsity will be stored during learning. A value of 0.0 implies all vectors will be stored. 
A value of 0.1 implies only vectors with at least 10% sparsity will be stored', dataType='Real32', count=1, constraints='', defaultValue=0.0, accessMode='ReadWrite'), sparseThreshold=dict(description='If sparse memory is used, input variables whose absolute value is less than this threshold will be stored as zero', dataType='Real32', count=1, constraints='', defaultValue=0.0, accessMode='Create'), relativeThreshold=dict(description='Whether to multiply sparseThreshold by max value in input', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), winnerCount=dict(description='Only this many elements of the input are stored. All elements are stored if 0.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), doSphering=dict(description='A boolean indicating whether or not data should be "sphered" (i.e. each dimension should be normalized such that its mean and variance are zero and one, respectively.) This sphering normalization would be performed after all training samples had been received but before inference was performed. The dimension-specific normalization constants would then be applied to all future incoming vectors prior to performing conventional NN inference.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), SVDSampleCount=dict(description='If not 0, carries out SVD transformation after that many samples have been seen.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), SVDDimCount=dict(description='Number of dimensions to keep after SVD if greater than 0. If set to -1 it is considered unspecified. If set to 0 it is considered "adaptive" and the number is chosen automatically.', dataType='Int32', count=1, constraints='', defaultValue=(-1), accessMode='Create'), fractionOfMax=dict(description='The smallest singular value which is retained as a fraction of the largest singular value. This is used only if SVDDimCount==0 ("adaptive").', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), useAuxiliary=dict(description='Whether or not the classifier should use auxiliary input data.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), justUseAuxiliary=dict(description='Whether or not the classifier should ONLY use the auxiliary input data.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), verbosity=dict(description='An integer that controls the verbosity level, 0 means no verbose output, increasing integers provide more verbosity.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='ReadWrite'), keepAllDistances=dict(description='Whether to store all the protoScores in an array, rather than just the ones for the last inference. When this parameter is changed from True to False, all the scores are discarded except for the most recent one.', dataType='UInt32', count=1, constraints='bool', defaultValue=None, accessMode='ReadWrite'), replaceDuplicates=dict(description='A boolean flag that determines whether or not the KNNClassifier should replace duplicates during learning. 
This should be on when doing online learning.', dataType='UInt32', count=1, constraints='bool', defaultValue=None, accessMode='ReadWrite'), cellsPerCol=dict(description='If >= 1, we assume the input is organized into columns, in the same manner as the temporal memory AND whenever we store a new prototype, we only store the start cell (first cell) in any column which is bursting.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), maxStoredPatterns=dict(description='Limits the maximum number of the training patterns stored. When KNN learns in a fixed capacity mode, the unused patterns are deleted once the number of stored patterns is greater than maxStoredPatterns. [-1 is no limit]', dataType='Int32', count=1, constraints='', defaultValue=(-1), accessMode='Create')), commands=dict()) return ns
'List of attributes to not save with serialized state.'
def _getEphemeralAttributes(self):
return ['_firstComputeCall', '_accuracy', '_protoScores', '_categoryDistances']
'Initialize attributes that are not saved with the checkpoint.'
def _initEphemerals(self):
self._firstComputeCall = True self._accuracy = None self._protoScores = None self._categoryDistances = None self._knn = knn_classifier.KNNClassifier(**self.knnParams) for x in ('_partitions', '_useAuxiliary', '_doSphering', '_scanInfo', '_protoScores'): if (not hasattr(self, x)): setattr(self, x, None)
'Set state from serialized state.'
def __setstate__(self, state):
if ('version' not in state): self.__dict__.update(state) elif (state['version'] == 1): if ('doSelfValidation' in state): state.pop('doSelfValidation') knnState = state['_knn_state'] del state['_knn_state'] self.__dict__.update(state) self._initEphemerals() self._knn.__setstate__(knnState) else: raise RuntimeError('Invalid KNNClassifierRegion version for __setstate__') self.version = KNNClassifierRegion.__VERSION__
'Get serializable state.'
def __getstate__(self):
state = self.__dict__.copy() state['_knn_state'] = self._knn.__getstate__() del state['_knn'] for field in self._getEphemeralAttributes(): del state[field] return state
':returns: (:class:`~nupic.algorithms.knn_classifier.KNNClassifier`)'
def getAlgorithmInstance(self):
return self._knn
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.'
def getParameter(self, name, index=(-1)):
if (name == 'patternCount'): return self._knn._numPatterns elif (name == 'patternMatrix'): return self._getPatternMatrix() elif (name == 'k'): return self._knn.k elif (name == 'distanceNorm'): return self._knn.distanceNorm elif (name == 'distanceMethod'): return self._knn.distanceMethod elif (name == 'distThreshold'): return self._knn.distThreshold elif (name == 'inputThresh'): return self._knn.binarizationThreshold elif (name == 'doBinarization'): return self._knn.doBinarization elif (name == 'useSparseMemory'): return self._knn.useSparseMemory elif (name == 'sparseThreshold'): return self._knn.sparseThreshold elif (name == 'winnerCount'): return self._knn.numWinners elif (name == 'relativeThreshold'): return self._knn.relativeThreshold elif (name == 'SVDSampleCount'): v = self._knn.numSVDSamples return (v if (v is not None) else 0) elif (name == 'SVDDimCount'): v = self._knn.numSVDDims return (v if (v is not None) else 0) elif (name == 'fractionOfMax'): v = self._knn.fractionOfMax return (v if (v is not None) else 0) elif (name == 'useAuxiliary'): return self._useAuxiliary elif (name == 'justUseAuxiliary'): return self._justUseAuxiliary elif (name == 'doSphering'): return self._doSphering elif (name == 'cellsPerCol'): return self._knn.cellsPerCol elif (name == 'maxStoredPatterns'): return self.maxStoredPatterns elif (name == 'categoryRecencyList'): return self._knn._categoryRecencyList else: return PyRegion.getParameter(self, name, index)
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.'
def setParameter(self, name, index, value):
if (name == 'learningMode'): self.learningMode = bool(int(value)) self._epoch = 0 elif (name == 'inferenceMode'): self._epoch = 0 if (int(value) and (not self.inferenceMode)): self._finishLearning() self.inferenceMode = bool(int(value)) elif (name == 'distanceNorm'): self._knn.distanceNorm = value elif (name == 'distanceMethod'): self._knn.distanceMethod = value elif (name == 'keepAllDistances'): self.keepAllDistances = bool(value) if (not self.keepAllDistances): if ((self._protoScores is not None) and (self._protoScores.shape[0] > 1)): self._protoScores = self._protoScores[(-1), :] if (self._protoScores is not None): self._protoScoreCount = 1 else: self._protoScoreCount = 0 elif (name == 'verbosity'): self.verbosity = value self._knn.verbosity = value else: return PyRegion.setParameter(self, name, index, value)
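The parameter plumbing above mostly proxies to the wrapped KNNClassifier, and turning inferenceMode on later triggers _finishLearning(). A minimal sketch (not from the source, assuming the region can be instantiated directly with the constructor keywords named in the Spec, outside the Network engine):

from nupic.regions.knn_classifier_region import KNNClassifierRegion

# Assumed constructor keywords; setParameter takes (name, index, value) and
# getParameter takes (name, index); index is unused (-1) for scalar parameters.
region = KNNClassifierRegion(k=3, maxCategoryCount=4, distanceNorm=2.0)
region.setParameter('learningMode', -1, 1)      # enable learning
region.setParameter('distanceNorm', -1, 1.0)    # forwarded straight to self._knn
print region.getParameter('patternCount')        # 0: nothing has been learned yet
# setting 'inferenceMode' to 1 later will also call _finishLearning()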
'Resets confusion matrix.'
def reset(self):
self.confusion = numpy.zeros((1, 1))
'Begin writing output tap files. :param tapPath: (string) base name of the output tap files to write.'
def enableTap(self, tapPath):
self._tapFileIn = open((tapPath + '.in'), 'w') self._tapFileOut = open((tapPath + '.out'), 'w')
'Disable writing of output tap files.'
def disableTap(self):
if (self._tapFileIn is not None): self._tapFileIn.close() self._tapFileIn = None if (self._tapFileOut is not None): self._tapFileOut.close() self._tapFileOut = None
'Write inputs to output tap file. :param inputs: (iter) some inputs.'
def handleLogInput(self, inputs):
if (self._tapFileIn is not None): for input in inputs: for k in range(len(input)): print >>self._tapFileIn, input[k], print >>self._tapFileIn
'Write outputs to output tap file. :param outputs: (iter) some outputs.'
def handleLogOutput(self, output):
if (self._tapFileOut is not None): for k in range(len(output)): print >>self._tapFileOut, output[k], print >>self._tapFileOut
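A hedged usage note for the tap helpers above, continuing with the `region` instance from the earlier sketch: enableTap(tapPath) opens tapPath + '.in' and tapPath + '.out', and each subsequent compute() mirrors its input and output vectors there until disableTap() closes the files.

region.enableTap('debug')     # writes debug.in / debug.out during compute()
# ... run compute() a few times here ...
region.disableTap()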
'Store a training sample and its associated category label.'
def _storeSample(self, inputVector, trueCatIndex, partition=0):
if (self._samples is None): self._samples = numpy.zeros((0, len(inputVector)), dtype=RealNumpyDType) assert (self._labels is None) self._labels = [] self._samples = numpy.concatenate((self._samples, numpy.atleast_2d(inputVector)), axis=0) self._labels += [trueCatIndex] if (self._partitions is None): self._partitions = [] if (partition is None): partition = 0 self._partitions += [partition]
'Process one input sample. This method is called by the runtime engine. .. note:: the number of input categories may vary, but the array size is fixed to the max number of categories allowed (by a lower region), so "unused" indices of the input category array are filled with -1s. TODO: confusion matrix does not support multi-label classification :param inputs: (dict) mapping region input names to numpy.array values :param outputs: (dict) mapping region output names to numpy.arrays that should be populated with output values by this method'
def compute(self, inputs, outputs):
if (self._useAuxiliary is None): self._useAuxiliary = False if self._firstComputeCall: self._firstComputeCall = False if self._useAuxiliary: if (self._justUseAuxiliary == True): print ' Warning: You have chosen to ignore the image data and instead just use the auxiliary data stream.' inputVector = inputs['bottomUpIn'] if (self._useAuxiliary == True): auxVector = inputs['auxDataIn'] if (auxVector.dtype != numpy.float32): raise RuntimeError, 'KNNClassifierRegion expects numpy.float32 for the auxiliary data vector' if (self._justUseAuxiliary == True): inputVector = inputs['auxDataIn'] else: inputVector = numpy.concatenate([inputVector, inputs['auxDataIn']]) self.handleLogInput([inputVector]) assert ('categoryIn' in inputs), 'No linked category input.' categories = inputs['categoryIn'] if ('partitionIn' in inputs): assert (len(inputs['partitionIn']) == 1), 'Must have exactly one link to partition input.' partInput = inputs['partitionIn'] assert (len(partInput) == 1), 'Partition input element count must be exactly 1.' partition = int(partInput[0]) else: partition = None if self.inferenceMode: categoriesOut = outputs['categoriesOut'] probabilitiesOut = outputs['categoryProbabilitiesOut'] if self._doSphering: inputVector = ((inputVector + self._normOffset) * self._normScale) nPrototypes = 0 if ('bestPrototypeIndices' in outputs): bestPrototypeIndicesOut = outputs['bestPrototypeIndices'] nPrototypes = len(bestPrototypeIndicesOut) (winner, inference, protoScores, categoryDistances) = self._knn.infer(inputVector, partitionId=partition) if (not self.keepAllDistances): self._protoScores = protoScores elif (self._protoScores is None): self._protoScores = numpy.zeros((1, protoScores.shape[0]), protoScores.dtype) self._protoScores[0, :] = protoScores self._protoScoreCount = 1 else: if (self._protoScoreCount == self._protoScores.shape[0]): newProtoScores = numpy.zeros(((self._protoScores.shape[0] * 2), self._protoScores.shape[1]), self._protoScores.dtype) newProtoScores[:self._protoScores.shape[0], :] = self._protoScores self._protoScores = newProtoScores self._protoScores[self._protoScoreCount, :] = protoScores self._protoScoreCount += 1 self._categoryDistances = categoryDistances if self.outputProbabilitiesByDist: scores = (1.0 - self._categoryDistances) else: scores = inference total = scores.sum() if (total == 0): numScores = len(scores) probabilities = (numpy.ones(numScores) / numScores) else: probabilities = (scores / total) nout = min(len(categoriesOut), len(inference)) categoriesOut.fill(0) categoriesOut[0:nout] = inference[0:nout] probabilitiesOut.fill(0) probabilitiesOut[0:nout] = probabilities[0:nout] if (self.verbosity >= 1): print 'KNNRegion: categoriesOut: ', categoriesOut[0:nout] print 'KNNRegion: probabilitiesOut: ', probabilitiesOut[0:nout] if (self._scanInfo is not None): self._scanResults = [tuple(inference[:nout])] for category in categories: if (category >= 0): dims = max((int(category) + 1), len(inference)) oldDims = len(self.confusion) if (oldDims < dims): confusion = numpy.zeros((dims, dims)) confusion[0:oldDims, 0:oldDims] = self.confusion self.confusion = confusion self.confusion[(inference.argmax(), int(category))] += 1 if (nPrototypes > 1): bestPrototypeIndicesOut.fill(0) if (categoryDistances is not None): indices = categoryDistances.argsort() nout = min(len(indices), nPrototypes) bestPrototypeIndicesOut[0:nout] = indices[0:nout] elif (nPrototypes == 1): if ((categoryDistances is not None) and len(categoryDistances)): bestPrototypeIndicesOut[0] = categoryDistances.argmin() 
else: bestPrototypeIndicesOut[0] = 0 self.handleLogOutput(inference) if self.learningMode: if ((self.acceptanceProbability < 1.0) and (self._rgen.getReal64() > self.acceptanceProbability)): pass else: for category in categories: if (category >= 0): if self._doSphering: self._storeSample(inputVector, category, partition) else: self._knn.learn(inputVector, category, partition) self._epoch += 1
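The compute() method above is driven with plain dicts of numpy arrays by the runtime engine. A sketch of exercising it in isolation (assuming the freshly constructed `region` from the earlier sketch, with maxCategoryCount=4 and learning mode already on; buffer sizes and field names follow the Spec above):

import numpy

pattern = numpy.zeros(64, dtype=numpy.float32)
pattern[[3, 11, 42]] = 1.0
inputs = {'bottomUpIn': pattern,
          'categoryIn': numpy.array([1, -1, -1], dtype=numpy.float32)}
outputs = {'categoriesOut': numpy.zeros(4, dtype=numpy.float32),
           'categoryProbabilitiesOut': numpy.zeros(4, dtype=numpy.float32)}

region.compute(inputs, outputs)                  # learning only: stores `pattern` under category 1
region.setParameter('inferenceMode', -1, 1)      # finish learning, start inferring
region.compute(inputs, outputs)                  # categoriesOut should now peak at index 1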
'Public API for returning the category list. This is a required API of the NearestNeighbor inspector. :returns: (list) which has one entry per stored prototype. The value of the entry is the category # of that stored prototype.'
def getCategoryList(self):
return self._knn._categoryList
'Removes a category. :param categoryToRemove: (string) label to remove'
def removeCategory(self, categoryToRemove):
return self._knn.removeCategory(categoryToRemove)
'Public API for returning the full scores (distance to each prototype) from the last :meth:`compute` inference call. This is a required API of the NearestNeighbor inspector. :returns: (list) which has one entry per stored prototype. The value of the entry is the distance of the most recently inferred input from the stored prototype.'
def getLatestDistances(self):
if (self._protoScores is not None): if self.keepAllDistances: return self._protoScores[(self._protoScoreCount - 1), :] else: return self._protoScores else: return None
'Like :meth:`~nupic.regions.knn_classifier_region.KNNClassifierRegion.getLatestDistances`, but returns all the scores if more than one set is available. :meth:`~nupic.regions.knn_classifier_region.KNNClassifierRegion.getLatestDistances` will always just return one set of scores. :returns: (list) all the prototype distances from all computes available.'
def getAllDistances(self):
if (self._protoScores is None): return None return self._protoScores[:self._protoScoreCount, :]
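When keepAllDistances is on, the region keeps one row of prototype distances per inference call (growing its buffer by doubling), and getAllDistances() returns only the filled rows. A sketch continuing the compute example above, assuming keepAllDistances is enabled before the first inference pass it should record:

region.setParameter('keepAllDistances', -1, 1)
for _ in range(5):
    region.compute(inputs, outputs)              # five inference passes
allScores = region.getAllDistances()             # one row per recorded inference
latest = region.getLatestDistances()             # the most recent row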
'Finish learning. If sphering is enabled, computes the normalization constants and feeds the stored samples to the KNN; then finalizes the underlying KNN classifier.'
def _finishLearning(self):
if self._doSphering: self._finishSphering() self._knn.finishLearning() self._accuracy = None
'Compute normalization constants for each feature dimension based on the collected training samples. Then normalize our training samples using these constants (so that each input dimension has mean and variance of zero and one, respectively.) Then feed these "sphered" training samples into the underlying KNN classifier.'
def _finishSphering(self):
self._normOffset = (self._samples.mean(axis=0) * (-1.0)) self._samples += self._normOffset variance = self._samples.var(axis=0) variance[numpy.where((variance == 0.0))] = 1.0 self._normScale = (1.0 / numpy.sqrt(variance)) self._samples *= self._normScale for sampleIndex in range(len(self._labels)): self._knn.learn(self._samples[sampleIndex], self._labels[sampleIndex], self._partitions[sampleIndex])
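A worked illustration of the sphering arithmetic above (plain numpy, independent of the region): after applying the offset and scale, each column of the training matrix has zero mean and unit variance.

import numpy

samples = numpy.array([[1., 10.], [3., 10.], [5., 40.]])
normOffset = samples.mean(axis=0) * -1.0          # [-3., -20.]
centered = samples + normOffset
variance = centered.var(axis=0)
variance[variance == 0.0] = 1.0                   # guard constant dimensions
normScale = 1.0 / numpy.sqrt(variance)
sphered = centered * normScale
print sphered.mean(axis=0)                        # ~[0., 0.]
print sphered.var(axis=0)                         # [1., 1.]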
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.'
def getOutputElementCount(self, name):
if (name == 'categoriesOut'): return self.maxCategoryCount elif (name == 'categoryProbabilitiesOut'): return self.maxCategoryCount elif (name == 'bestPrototypeIndices'): return (self._bestPrototypeIndexCount if self._bestPrototypeIndexCount else 0) else: raise Exception(('Unknown output: ' + name))
'Reset the sensor to beginning of data.'
def rewind(self):
self._iterNum = 0 if (self.dataSource is not None): self.dataSource.rewind()
'Get the next record to encode. Includes getting a record from the `dataSource` and applying filters. If the filters request more data, keep getting records from the `dataSource` until all filters are satisfied. This method is separate from :meth:`.RecordSensor.compute` so that we can use a standalone :class:`.RecordSensor` to get filtered data.'
def getNextRecord(self):
allFiltersHaveEnoughData = False while (not allFiltersHaveEnoughData): data = self.dataSource.getNextRecordDict() if (not data): raise StopIteration('Datasource has no more data') if ('_reset' not in data): data['_reset'] = 0 if ('_sequenceId' not in data): data['_sequenceId'] = 0 if ('_category' not in data): data['_category'] = [None] (data, allFiltersHaveEnoughData) = self.applyFilters(data) self.lastRecord = data return data
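A minimal standalone sketch of the loop above (the list-backed data source and the constructor keywords are assumptions, not from the source): getNextRecord() pulls a record dict, fills in the `_reset`, `_sequenceId`, and `_category` defaults, and runs the pre-encoding filters.

from nupic.regions.record_sensor import RecordSensor

class _ListSource(object):
    """Hypothetical data source exposing the getNextRecordDict() protocol."""
    def __init__(self, records):
        self._records = iter(records)
    def getNextRecordDict(self):
        return next(self._records, None)

sensor = RecordSensor(verbosity=0, numCategories=1)
sensor.dataSource = _ListSource([{'consumption': 5.5}, {'consumption': 7.2}])
record = sensor.getNextRecord()   # e.g. {'consumption': 5.5, '_reset': 0, '_sequenceId': 0, '_category': [None]}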
'Apply pre-encoding filters. These filters may modify or add data. If a filter needs another record (e.g. a delta filter), it will request another record by returning False, and the current record will be skipped (but will still be given to all filters). We have to be very careful about resets. A filter may add a reset, but other filters should not see the added reset; each filter sees the original reset value, and we keep track of whether any filter adds a reset. :param data: (dict) The data that will be processed by the filters. :returns: (tuple) with the data processed by the filters and a boolean indicating whether all filters have enough data.'
def applyFilters(self, data):
if (self.verbosity > 0): print ('RecordSensor got data: %s' % data) allFiltersHaveEnoughData = True if (len(self.preEncodingFilters) > 0): originalReset = data['_reset'] actualReset = originalReset for f in self.preEncodingFilters: filterHasEnoughData = f.process(data) allFiltersHaveEnoughData = (allFiltersHaveEnoughData and filterHasEnoughData) actualReset = (actualReset or data['_reset']) data['_reset'] = originalReset data['_reset'] = actualReset return (data, allFiltersHaveEnoughData)
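A sketch of the filter protocol described above (the filter class is hypothetical, not part of the source): process() mutates the record dict in place and returns False when it needs another record before it can produce output, which makes getNextRecord() skip the current record and fetch the next one.

class _DeltaLikeFilter(object):
    """Hypothetical pre-encoding filter that needs two records of history."""
    def __init__(self):
        self._prev = None
    def process(self, data):
        prev, self._prev = self._prev, data['consumption']
        if prev is None:
            return False                          # not enough history yet
        data['consumption_delta'] = data['consumption'] - prev
        return True

# Assuming a standalone sensor wired to a fresh two-record source as in the
# sketch above:
sensor.dataSource = _ListSource([{'consumption': 5.5}, {'consumption': 7.2}])
sensor.preEncodingFilters = [_DeltaLikeFilter()]
rec = sensor.getNextRecord()      # consumes two records; rec['consumption_delta'] ~ 1.7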
'Populate the output array with the category indices. .. note:: Non-categories are represented with ``-1``. :param categories: (list) of category strings :param output: (list) category output, will be overwritten'
def populateCategoriesOut(self, categories, output):
if (categories[0] is None): output[:] = (-1) else: for (i, cat) in enumerate(categories[:len(output)]): output[i] = cat output[len(categories):] = (-1)
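A quick illustration of populateCategoriesOut on a RecordSensor instance: unused slots and missing categories are filled with -1.

import numpy

out = numpy.zeros(4)
sensor.populateCategoriesOut([7, 2], out)     # out -> [7., 2., -1., -1.]
sensor.populateCategoriesOut([None], out)     # out -> [-1., -1., -1., -1.]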
'Get a record from the dataSource and encode it. Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.compute`.'
def compute(self, inputs, outputs):
if (not self.topDownMode): data = self.getNextRecord() reset = data['_reset'] sequenceId = data['_sequenceId'] categories = data['_category'] self.encoder.encodeIntoArray(data, outputs['dataOut']) if ((self.predictedField is not None) and (self.predictedField != 'vector')): allEncoders = list(self.encoder.encoders) if (self.disabledEncoder is not None): allEncoders.extend(self.disabledEncoder.encoders) encoders = [e for e in allEncoders if (e[0] == self.predictedField)] if (len(encoders) == 0): raise ValueError(('There is no encoder for set for the predicted field: %s' % self.predictedField)) else: encoder = encoders[0][1] actualValue = data[self.predictedField] outputs['bucketIdxOut'][:] = encoder.getBucketIndices(actualValue) if isinstance(actualValue, str): outputs['actValueOut'][:] = encoder.getBucketIndices(actualValue) else: outputs['actValueOut'][:] = actualValue outputs['sourceOut'][:] = self.encoder.getScalars(data) self._outputValues['sourceOut'] = self.encoder.getEncodedValues(data) encoders = self.encoder.getEncoderList() prevOffset = 0 sourceEncodings = [] bitData = outputs['dataOut'] for encoder in encoders: nextOffset = (prevOffset + encoder.getWidth()) sourceEncodings.append(bitData[prevOffset:nextOffset]) prevOffset = nextOffset self._outputValues['sourceEncodings'] = sourceEncodings for filter in self.postEncodingFilters: filter.process(encoder=self.encoder, data=outputs['dataOut']) outputs['resetOut'][0] = reset outputs['sequenceIdOut'][0] = sequenceId self.populateCategoriesOut(categories, outputs['categoryOut']) if (self.verbosity >= 1): if (self._iterNum == 0): self.encoder.pprintHeader(prefix='sensor:') if reset: print ('RESET - sequenceID:%d' % sequenceId) if (self.verbosity >= 2): print if (self.verbosity >= 1): self.encoder.pprint(outputs['dataOut'], prefix=('%7d:' % self._iterNum)) scalarValues = self.encoder.getScalars(data) nz = outputs['dataOut'].nonzero()[0] print (' nz: (%d)' % len(nz)), nz print ' encIn:', self.encoder.scalarsToStr(scalarValues) if (self.verbosity >= 2): print ' data:', str(data) if (self.verbosity >= 3): decoded = self.encoder.decode(outputs['dataOut']) print 'decoded:', self.encoder.decodedToStr(decoded) self._iterNum += 1 else: spatialTopDownIn = inputs['spatialTopDownIn'] spatialTopDownOut = self.encoder.topDownCompute(spatialTopDownIn) values = [elem.value for elem in spatialTopDownOut] scalars = [elem.scalar for elem in spatialTopDownOut] encodings = [elem.encoding for elem in spatialTopDownOut] self._outputValues['spatialTopDownOut'] = values outputs['spatialTopDownOut'][:] = numpy.array(scalars) self._outputValues['spatialTopDownEncodings'] = encodings temporalTopDownIn = inputs['temporalTopDownIn'] temporalTopDownOut = self.encoder.topDownCompute(temporalTopDownIn) values = [elem.value for elem in temporalTopDownOut] scalars = [elem.scalar for elem in temporalTopDownOut] encodings = [elem.encoding for elem in temporalTopDownOut] self._outputValues['temporalTopDownOut'] = values outputs['temporalTopDownOut'][:] = numpy.array(scalars) self._outputValues['temporalTopDownEncodings'] = encodings assert (len(spatialTopDownOut) == len(temporalTopDownOut)), 'Error: spatialTopDownOut and temporalTopDownOut should be the same size'
'Converts all of the non-numeric fields from spatialOutput and temporalOutput into their scalar equivalents and records them in the output dictionary. :param spatialOutput: The results of topDownCompute() for the spatial input. :param temporalOutput: The results of topDownCompute() for the temporal input. :param output: The main dictionary of outputs passed to compute(). It is expected to have keys \'spatialTopDownOut\' and \'temporalTopDownOut\' that are mapped to numpy arrays.'
def _convertNonNumericData(self, spatialOutput, temporalOutput, output):
encoders = self.encoder.getEncoderList() types = self.encoder.getDecoderOutputFieldTypes() for (i, (encoder, type)) in enumerate(zip(encoders, types)): spatialData = spatialOutput[i] temporalData = temporalOutput[i] if ((type != FieldMetaType.integer) and (type != FieldMetaType.float)): spatialData = encoder.getScalars(spatialData)[0] temporalData = encoder.getScalars(temporalData)[0] assert isinstance(spatialData, (float, int)) assert isinstance(temporalData, (float, int)) output['spatialTopDownOut'][i] = spatialData output['temporalTopDownOut'][i] = temporalData
'.. note:: These are normal Python lists, rather than numpy arrays. This is to support lists with mixed scalars and strings, as in the case of records with categorical variables. :returns: (dict) output values.'
def getOutputValues(self, outputName):
return self._outputValues[outputName]
'Computes the width of dataOut. Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.'
def getOutputElementCount(self, name):
if (name == 'resetOut'): print 'WARNING: getOutputElementCount should not have been called with resetOut' return 1 elif (name == 'sequenceIdOut'): print 'WARNING: getOutputElementCount should not have been called with sequenceIdOut' return 1 elif (name == 'dataOut'): if (self.encoder is None): raise Exception("NuPIC requested output element count for 'dataOut' on a RecordSensor node, but the encoder has not been set") return self.encoder.getWidth() elif (name == 'sourceOut'): if (self.encoder is None): raise Exception("NuPIC requested output element count for 'sourceOut' on a RecordSensor node, but the encoder has not been set") return len(self.encoder.getDescription()) elif (name == 'bucketIdxOut'): return 1 elif (name == 'actValueOut'): return 1 elif (name == 'categoryOut'): return self.numCategories elif ((name == 'spatialTopDownOut') or (name == 'temporalTopDownOut')): if (self.encoder is None): raise Exception(("NuPIC requested output element count for '%s' on a RecordSensor node, but the encoder has not been set" % name)) return len(self.encoder.getDescription()) else: raise Exception(('Unknown output %s' % name))
'Set the value of a Spec parameter. Most parameters are handled automatically by PyRegion\'s parameter set mechanism. The ones that need special treatment are explicitly handled here.'
def setParameter(self, parameterName, index, parameterValue):
if (parameterName == 'topDownMode'): self.topDownMode = parameterValue elif (parameterName == 'predictedField'): self.predictedField = parameterValue else: raise Exception(('Unknown parameter: ' + parameterName))
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSchema`.'
@staticmethod def getSchema():
return RecordSensorProto
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.'
def writeToProto(self, proto):
self.encoder.write(proto.encoder) if (self.disabledEncoder is not None): self.disabledEncoder.write(proto.disabledEncoder) proto.topDownMode = int(self.topDownMode) proto.verbosity = self.verbosity proto.numCategories = self.numCategories
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.'
@classmethod def readFromProto(cls, proto):
instance = cls() instance.encoder = MultiEncoder.read(proto.encoder) if (proto.disabledEncoder is not None): instance.disabledEncoder = MultiEncoder.read(proto.disabledEncoder) instance.topDownMode = bool(proto.topDownMode) instance.verbosity = proto.verbosity instance.numCategories = proto.numCategories return instance
'Initialize all ephemeral data members, and give the derived class the opportunity to do the same by invoking the virtual member _initEphemerals(), which is intended to be overridden.'
def _initialize(self):
for attrName in self._getEphemeralMembersBase(): if (attrName != '_loaded'): if hasattr(self, attrName): if self._loaded: pass else: print self.__class__.__name__, ("contains base class member '%s'" % attrName) if (not self._loaded): for attrName in self._getEphemeralMembersBase(): if (attrName != '_loaded'): assert (not hasattr(self, attrName)) else: assert hasattr(self, attrName) self._profileObj = None self._iterations = 0 self._initEphemerals() self._checkEphemeralMembers()
'Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.'
def initialize(self):
autoArgs = dict(((name, getattr(self, name)) for name in self._temporalArgNames)) if (self._tfdr is None): tpClass = _getTPClass(self.temporalImp) if (self.temporalImp in ['py', 'cpp', 'r', 'tm_py', 'tm_cpp', 'monitored_tm_py']): self._tfdr = tpClass(numberOfCols=self.columnCount, cellsPerColumn=self.cellsPerColumn, **autoArgs) else: raise RuntimeError('Invalid temporalImp')
'Run one iteration of :class:`~nupic.regions.tm_region.TMRegion` compute, profiling it if requested. :param inputs: (dict) mapping region input names to numpy.array values :param outputs: (dict) mapping region output names to numpy.arrays that should be populated with output values by this method'
def compute(self, inputs, outputs):
if (False and self.learningMode and (self._iterations > 0) and (self._iterations <= 10)): import hotshot if (self._iterations == 10): print '\n Collecting and sorting internal node profiling stats generated by hotshot...' stats = hotshot.stats.load('hotshot.stats') stats.strip_dirs() stats.sort_stats('time', 'calls') stats.print_stats() if (self._profileObj is None): print '\n Preparing to capture profile using hotshot...' if os.path.exists('hotshot.stats'): os.remove('hotshot.stats') self._profileObj = hotshot.Profile('hotshot.stats', 1, 1) self._profileObj.runcall(self._compute, *[inputs, outputs]) else: self._compute(inputs, outputs)