'Sets the permanence decrement. :param permanenceDecrement: (float) The permanence decrement.'
def setPermanenceDecrement(self, permanenceDecrement):
self.permanenceDecrement = permanenceDecrement
'Get the predicted segment decrement. :returns: (float) The predicted segment decrement.'
def getPredictedSegmentDecrement(self):
return self.predictedSegmentDecrement
'Sets the predicted segment decrement. :param predictedSegmentDecrement: (float) The predicted segment decrement.'
def setPredictedSegmentDecrement(self, predictedSegmentDecrement):
self.predictedSegmentDecrement = predictedSegmentDecrement
'Get the connected permanence. :returns: (float) The connected permanence.'
def getConnectedPermanence(self):
return self.connectedPermanence
'Sets the connected permanence. :param connectedPermanence: (float) The connected permanence.'
def setConnectedPermanence(self, connectedPermanence):
self.connectedPermanence = connectedPermanence
'Get the maximum number of segments per cell. :returns: (int) max number of segments per cell'
def getMaxSegmentsPerCell(self):
return self.maxSegmentsPerCell
'Get the maximum number of synapses per segment. :returns: (int) max number of synapses per segment'
def getMaxSynapsesPerSegment(self):
return self.maxSynapsesPerSegment
'Writes serialized data to proto object. :param proto: (DynamicStructBuilder) Proto object'
def write(self, proto):
proto.columnDimensions = list(self.columnDimensions)
proto.cellsPerColumn = self.cellsPerColumn
proto.activationThreshold = self.activationThreshold
proto.initialPermanence = self.initialPermanence
proto.connectedPermanence = self.connectedPermanence
proto.minThreshold = self.minThreshold
proto.maxNewSynapseCount = self.maxNewSynapseCount
proto.permanenceIncrement = self.permanenceIncrement
proto.permanenceDecrement = self.permanenceDecrement
proto.predictedSegmentDecrement = self.predictedSegmentDecrement
proto.maxSegmentsPerCell = self.maxSegmentsPerCell
proto.maxSynapsesPerSegment = self.maxSynapsesPerSegment

self.connections.write(proto.connections)
self._random.write(proto.random)
proto.activeCells = list(self.activeCells)
proto.winnerCells = list(self.winnerCells)

protoActiveSegments = proto.init('activeSegments', len(self.activeSegments))
for i, segment in enumerate(self.activeSegments):
  protoActiveSegments[i].cell = segment.cell
  idx = self.connections.segmentsForCell(segment.cell).index(segment)
  protoActiveSegments[i].idxOnCell = idx

protoMatchingSegments = proto.init('matchingSegments',
                                   len(self.matchingSegments))
for i, segment in enumerate(self.matchingSegments):
  protoMatchingSegments[i].cell = segment.cell
  idx = self.connections.segmentsForCell(segment.cell).index(segment)
  protoMatchingSegments[i].idxOnCell = idx

protoNumActivePotential = proto.init(
    'numActivePotentialSynapsesForSegment',
    len(self.numActivePotentialSynapsesForSegment))
for i, numActivePotentialSynapses in enumerate(
    self.numActivePotentialSynapsesForSegment):
  segment = self.connections.segmentForFlatIdx(i)
  if segment is not None:
    protoNumActivePotential[i].cell = segment.cell
    idx = self.connections.segmentsForCell(segment.cell).index(segment)
    protoNumActivePotential[i].idxOnCell = idx
    protoNumActivePotential[i].number = numActivePotentialSynapses

proto.iteration = self.iteration

protoLastUsedIteration = proto.init(
    'lastUsedIterationForSegment',
    len(self.numActivePotentialSynapsesForSegment))
for i, lastUsed in enumerate(self.lastUsedIterationForSegment):
  segment = self.connections.segmentForFlatIdx(i)
  if segment is not None:
    protoLastUsedIteration[i].cell = segment.cell
    idx = self.connections.segmentsForCell(segment.cell).index(segment)
    protoLastUsedIteration[i].idxOnCell = idx
    protoLastUsedIteration[i].number = lastUsed
'Reads deserialized data from proto object. :param proto: (DynamicStructBuilder) Proto object :returns: (:class:TemporalMemory) TemporalMemory instance'
@classmethod
def read(cls, proto):
tm = object.__new__(cls)
tm.columnDimensions = tuple(proto.columnDimensions)
tm.cellsPerColumn = int(proto.cellsPerColumn)
tm.activationThreshold = int(proto.activationThreshold)
tm.initialPermanence = proto.initialPermanence
tm.connectedPermanence = proto.connectedPermanence
tm.minThreshold = int(proto.minThreshold)
tm.maxNewSynapseCount = int(proto.maxNewSynapseCount)
tm.permanenceIncrement = proto.permanenceIncrement
tm.permanenceDecrement = proto.permanenceDecrement
tm.predictedSegmentDecrement = proto.predictedSegmentDecrement
tm.maxSegmentsPerCell = int(proto.maxSegmentsPerCell)
tm.maxSynapsesPerSegment = int(proto.maxSynapsesPerSegment)

tm.connections = Connections.read(proto.connections)
tm._random = Random()
tm._random.read(proto.random)

tm.activeCells = [int(x) for x in proto.activeCells]
tm.winnerCells = [int(x) for x in proto.winnerCells]

flatListLength = tm.connections.segmentFlatListLength()
tm.numActiveConnectedSynapsesForSegment = [0] * flatListLength
tm.numActivePotentialSynapsesForSegment = [0] * flatListLength
tm.lastUsedIterationForSegment = [0] * flatListLength

tm.activeSegments = []
tm.matchingSegments = []
for protoSegment in proto.activeSegments:
  tm.activeSegments.append(
      tm.connections.getSegment(protoSegment.cell, protoSegment.idxOnCell))
for protoSegment in proto.matchingSegments:
  tm.matchingSegments.append(
      tm.connections.getSegment(protoSegment.cell, protoSegment.idxOnCell))
for protoSegment in proto.numActivePotentialSynapsesForSegment:
  segment = tm.connections.getSegment(protoSegment.cell,
                                      protoSegment.idxOnCell)
  tm.numActivePotentialSynapsesForSegment[segment.flatIdx] = (
      int(protoSegment.number))

tm.iteration = long(proto.iteration)
for protoSegment in proto.lastUsedIterationForSegment:
  segment = tm.connections.getSegment(protoSegment.cell,
                                      protoSegment.idxOnCell)
  tm.lastUsedIterationForSegment[segment.flatIdx] = long(protoSegment.number)

return tm
'Equality operator for TemporalMemory instances. Checks if two instances are functionally identical (might have different internal state). :param other: (TemporalMemory) TemporalMemory instance to compare to'
def __eq__(self, other):
if self.columnDimensions != other.columnDimensions:
  return False
if self.cellsPerColumn != other.cellsPerColumn:
  return False
if self.activationThreshold != other.activationThreshold:
  return False
if abs(self.initialPermanence - other.initialPermanence) > EPSILON:
  return False
if abs(self.connectedPermanence - other.connectedPermanence) > EPSILON:
  return False
if self.minThreshold != other.minThreshold:
  return False
if self.maxNewSynapseCount != other.maxNewSynapseCount:
  return False
if abs(self.permanenceIncrement - other.permanenceIncrement) > EPSILON:
  return False
if abs(self.permanenceDecrement - other.permanenceDecrement) > EPSILON:
  return False
if abs(self.predictedSegmentDecrement
       - other.predictedSegmentDecrement) > EPSILON:
  return False
if self.connections != other.connections:
  return False
if self.activeCells != other.activeCells:
  return False
if self.winnerCells != other.winnerCells:
  return False
if self.matchingSegments != other.matchingSegments:
  return False
if self.activeSegments != other.activeSegments:
  return False
return True
'Non-equality operator for TemporalMemory instances. Checks if two instances are not functionally identical (might have different internal state). :param other: (TemporalMemory) TemporalMemory instance to compare to'
def __ne__(self, other):
return (not self.__eq__(other))
'Raises an error if column index is invalid. :param column: (int) Column index'
def _validateColumn(self, column):
if column >= self.numberOfColumns() or column < 0:
  raise IndexError('Invalid column')
'Raises an error if cell index is invalid. :param cell: (int) Cell index'
def _validateCell(self, cell):
if cell >= self.numberOfCells() or cell < 0:
  raise IndexError('Invalid cell')
'Returns the indices of the cells passed in. :param cells: (list) cells to find the indices of'
@classmethod
def getCellIndices(cls, cells):
return [cls.getCellIndex(c) for c in cells]
'Returns the index of the cell. :param cell: (int) cell to find the index of'
@staticmethod
def getCellIndex(cell):
return cell
'Create a SDR classifier factory. The implementation of the SDR Classifier can be specified with the "implementation" keyword argument. The SDRClassifierFactory uses the implementation as specified in `Default NuPIC Configuration <default-config.html>`_.'
@staticmethod
def create(*args, **kwargs):
impl = kwargs.pop('implementation', None)
if impl is None:
  impl = Configuration.get('nupic.opf.sdrClassifier.implementation')
if impl == 'py':
  return SDRClassifier(*args, **kwargs)
elif impl == 'cpp':
  return FastSDRClassifier(*args, **kwargs)
elif impl == 'diff':
  return SDRClassifierDiff(*args, **kwargs)
else:
  raise ValueError('Invalid classifier implementation (%r). Value must be '
                   '"py", "cpp" or "diff".' % impl)
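A usage sketch of the factory, assuming the standard NuPIC import path and the usual SDRClassifier constructor arguments (`steps`, `alpha`):

# Hypothetical usage sketch: ask the factory for the pure-Python
# implementation explicitly instead of relying on the configuration default.
from nupic.algorithms.sdr_classifier_factory import SDRClassifierFactory

classifier = SDRClassifierFactory.create(steps=[1], alpha=0.1,
                                         implementation='py')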
':param proto: SDRClassifierRegionProto capnproto object'
@staticmethod
def read(proto):
impl = proto.implementation
if impl == 'py':
  return SDRClassifier.read(proto.sdrClassifier)
elif impl == 'cpp':
  return FastSDRClassifier.read(proto.sdrClassifier)
elif impl == 'diff':
  return SDRClassifierDiff.read(proto.sdrClassifier)
else:
  raise ValueError('Invalid classifier implementation (%r). Value must be '
                   '"py", "cpp" or "diff".' % impl)
'Clears the state of the KNNClassifier.'
def clear(self):
self._Memory = None
self._numPatterns = 0
self._M = None
self._categoryList = []
self._partitionIdList = []
self._partitionIdMap = {}
self._finishedLearning = False
self._iterationIdx = -1

if self.maxStoredPatterns > 0:
  assert self.useSparseMemory, ('Fixed capacity KNN is implemented only '
                                'in the sparse memory mode')
  self.fixedCapacity = True
  self._categoryRecencyList = []
else:
  self.fixedCapacity = False

self._protoSizes = None
self._s = None
self._vt = None
self._nc = None
self._mean = None

self._specificIndexTraining = False
self._nextTrainingIndices = None
'Allows ids to be assigned a category and subsequently enables users to use: - :meth:`~.KNNClassifier.KNNClassifier.removeCategory` - :meth:`~.KNNClassifier.KNNClassifier.closestTrainingPattern` - :meth:`~.KNNClassifier.KNNClassifier.closestOtherTrainingPattern`'
def prototypeSetCategory(self, idToCategorize, newCategory):
if idToCategorize not in self._categoryRecencyList:
  return

recordIndex = self._categoryRecencyList.index(idToCategorize)
self._categoryList[recordIndex] = newCategory
'There are two caveats. First, this is a potentially slow operation. Second, pattern indices will shift if patterns before them are removed. :param idsToRemove: A list of row indices to remove.'
def removeIds(self, idsToRemove):
rowsToRemove = [k for k, rowID in enumerate(self._categoryRecencyList)
                if rowID in idsToRemove]
self._removeRows(rowsToRemove)
'There are two caveats. First, this is a potentially slow operation. Second, pattern indices will shift if patterns before them are removed. :param categoryToRemove: Category label to remove'
def removeCategory(self, categoryToRemove):
removedRows = 0
if self._Memory is None:
  return removedRows

catToRemove = float(categoryToRemove)
rowsToRemove = [k for k, catID in enumerate(self._categoryList)
                if catID == catToRemove]
self._removeRows(rowsToRemove)
assert catToRemove not in self._categoryList
'Removes the given rows from memory. There are two caveats. First, this is a potentially slow operation. Second, pattern indices will shift if patterns before them are removed. :param rowsToRemove: A list of row indices to remove.'
def _removeRows(self, rowsToRemove):
# Form a numpy array of row indices to be removed
removalArray = numpy.array(rowsToRemove)

# Remove categories
self._categoryList = numpy.delete(numpy.array(self._categoryList),
                                  removalArray).tolist()
if self.fixedCapacity:
  self._categoryRecencyList = numpy.delete(
      numpy.array(self._categoryRecencyList), removalArray).tolist()

# Remove the partition ids and rebuild the map
for row in reversed(rowsToRemove):
  self._partitionIdList.pop(row)
self._rebuildPartitionIdMap(self._partitionIdList)

# Remove the patterns themselves; delete backwards so that earlier
# indices remain valid while later rows are removed
if self.useSparseMemory:
  for rowIndex in rowsToRemove[::-1]:
    self._Memory.deleteRow(rowIndex)
else:
  self._M = numpy.delete(self._M, removalArray, 0)

numRemoved = len(rowsToRemove)

# Sanity checks
numRowsExpected = self._numPatterns - numRemoved
if self.useSparseMemory:
  if self._Memory is not None:
    assert self._Memory.nRows() == numRowsExpected
else:
  assert self._M.shape[0] == numRowsExpected
assert len(self._categoryList) == numRowsExpected

self._numPatterns -= numRemoved
return numRemoved
'Utility method to increment the iteration index. Intended for models that don\'t learn each timestep.'
def doIteration(self):
self._iterationIdx += 1
'Train the classifier to associate specified input pattern with a particular category. :param inputPattern: (list) The pattern to be assigned a category. If isSparse is 0, this should be a dense array (both ON and OFF bits present). Otherwise, if isSparse > 0, this should be a list of the indices of the non-zero bits in sorted order :param inputCategory: (int) The category to be associated to the training pattern :param partitionId: (int) partitionID allows you to associate an id with each input vector. It can be used to associate input patterns stored in the classifier with an external id. This can be useful for debugging or visualizing. Another use case is to ignore vectors with a specific id during inference (see description of infer() for details). There can be at most one partitionId per stored pattern (i.e. if two patterns are within distThreshold, only the first partitionId will be stored). This is an optional parameter. :param isSparse: (int) If 0, the input pattern is a dense representation. If isSparse > 0, the input pattern is a list of non-zero indices and isSparse is the length of the dense representation :param rowID: (int) UNKNOWN :returns: The number of patterns currently stored in the classifier'
def learn(self, inputPattern, inputCategory, partitionId=None, isSparse=0, rowID=None):
if self.verbosity >= 1:
  print '%s learn:' % g_debugPrefix
  print ' category:', int(inputCategory)
  print ' active inputs:', _labeledInput(inputPattern,
                                         cellsPerCol=self.cellsPerCol)

if isSparse > 0:
  assert all(inputPattern[i] <= inputPattern[i + 1]
             for i in xrange(len(inputPattern) - 1)), \
      'Sparse inputPattern must be sorted.'
  assert all(bit < isSparse for bit in inputPattern), \
      ("Sparse inputPattern must not index outside the dense "
       "representation's bounds.")

if rowID is None:
  rowID = self._iterationIdx

# Dense vectors
if not self.useSparseMemory:
  # Not supported in this mode
  assert self.cellsPerCol == 0, 'not implemented for dense vectors'

  # If the input was given in sparse form, convert it to dense
  if isSparse > 0:
    denseInput = numpy.zeros(isSparse)
    denseInput[inputPattern] = 1.0
    inputPattern = denseInput

  if self._specificIndexTraining and not self._nextTrainingIndices:
    # Specific-index training mode without any indices left - skip learning
    return self._numPatterns

  if self._Memory is None:
    # Initialize memory with 100 rows
    inputWidth = len(inputPattern)
    self._Memory = numpy.zeros((100, inputWidth))
    self._numPatterns = 0
    self._M = self._Memory[:self._numPatterns]

  addRow = True

  if self._vt is not None:
    # Project the input onto the SVD eigenvectors
    inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

  if self.distThreshold > 0:
    # Don't learn entries that are too close to existing entries
    dist = self._calcDistance(inputPattern)
    minDist = dist.min()
    addRow = (minDist >= self.distThreshold)

  if addRow:
    self._protoSizes = None  # need to re-compute
    if self._numPatterns == self._Memory.shape[0]:
      # Double the size of the memory
      self._doubleMemoryNumRows()

    if not self._specificIndexTraining:
      # Normal learning - append the new vector
      self._Memory[self._numPatterns] = inputPattern
      self._numPatterns += 1
      self._categoryList.append(int(inputCategory))
    else:
      # Specific-index training mode - insert the vector in a given slot
      vectorIndex = self._nextTrainingIndices.pop(0)
      while vectorIndex >= self._Memory.shape[0]:
        self._doubleMemoryNumRows()
      self._Memory[vectorIndex] = inputPattern
      self._numPatterns = max(self._numPatterns, vectorIndex + 1)
      if vectorIndex >= len(self._categoryList):
        self._categoryList += [-1] * (vectorIndex
                                      - len(self._categoryList) + 1)
      self._categoryList[vectorIndex] = int(inputCategory)

    # Set _M to the "active" part of _Memory
    self._M = self._Memory[0:self._numPatterns]
    self._addPartitionId(self._numPatterns - 1, partitionId)

# Sparse vectors
else:
  # If the input is sparse but this configuration needs dense vectors,
  # convert it to dense
  if isSparse > 0 and (self._vt is not None or self.distThreshold > 0
                       or self.numSVDDims is not None
                       or self.numSVDSamples is not None
                       or self.numWinners > 0):
    denseInput = numpy.zeros(isSparse)
    denseInput[inputPattern] = 1.0
    inputPattern = denseInput
    isSparse = 0

  # Get the input width
  if isSparse > 0:
    inputWidth = isSparse
  else:
    inputWidth = len(inputPattern)

  # Allocate storage if this is the first training vector
  if self._Memory is None:
    self._Memory = NearestNeighbor(0, inputWidth)

  # Support SVD if it is on
  if self._vt is not None:
    inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

  # Threshold the input if it was given in dense form
  if isSparse == 0:
    thresholdedInput = self._sparsifyVector(inputPattern, True)

  addRow = True

  # If given the layout of the cells, store only the start cell for
  # bursting columns
  if self.cellsPerCol >= 1:
    burstingCols = thresholdedInput.reshape(
        -1, self.cellsPerCol).min(axis=1).nonzero()[0]
    for col in burstingCols:
      thresholdedInput[(col * self.cellsPerCol) + 1:
                       (col * self.cellsPerCol) + self.cellsPerCol] = 0

  if self._Memory.nRows() > 0:
    dist = None
    # If this vector is an exact duplicate of a stored one, just replace
    # its category - it may have changed with online learning on
    if self.replaceDuplicates:
      dist = self._calcDistance(thresholdedInput, distanceNorm=1)
      if dist.min() == 0:
        rowIdx = dist.argmin()
        self._categoryList[rowIdx] = int(inputCategory)
        if self.fixedCapacity:
          self._categoryRecencyList[rowIdx] = rowID
        addRow = False

    # Don't add the vector if it is too close to an existing one
    if self.distThreshold > 0:
      if dist is None or self.distanceNorm != 1:
        dist = self._calcDistance(thresholdedInput)
      minDist = dist.min()
      addRow = (minDist >= self.distThreshold)
      if not addRow:
        if self.fixedCapacity:
          rowIdx = dist.argmin()
          self._categoryRecencyList[rowIdx] = rowID

  # If sparsity is too low, do not add this vector
  if addRow and self.minSparsity > 0.0:
    if isSparse == 0:
      sparsity = (float(len(thresholdedInput.nonzero()[0]))
                  / len(thresholdedInput))
    else:
      sparsity = float(len(inputPattern)) / isSparse
    if sparsity < self.minSparsity:
      addRow = False

  # Add the new vector to our storage
  if addRow:
    self._protoSizes = None  # need to re-compute
    if isSparse == 0:
      self._Memory.addRow(thresholdedInput)
    else:
      self._Memory.addRowNZ(inputPattern, [1] * len(inputPattern))
    self._numPatterns += 1
    self._categoryList.append(int(inputCategory))
    self._addPartitionId(self._numPatterns - 1, partitionId)
    if self.fixedCapacity:
      self._categoryRecencyList.append(rowID)
      if (self._numPatterns > self.maxStoredPatterns
          and self.maxStoredPatterns > 0):
        # Evict the least recently used pattern
        leastRecentlyUsedPattern = numpy.argmin(self._categoryRecencyList)
        self._Memory.deleteRow(leastRecentlyUsedPattern)
        self._categoryList.pop(leastRecentlyUsedPattern)
        self._categoryRecencyList.pop(leastRecentlyUsedPattern)
        self._numPatterns -= 1

if (self.numSVDDims is not None and self.numSVDSamples is not None
    and self._numPatterns == self.numSVDSamples):
  self.computeSVD()

return self._numPatterns
'Return the degree of overlap between an input pattern and each stored pattern. The overlap is computed as: .. code-block:: python logical_and(inputPattern != 0, trainingPattern != 0).sum() :param inputPattern: pattern to check overlap of :returns: (overlaps, categories) Two numpy arrays of the same length, where: * overlaps: an integer overlap amount for each stored pattern * categories: category index for each element of overlaps'
def getOverlaps(self, inputPattern):
assert self.useSparseMemory, 'Not implemented yet for dense storage'

overlaps = self._Memory.rightVecSumAtNZ(inputPattern)
return (overlaps, self._categoryList)
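The docstring's overlap formula can be checked against plain numpy (a minimal sketch with made-up binary patterns):

import numpy

def overlap(inputPattern, trainingPattern):
  # Count the positions where both patterns have a non-zero bit
  return numpy.logical_and(inputPattern != 0, trainingPattern != 0).sum()

a = numpy.array([0, 1, 1, 0, 1])
b = numpy.array([1, 1, 0, 0, 1])
assert overlap(a, b) == 2   # bits 1 and 4 are shared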
'Return the distances between the input pattern and all other stored patterns. :param inputPattern: pattern to check distance with :returns: (distances, categories) numpy arrays of the same length. - distances: the distance from the input pattern to each stored pattern - categories: category index for each element of distances'
def getDistances(self, inputPattern):
dist = self._getDistances(inputPattern)
return (dist, self._categoryList)
'Finds the category that best matches the input pattern. Returns the winning category index as well as a distribution over all categories. :param inputPattern: (list) A pattern to be classified :param computeScores: NO EFFECT :param overCategories: NO EFFECT :param partitionId: (int) If provided, all training vectors with partitionId equal to that of the input pattern are ignored. For example, this may be used to perform k-fold cross validation without repopulating the classifier. First partition all the data into k equal partitions numbered 0, 1, 2, ... and then call learn() for each vector passing in its partitionId. Then, during inference, by passing in the partition ID in the call to infer(), all other vectors with the same partitionId are ignored, simulating the effect of repopulating the classifier while omitting the training vectors in the same partition. :returns: 4-tuple with these fields: - ``winner``: The category with the greatest number of nearest neighbors within the kth nearest neighbors. If the inferenceResult contains no neighbors, the value of winner is None. This can happen, for example, in cases of exact matching, if there are no stored vectors, or if minSparsity is not met. - ``inferenceResult``: A list of length numCategories, each entry contains the number of neighbors within the top k neighbors that are in that category. - ``dist``: A list of length numPrototypes. Each entry is the distance from the unknown to that prototype. All distances are between 0.0 and 1.0. - ``categoryDist``: A list of length numCategories. Each entry is the distance from the unknown to the nearest prototype of that category. All distances are between 0 and 1.0.'
def infer(self, inputPattern, computeScores=True, overCategories=True, partitionId=None):
sparsity = 0.0
if self.minSparsity > 0.0:
  sparsity = float(len(inputPattern.nonzero()[0])) / len(inputPattern)

if len(self._categoryList) == 0 or sparsity < self.minSparsity:
  # Nothing stored yet, or the input is not sparse enough to classify
  winner = None
  inferenceResult = numpy.zeros(1)
  dist = numpy.ones(1)
  categoryDist = numpy.ones(1)
else:
  maxCategoryIdx = max(self._categoryList)
  inferenceResult = numpy.zeros(maxCategoryIdx + 1)
  dist = self._getDistances(inputPattern, partitionId=partitionId)
  validVectorCount = len(self._categoryList) - self._categoryList.count(-1)

  # Loop through the indices of the nearest neighbors
  if self.exact:
    # Is there an exact match in the distances?
    exactMatches = numpy.where(dist < 1e-05)[0]
    if len(exactMatches) > 0:
      for i in exactMatches[:min(self.k, validVectorCount)]:
        inferenceResult[self._categoryList[i]] += 1.0
  else:
    sorted = dist.argsort()
    for j in sorted[:min(self.k, validVectorCount)]:
      inferenceResult[self._categoryList[j]] += 1.0

  # Prepare inference results
  if inferenceResult.any():
    winner = inferenceResult.argmax()
    inferenceResult /= inferenceResult.sum()
  else:
    winner = None
  categoryDist = min_score_per_category(maxCategoryIdx,
                                        self._categoryList, dist)
  categoryDist.clip(0, 1.0, categoryDist)

if self.verbosity >= 1:
  print '%s infer:' % g_debugPrefix
  print ' active inputs:', _labeledInput(inputPattern,
                                         cellsPerCol=self.cellsPerCol)
  print ' winner category:', winner
  print ' pct neighbors of each category:', inferenceResult
  print ' dist of each prototype:', dist
  print ' dist of each category:', categoryDist

result = (winner, inferenceResult, dist, categoryDist)
return result
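A minimal end-to-end usage sketch (the import path assumes NuPIC's usual layout; the pattern and category are made up):

import numpy
from nupic.algorithms.knn_classifier import KNNClassifier

knn = KNNClassifier(k=1)
pattern = numpy.zeros(100)
pattern[[5, 17, 42]] = 1.0          # hypothetical active bits
knn.learn(pattern, inputCategory=0)
winner, inferenceResult, dist, categoryDist = knn.infer(pattern)
assert winner == 0   # the lone stored pattern is its own nearest neighbor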
'Returns the index of the pattern that is closest to inputPattern, the distances of all patterns to inputPattern, and the indices of the k closest categories.'
def getClosest(self, inputPattern, topKCategories=3):
inferenceResult = numpy.zeros(max(self._categoryList) + 1)
dist = self._getDistances(inputPattern)
sorted = dist.argsort()

validVectorCount = len(self._categoryList) - self._categoryList.count(-1)
for j in sorted[:min(self.k, validVectorCount)]:
  inferenceResult[self._categoryList[j]] += 1.0

winner = inferenceResult.argmax()

topNCats = []
for i in range(topKCategories):
  topNCats.append((self._categoryList[sorted[i]], dist[sorted[i]]))

return (winner, dist, topNCats)
'Returns the closest training pattern to inputPattern that belongs to category "cat". :param inputPattern: The pattern whose closest neighbor is sought :param cat: The required category of closest neighbor :returns: A dense version of the closest training pattern, or None if no such patterns exist'
def closestTrainingPattern(self, inputPattern, cat):
dist = self._getDistances(inputPattern)
sorted = dist.argsort()

for patIdx in sorted:
  patternCat = self._categoryList[patIdx]
  if patternCat == cat:
    if self.useSparseMemory:
      closestPattern = self._Memory.getRow(int(patIdx))
    else:
      closestPattern = self._M[patIdx]
    return closestPattern

return None
'Return the closest training pattern that is *not* of the given category "cat". :param inputPattern: The pattern whose closest neighbor is sought :param cat: Training patterns of this category will be ignored no matter their distance to inputPattern :returns: A dense version of the closest training pattern, or None if no such patterns exist'
def closestOtherTrainingPattern(self, inputPattern, cat):
dist = self._getDistances(inputPattern)
sorted = dist.argsort()

for patIdx in sorted:
  patternCat = self._categoryList[patIdx]
  if patternCat != cat:
    if self.useSparseMemory:
      closestPattern = self._Memory.getRow(int(patIdx))
    else:
      closestPattern = self._M[patIdx]
    return closestPattern

return None
'Gets a training pattern either by index or category number. :param idx: Index of the training pattern :param sparseBinaryForm: If true, returns a list of the indices of the non-zero bits in the training pattern :param cat: If not None, get the first pattern belonging to category cat. If this is specified, idx must be None. :returns: The training pattern with specified index'
def getPattern(self, idx, sparseBinaryForm=False, cat=None):
if cat is not None:
  assert idx is None
  idx = self._categoryList.index(cat)

if not self.useSparseMemory:
  pattern = self._Memory[idx]
  if sparseBinaryForm:
    pattern = pattern.nonzero()[0]
else:
  nz, values = self._Memory.rowNonZeros(idx)
  if not sparseBinaryForm:
    pattern = numpy.zeros(self._Memory.nCols())
    numpy.put(pattern, nz, 1)
  else:
    pattern = nz

return pattern
'Gets the partition id given an index. :param i: index of partition :returns: the partition id associated with pattern i. Returns None if no id is associated with it.'
def getPartitionId(self, i):
if i < 0 or i >= self._numPatterns:
  raise RuntimeError('index out of bounds')

partitionId = self._partitionIdList[i]
if partitionId == numpy.inf:
  return None
else:
  return partitionId
':returns: a list of complete partition id objects'
def getPartitionIdList(self):
return self._partitionIdList
':returns: the number of unique partition Ids stored.'
def getNumPartitionIds(self):
return len(self._partitionIdMap)
':returns: a list containing unique (non-None) partition Ids (just the keys)'
def getPartitionIdKeys(self):
return self._partitionIdMap.keys()
':returns: a list of pattern indices corresponding to this partitionId. Return an empty list if there are none.'
def getPatternIndicesWithPartitionId(self, partitionId):
return self._partitionIdMap.get(partitionId, [])
'Adds partition id for pattern index'
def _addPartitionId(self, index, partitionId=None):
if partitionId is None:
  self._partitionIdList.append(numpy.inf)
else:
  self._partitionIdList.append(partitionId)
  indices = self._partitionIdMap.get(partitionId, [])
  indices.append(index)
  self._partitionIdMap[partitionId] = indices
'Rebuilds the partition Id map using the given partitionIdList'
def _rebuildPartitionIdMap(self, partitionIdList):
self._partitionIdMap = {}
for row, partitionId in enumerate(partitionIdList):
  indices = self._partitionIdMap.get(partitionId, [])
  indices.append(row)
  self._partitionIdMap[partitionId] = indices
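The resulting map groups row indices by their partition id; rows with no id carry numpy.inf. A tiny standalone check (hypothetical ids; setdefault is an equivalent idiom to the get/append pattern above):

import numpy

partitionIdList = [7, 7, numpy.inf, 3]
partitionIdMap = {}
for row, partitionId in enumerate(partitionIdList):
  partitionIdMap.setdefault(partitionId, []).append(row)
assert partitionIdMap == {7: [0, 1], numpy.inf: [2], 3: [3]}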
'Calculate the distances from inputPattern to all stored patterns. All distances are between 0.0 and 1.0. :param inputPattern: The pattern from which distances to all other patterns are calculated :param distanceNorm: Degree of the distance norm'
def _calcDistance(self, inputPattern, distanceNorm=None):
if distanceNorm is None:
  distanceNorm = self.distanceNorm

# Sparse memory
if self.useSparseMemory:
  if self._protoSizes is None:
    self._protoSizes = self._Memory.rowSums()
  overlapsWithProtos = self._Memory.rightVecSumAtNZ(inputPattern)
  inputPatternSum = inputPattern.sum()

  if self.distanceMethod == 'rawOverlap':
    dist = inputPattern.sum() - overlapsWithProtos
  elif self.distanceMethod == 'pctOverlapOfInput':
    dist = inputPatternSum - overlapsWithProtos
    if inputPatternSum > 0:
      dist /= inputPatternSum
  elif self.distanceMethod == 'pctOverlapOfProto':
    overlapsWithProtos /= self._protoSizes
    dist = 1.0 - overlapsWithProtos
  elif self.distanceMethod == 'pctOverlapOfLarger':
    maxVal = numpy.maximum(self._protoSizes, inputPatternSum)
    if maxVal.all() > 0:
      overlapsWithProtos /= maxVal
    dist = 1.0 - overlapsWithProtos
  elif self.distanceMethod == 'norm':
    dist = self._Memory.vecLpDist(self.distanceNorm, inputPattern)
    distMax = dist.max()
    if distMax > 0:
      dist /= distMax
  else:
    raise RuntimeError('Unimplemented distance method %s'
                       % self.distanceMethod)

# Dense memory
elif self.distanceMethod == 'norm':
  dist = numpy.power(numpy.abs(self._M - inputPattern), self.distanceNorm)
  dist = dist.sum(1)
  dist = numpy.power(dist, 1.0 / self.distanceNorm)
  dist /= dist.max()
else:
  raise RuntimeError('Not implemented yet for dense storage....')

return dist
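For intuition, here is the 'rawOverlap' branch written against plain numpy arrays (a sketch; `protos` stands in for the stored prototype matrix):

import numpy

protos = numpy.array([[1, 1, 0, 0],
                      [0, 1, 1, 0]], dtype=float)
inputPattern = numpy.array([1, 1, 1, 0], dtype=float)

# Overlap of the input with each stored prototype row
overlapsWithProtos = (protos * inputPattern).sum(axis=1)
# 'rawOverlap': the number of input bits NOT matched by each prototype
dist = inputPattern.sum() - overlapsWithProtos
assert list(dist) == [1.0, 1.0]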
'Return the distances from inputPattern to all stored patterns. :param inputPattern: The pattern from which distances to all other patterns are returned :param partitionId: If provided, ignore all training vectors with this partitionId.'
def _getDistances(self, inputPattern, partitionId=None):
if not self._finishedLearning:
  self.finishLearning()
  self._finishedLearning = True

if self._vt is not None and len(self._vt) > 0:
  inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

sparseInput = self._sparsifyVector(inputPattern)

# Compute the distances
dist = self._calcDistance(sparseInput)

# Invalidate results where the category is -1
if self._specificIndexTraining:
  dist[numpy.array(self._categoryList) == -1] = numpy.inf

# Ignore vectors with this partition id by setting their distances to inf
if partitionId is not None:
  dist[self._partitionIdMap.get(partitionId, [])] = numpy.inf

return dist
'Used for batch scenarios. This method needs to be called between learning and inference.'
def finishLearning(self):
if self.numSVDDims is not None and self._vt is None:
  self.computeSVD()
'Compute the singular value decomposition (SVD). The SVD is a factorization of a real or complex matrix. It factors the matrix `a` as `u * np.diag(s) * v`, where `u` and `v` are unitary and `s` is a 1-d array of `a`\'s singular values. **Reason for computing the SVD:** There are cases where you want to feed a lot of vectors to the KNNClassifier. However, this can be slow. You can speed up training by (1) computing the SVD of the input patterns, which will give you the eigenvectors, (2) only keeping a fraction of the eigenvectors, and (3) projecting the input patterns onto the remaining eigenvectors. Note that all input patterns are projected onto the eigenvectors in the same fashion. Keeping only the highest eigenvectors increases training performance since it reduces the dimensionality of the input. :param numSVDSamples: (int) the number of samples to use for the SVD computation. :param finalize: (bool) whether to apply SVD to the input patterns. :returns: (array) The singular values of the pattern matrix, sorted in descending order.'
def computeSVD(self, numSVDSamples=None, finalize=True):
if numSVDSamples is None:
  numSVDSamples = self._numPatterns

if not self.useSparseMemory:
  self._a = self._Memory[:self._numPatterns]
else:
  self._a = self._Memory.toDense()[:self._numPatterns]

self._mean = numpy.mean(self._a, axis=0)
self._a -= self._mean
u, self._s, self._vt = numpy.linalg.svd(self._a[:numSVDSamples])

if finalize:
  self._finalizeSVD()

return self._s
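The projection this enables looks like the following in plain numpy (a sketch; the pattern matrix and the number of kept dimensions are made up):

import numpy

patterns = numpy.random.rand(50, 20)          # 50 stored patterns, 20 dims
mean = patterns.mean(axis=0)
u, s, vt = numpy.linalg.svd(patterns - mean)  # s is sorted descending

numDims = 5                                   # keep the top 5 eigenvectors
vt = vt[:numDims]
projected = numpy.dot(vt, patterns[0] - mean) # now a 5-d vector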
'Compute the number of eigenvectors (singular values) to keep. :param singularValues: (array) singular values, sorted in descending order :param fractionOfMax: (float) singular values smaller than this fraction of the largest singular value are dropped :return: (int) the number of dimensions to keep'
def getAdaptiveSVDDims(self, singularValues, fractionOfMax=0.001):
v = singularValues / singularValues[0]
idx = numpy.where(v < fractionOfMax)[0]
if len(idx):
  print 'Number of PCA dimensions chosen: ', idx[0], 'out of ', len(v)
  return idx[0]
else:
  print 'Number of PCA dimensions chosen: ', (len(v) - 1), 'out of ', len(v)
  return len(v) - 1
'Called by computeSVD() when finalize is True. This will project all the patterns onto the SVD eigenvectors. :param numSVDDims: (int) number of eigenvectors used for projection. :return:'
def _finalizeSVD(self, numSVDDims=None):
if numSVDDims is not None:
  self.numSVDDims = numSVDDims

if self.numSVDDims == 'adaptive':
  if self.fractionOfMax is not None:
    self.numSVDDims = self.getAdaptiveSVDDims(self._s, self.fractionOfMax)
  else:
    self.numSVDDims = self.getAdaptiveSVDDims(self._s)

if self._vt.shape[0] < self.numSVDDims:
  print '******************************************************************'
  print ('Warning: The requested number of PCA dimensions is more than '
         'the number of pattern dimensions.')
  print 'Setting numSVDDims = ', self._vt.shape[0]
  print '******************************************************************'
  self.numSVDDims = self._vt.shape[0]

self._vt = self._vt[:self.numSVDDims]
if len(self._vt) == 0:
  return

self._Memory = numpy.zeros((self._numPatterns, self.numSVDDims))
self._M = self._Memory
self.useSparseMemory = False

for i in range(self._numPatterns):
  self._Memory[i] = numpy.dot(self._vt, self._a[i])

self._a = None
'Change the category indices. Used by the Network Builder to keep the category indices in sync with the ImageSensor categoryInfo when the user renames or removes categories. :param mapping: List of new category indices. For example, mapping=[2,0,1] would change all vectors of category 0 to be category 2, category 1 to 0, and category 2 to 1'
def remapCategories(self, mapping):
categoryArray = numpy.array(self._categoryList)
newCategoryArray = numpy.zeros(categoryArray.shape[0])
newCategoryArray.fill(-1)
for i in xrange(len(mapping)):
  newCategoryArray[categoryArray == i] = mapping[i]
self._categoryList = list(newCategoryArray)
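A worked version of the docstring's mapping=[2, 0, 1] example, using the same relabeling loop (a standalone sketch):

import numpy

categoryList = [0, 1, 2, 1]
mapping = [2, 0, 1]   # old category i becomes mapping[i]
categoryArray = numpy.array(categoryList)
newCategories = numpy.full(len(categoryList), -1.0)
for i in range(len(mapping)):
  newCategories[categoryArray == i] = mapping[i]
assert list(newCategories) == [2.0, 0.0, 1.0, 0.0]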
'Change the category associated with this vector(s). Used by the Network Builder to move vectors between categories, to enable categories, and to invalidate vectors by setting the category to -1. :param vectorIndices: Single index or list of indices :param categoryIndices: Single index or list of indices. Can also be a single index when vectorIndices is a list, in which case the same category will be used for all vectors'
def setCategoryOfVectors(self, vectorIndices, categoryIndices):
if not hasattr(vectorIndices, '__iter__'):
  vectorIndices = [vectorIndices]
  categoryIndices = [categoryIndices]
elif not hasattr(categoryIndices, '__iter__'):
  categoryIndices = [categoryIndices] * len(vectorIndices)

for i in xrange(len(vectorIndices)):
  vectorIndex = vectorIndices[i]
  categoryIndex = categoryIndices[i]
  if vectorIndex < len(self._categoryList):
    self._categoryList[vectorIndex] = categoryIndex
'Return serializable state. This function will return a version of the __dict__.'
def __getstate__(self):
state = self.__dict__.copy()
return state
'Set the state of this object from a serialized state.'
def __setstate__(self, state):
if 'version' not in state:
  pass
elif state['version'] == 1:
  pass
elif state['version'] == 2:
  raise RuntimeError('Invalid deserialization of invalid '
                     'KNNClassifierVersion')

# Backward compatibility: drop the retired array form of partition ids
if '_partitionIdArray' in state:
  state.pop('_partitionIdArray')

if 'minSparsity' not in state:
  state['minSparsity'] = 0.0

self.__dict__.update(state)

# Backward compatibility: rebuild the map if it was never serialized
if '_partitionIdMap' not in state:
  self._rebuildPartitionIdMap(self._partitionIdList)

self.version = KNNCLASSIFIER_VERSION
'Compute the anomaly score as the percent of active columns not predicted. :param activeColumns: array of active column indices :param predictedColumns: array of columns indices predicted in this step (used for anomaly in step T+1) :param inputValue: (optional) value of current input to encoders (eg "cat" for category encoder) (used in anomaly-likelihood) :param timestamp: (optional) date timestamp when the sample occurred (used in anomaly-likelihood) :returns: the computed anomaly score; float 0..1'
def compute(self, activeColumns, predictedColumns, inputValue=None, timestamp=None):
anomalyScore = computeRawAnomalyScore(activeColumns, predictedColumns)

if self._mode == Anomaly.MODE_PURE:
  score = anomalyScore
elif self._mode == Anomaly.MODE_LIKELIHOOD:
  if inputValue is None:
    raise ValueError("Selected anomaly mode 'Anomaly.MODE_LIKELIHOOD' "
                     "requires 'inputValue' as parameter to compute() "
                     "method. ")
  probability = self._likelihood.anomalyProbability(
      inputValue, anomalyScore, timestamp)
  # Low likelihood means high anomaly
  score = 1 - probability
elif self._mode == Anomaly.MODE_WEIGHTED:
  probability = self._likelihood.anomalyProbability(
      inputValue, anomalyScore, timestamp)
  score = anomalyScore * (1 - probability)

# Apply the moving average if windowSize was specified
if self._movingAverage is not None:
  score = self._movingAverage.next(score)

# Apply binary discretization if required
if self._binaryThreshold is not None:
  if score >= self._binaryThreshold:
    score = 1.0
  else:
    score = 0.0

return score
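A minimal MODE_PURE usage sketch (the import path assumes NuPIC's usual layout; the column indices are made up):

import numpy
from nupic.algorithms.anomaly import Anomaly

anomaly = Anomaly()                  # defaults to MODE_PURE
active = numpy.array([2, 3, 6])      # active column indices this step
predicted = numpy.array([3, 5, 6])   # columns predicted last step
score = anomaly.compute(active, predicted)
# 1 of the 3 active columns was unpredicted, so score == 1.0 / 3.0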
'deserialization'
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, '_mode'):
  self._mode = Anomaly.MODE_PURE
if not hasattr(self, '_movingAverage'):
  self._movingAverage = None
if not hasattr(self, '_binaryThreshold'):
  self._binaryThreshold = None
'Translate parameters and initialize member variables specific to `backtracking_tm.py`.'
def __init__(self, numberOfCols=500, cellsPerColumn=10, initialPerm=0.11, connectedPerm=0.5, minThreshold=8, newSynapseCount=15, permanenceInc=0.1, permanenceDec=0.1, permanenceMax=1.0, activationThreshold=12, predictedSegmentDecrement=0.0, maxSegmentsPerCell=255, maxSynapsesPerSegment=255, globalDecay=0.1, maxAge=100000, pamLength=1, verbosity=0, outputType='normal', seed=42):
super(TMShimMixin, self).__init__(
    columnDimensions=(numberOfCols,),
    cellsPerColumn=cellsPerColumn,
    activationThreshold=activationThreshold,
    initialPermanence=initialPerm,
    connectedPermanence=connectedPerm,
    minThreshold=minThreshold,
    maxNewSynapseCount=newSynapseCount,
    permanenceIncrement=permanenceInc,
    permanenceDecrement=permanenceDec,
    predictedSegmentDecrement=predictedSegmentDecrement,
    maxSegmentsPerCell=maxSegmentsPerCell,
    maxSynapsesPerSegment=maxSynapsesPerSegment,
    seed=seed)

self.infActiveState = {'t': None}
'Intercepts TemporalMemory deserialization request in order to initialize `self.infActiveState` @param proto (DynamicStructBuilder) Proto object @return (TemporalMemory) TemporalMemory shim instance'
@classmethod
def read(cls, proto):
tm = super(TMShimMixin, cls).read(proto)
tm.infActiveState = {'t': None}
return tm
'(From `backtracking_tm.py`) Handle one compute, possibly learning. @param bottomUpInput The bottom-up input, typically from a spatial pooler @param enableLearn If true, perform learning @param computeInfOutput If None, default behavior is to disable the inference output when enableLearn is on. If true, compute the inference output If false, do not compute the inference output'
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
super(TMShimMixin, self).compute(set(bottomUpInput.nonzero()[0]),
                                 learn=enableLearn)

numberOfCells = self.numberOfCells()
activeState = numpy.zeros(numberOfCells)
activeState[self.getActiveCells()] = 1
self.infActiveState['t'] = activeState

output = numpy.zeros(numberOfCells)
output[self.getPredictiveCells()] = 1
output[self.getActiveCells()] = 1
return output
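A single-timestep usage sketch, assuming a concrete shim class such as TMShim from nupic.algorithms.backtracking_tm_shim (the class name and column indices are assumptions, not taken from this section):

import numpy
from nupic.algorithms.backtracking_tm_shim import TMShim

tm = TMShim(numberOfCols=500, cellsPerColumn=10)
bottomUpInput = numpy.zeros(500, dtype='float32')
bottomUpInput[[10, 20, 30]] = 1     # hypothetical active columns
output = tm.compute(bottomUpInput, enableLearn=True)
# output is a flat array over all 5000 cells: 1 where a cell is active
# or predictive, 0 elsewhere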
'(From `backtracking_tm.py`) Top-down compute - generate expected input given output of the TM @param topDownIn top down input from the level above us @returns best estimate of the TM input that would have generated bottomUpOut.'
def topDownCompute(self, topDownIn=None):
output = numpy.zeros(self.numberOfColumns())
columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]
output[columns] = 1
return output
'Translate parameters and initialize member variables specific to `backtracking_tm.py`.'
def __init__(self, numberOfCols=500, cellsPerColumn=10, initialPerm=0.11, connectedPerm=0.5, minThreshold=8, newSynapseCount=15, permanenceInc=0.1, permanenceDec=0.1, permanenceMax=1.0, activationThreshold=12, predictedSegmentDecrement=0.0, maxSegmentsPerCell=255, maxSynapsesPerSegment=255, globalDecay=0.1, maxAge=100000, pamLength=1, verbosity=0, outputType='normal', seed=42):
super(MonitoredTMShim, self).__init__(
    columnDimensions=(numberOfCols,),
    cellsPerColumn=cellsPerColumn,
    activationThreshold=activationThreshold,
    initialPermanence=initialPerm,
    connectedPermanence=connectedPerm,
    minThreshold=minThreshold,
    maxNewSynapseCount=newSynapseCount,
    permanenceIncrement=permanenceInc,
    permanenceDecrement=permanenceDec,
    predictedSegmentDecrement=predictedSegmentDecrement,
    maxSegmentsPerCell=maxSegmentsPerCell,
    maxSynapsesPerSegment=maxSynapsesPerSegment,
    seed=seed)

self.infActiveState = {'t': None}
'(From `backtracking_tm.py`) Handle one compute, possibly learning. @param bottomUpInput The bottom-up input, typically from a spatial pooler @param enableLearn If true, perform learning @param computeInfOutput If None, default behavior is to disable the inference output when enableLearn is on. If true, compute the inference output If false, do not compute the inference output'
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
super(MonitoredTMShim, self).compute(set(bottomUpInput.nonzero()[0]),
                                     learn=enableLearn)

numberOfCells = self.numberOfCells()
activeState = numpy.zeros(numberOfCells)
activeState[self.getActiveCells()] = 1
self.infActiveState['t'] = activeState

output = numpy.zeros(numberOfCells)
output[self.getPredictiveCells() + self.getActiveCells()] = 1
return output
'(From `backtracking_tm.py`) Top-down compute - generate expected input given output of the TM @param topDownIn top down input from the level above us @returns best estimate of the TM input that would have generated bottomUpOut.'
def topDownCompute(self, topDownIn=None):
output = numpy.zeros(self.numberOfColumns())
columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]
output[columns] = 1
return output
'Populate serialization proto instance. :param proto: (BacktrackingTMCppProto) the proto instance to populate'
def write(self, proto):
super(BacktrackingTMCPP, self).write(proto.baseTM)
self.cells4.write(proto.cells4)
proto.makeCells4Ephemeral = self.makeCells4Ephemeral
proto.seed = self.seed
proto.checkSynapseConsistency = self.checkSynapseConsistency
proto.initArgs = json.dumps(self._initArgsDict)
'Deserialize from proto instance. :param proto: (BacktrackingTMCppProto) the proto instance to read from'
@classmethod
def read(cls, proto):
obj = BacktrackingTM.read(proto.baseTM)
obj.__class__ = cls

newCells4 = Cells4.read(proto.cells4)
print newCells4
obj.cells4 = newCells4

obj.makeCells4Ephemeral = proto.makeCells4Ephemeral
obj.seed = proto.seed
obj.checkSynapseConsistency = proto.checkSynapseConsistency
obj._initArgsDict = json.loads(proto.initArgs)
# Convert the deserialized unicode value back to str
obj._initArgsDict['outputType'] = str(obj._initArgsDict['outputType'])

obj.allocateStatesInCPP = False
obj.retrieveLearningStates = False

obj._setStatePointers()
return obj
'Set the state of ourself from a serialized state.'
def __setstate__(self, state):
super(BacktrackingTMCPP, self).__setstate__(state)
if self.makeCells4Ephemeral:
  self.cells4 = Cells4(self.numberOfCols,
                       self.cellsPerColumn,
                       self.activationThreshold,
                       self.minThreshold,
                       self.newSynapseCount,
                       self.segUpdateValidDuration,
                       self.initialPerm,
                       self.connectedPerm,
                       self.permanenceMax,
                       self.permanenceDec,
                       self.permanenceInc,
                       self.globalDecay,
                       self.doPooling,
                       self.seed,
                       self.allocateStatesInCPP,
                       self.checkSynapseConsistency)

  self.cells4.setVerbosity(self.verbosity)
  self.cells4.setPamLength(self.pamLength)
  self.cells4.setMaxAge(self.maxAge)
  self.cells4.setMaxInfBacktrack(self.maxInfBacktrack)
  self.cells4.setMaxLrnBacktrack(self.maxLrnBacktrack)
  self.cells4.setMaxSeqLength(self.maxSeqLength)
  self.cells4.setMaxSegmentsPerCell(self.maxSegmentsPerCell)
  self.cells4.setMaxSynapsesPerCell(self.maxSynapsesPerSegment)
self._setStatePointers()
'List of our member variables that we don\'t need to be saved'
def _getEphemeralMembers(self):
e = BacktrackingTM._getEphemeralMembers(self)
if self.makeCells4Ephemeral:
  e.extend(['cells4'])
return e
'Initialize all ephemeral members after being restored to a pickled state.'
def _initEphemerals(self):
BacktrackingTM._initEphemerals(self)
self.allocateStatesInCPP = False
self.retrieveLearningStates = False

if self.makeCells4Ephemeral:
  self.cells4 = Cells4(self.numberOfCols,
                       self.cellsPerColumn,
                       self.activationThreshold,
                       self.minThreshold,
                       self.newSynapseCount,
                       self.segUpdateValidDuration,
                       self.initialPerm,
                       self.connectedPerm,
                       self.permanenceMax,
                       self.permanenceDec,
                       self.permanenceInc,
                       self.globalDecay,
                       self.doPooling,
                       self.seed,
                       self.allocateStatesInCPP,
                       self.checkSynapseConsistency)

  self.cells4.setVerbosity(self.verbosity)
  self.cells4.setPamLength(self.pamLength)
  self.cells4.setMaxAge(self.maxAge)
  self.cells4.setMaxInfBacktrack(self.maxInfBacktrack)
  self.cells4.setMaxLrnBacktrack(self.maxLrnBacktrack)
  self.cells4.setMaxSeqLength(self.maxSeqLength)
  self.cells4.setMaxSegmentsPerCell(self.maxSegmentsPerCell)
  self.cells4.setMaxSynapsesPerCell(self.maxSynapsesPerSegment)
  self._setStatePointers()
'Save Cells4 state to a file. File can be loaded with :meth:`loadFromFile`.'
def saveToFile(self, filePath):
self.cells4.saveToFile(filePath)
'Load Cells4 state from a file saved with :meth:`saveToFile`.'
def loadFromFile(self, filePath):
self.cells4.loadFromFile(filePath)
'Patch __getattr__ so that we can catch the first access to \'cells\' and load. This function is only called when we try to access an attribute that doesn\'t exist. We purposely make sure that "self.cells" doesn\'t exist after unpickling so that we\'ll hit this, then we can load it on the first access. If this is called at any other time, it will raise an AttributeError. That\'s because: - If \'name\' is "cells", after the first call, self._realCells won\'t exist so we\'ll get an implicit AttributeError. - If \'name\' isn\'t "cells", I\'d expect our super wouldn\'t have __getattr__, so we\'ll raise our own Attribute error. If the super did get __getattr__, we\'ll just return what it gives us.'
def __getattr__(self, name):
try:
  return super(BacktrackingTM, self).__getattr__(name)
except AttributeError:
  raise AttributeError("'TM' object has no attribute '%s'" % name)
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.compute`.'
def compute(self, bottomUpInput, enableLearn, enableInference=None):
assert (bottomUpInput.dtype == numpy.dtype('float32') or
        bottomUpInput.dtype == numpy.dtype('uint32') or
        bottomUpInput.dtype == numpy.dtype('int32'))

self.iterationIdx = self.iterationIdx + 1

# If not specified, inference defaults to the opposite of learning:
# skip the inference output while learning, compute it otherwise
if enableInference is None:
  if enableLearn:
    enableInference = False
  else:
    enableInference = True

self._setStatePointers()
y = self.cells4.compute(bottomUpInput, enableInference, enableLearn)
self.currentOutput = y.reshape((self.numberOfCols, self.cellsPerColumn))
self.avgLearnedSeqLength = self.cells4.getAvgLearnedSeqLength()
self._copyAllocatedStates()

# Update the prediction score stats
if self.collectStats:
  activeColumns = bottomUpInput.nonzero()[0]
  if enableInference:
    predictedState = self.infPredictedState['t-1']
  else:
    predictedState = self.lrnPredictedState['t-1']
  self._updateStatsInferEnd(self._internalStats,
                            activeColumns,
                            predictedState,
                            self.colConfidence['t-1'])

output = self._computeOutput()

# Print diagnostic information based on the current verbosity level
self.printComputeEnd(output, learn=enableLearn)

self.resetCalled = False
return output
'This calls phase 2 of inference (used in multistep prediction).'
def _inferPhase2(self):
self._setStatePointers()
self.cells4.inferPhase2()
self._copyAllocatedStates()
'If state is allocated in CPP, copy over the data into our numpy arrays.'
def _copyAllocatedStates(self):
if self.verbosity > 1 or self.retrieveLearningStates:
  (activeT, activeT1, predT, predT1) = self.cells4.getLearnStates()
  self.lrnActiveState['t-1'] = activeT1.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.lrnActiveState['t'] = activeT.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.lrnPredictedState['t-1'] = predT1.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.lrnPredictedState['t'] = predT.reshape(
      (self.numberOfCols, self.cellsPerColumn))

if self.allocateStatesInCPP:
  assert False
  (activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1,
   confidenceT, confidenceT1) = self.cells4.getStates()
  self.confidence['t-1'] = confidenceT1.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.confidence['t'] = confidenceT.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.colConfidence['t'] = colConfidenceT.reshape(self.numberOfCols)
  self.colConfidence['t-1'] = colConfidenceT1.reshape(self.numberOfCols)
  self.infActiveState['t-1'] = activeT1.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.infActiveState['t'] = activeT.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.infPredictedState['t-1'] = predT1.reshape(
      (self.numberOfCols, self.cellsPerColumn))
  self.infPredictedState['t'] = predT.reshape(
      (self.numberOfCols, self.cellsPerColumn))
'If we are having CPP use numpy-allocated buffers, set these buffer pointers. This is a relatively fast operation and, for safety, should be done before every call to the cells4 compute methods. This protects us in situations where code can cause Python or numpy to create copies.'
def _setStatePointers(self):
if not self.allocateStatesInCPP:
  self.cells4.setStatePointers(
      self.infActiveState['t'], self.infActiveState['t-1'],
      self.infPredictedState['t'], self.infPredictedState['t-1'],
      self.colConfidence['t'], self.colConfidence['t-1'],
      self.cellConfidence['t'], self.cellConfidence['t-1'])
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.reset`.'
def reset(self):
if self.verbosity >= 3:
  print 'TM Reset'
self._setStatePointers()
self.cells4.reset()
BacktrackingTM.reset(self)
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.finishLearning`.'
def finishLearning(self):
self.trimSegments(minPermanence=0.0001)
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.trimSegments`.'
def trimSegments(self, minPermanence=None, minNumSyns=None):
# Fill in defaults
if minPermanence is None:
  minPermanence = 0.0
if minNumSyns is None:
  minNumSyns = 0

# Print all cells if verbosity says to
if self.verbosity >= 5:
  print 'Cells, all segments:'
  self.printCells(predictedOnly=False)

return self.cells4.trimSegments(minPermanence=minPermanence,
                                minNumSyns=minNumSyns)
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`.'
def printSegmentUpdates(self):
assert False
print '=== SEGMENT UPDATES ===, Num = ', len(self.segmentUpdates)
for key, updateList in self.segmentUpdates.iteritems():
  (c, i) = key[0], key[1]
  print c, i, updateList
'A segment is active if it has >= activationThreshold connected synapses that are active due to infActiveState.'
def _slowIsSegmentActive(self, seg, timeStep):
numSyn = seg.size()
numActiveSyns = 0
for synIdx in xrange(numSyn):
  if seg.getPermanence(synIdx) < self.connectedPerm:
    continue
  (sc, si) = self.getColCellIdx(seg.getSrcCellIdx(synIdx))
  if self.infActiveState[timeStep][sc, si]:
    numActiveSyns += 1
    if numActiveSyns >= self.activationThreshold:
      return True

return numActiveSyns >= self.activationThreshold
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printCell`.'
def printCell(self, c, i, onlyActiveSegments=False):
nSegs = self.cells4.nSegmentsOnCell(c, i)
if nSegs > 0:
  segList = self.cells4.getNonEmptySegList(c, i)
  gidx = (c * self.cellsPerColumn) + i
  print 'Column', c, 'Cell', i, '(%d)' % gidx, ':', nSegs, 'segment(s)'
  for k, segIdx in enumerate(segList):
    seg = self.cells4.getSegment(c, i, segIdx)
    isActive = self._slowIsSegmentActive(seg, 't')
    if onlyActiveSegments and not isActive:
      continue
    isActiveStr = '*' if isActive else ' '
    print ' %sSeg #%-3d' % (isActiveStr, segIdx),
    print seg.size(),
    print seg.isSequenceSegment(), '%9.7f' % seg.dutyCycle(
        self.cells4.getNLrnIterations(), False, True),
    print '(%4d/%-4d)' % (seg.getPositiveActivations(),
                          seg.getTotalActivations()),
    print '%4d' % (self.cells4.getNLrnIterations()
                   - seg.getLastActiveIteration()),
    numSyn = seg.size()
    for s in xrange(numSyn):
      (sc, si) = self.getColCellIdx(seg.getSrcCellIdx(s))
      print '[%d,%d]%4.2f' % (sc, si, seg.getPermanence(s)),
    print
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getAvgLearnedSeqLength`.'
def getAvgLearnedSeqLength(self):
return self.cells4.getAvgLearnedSeqLength()
'Get column and cell within column from a global cell index. The global index is ``idx = colIdx * nCellsPerCol() + cellIdxInCol`` :param idx: (int) global cell index :returns: (tuple) (colIdx, cellIdxInCol)'
def getColCellIdx(self, idx):
c = idx // self.cellsPerColumn
i = idx - (c * self.cellsPerColumn)
return (c, i)
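A quick check of the index arithmetic, assuming cellsPerColumn = 32 (the values are made up):

idx = 70
c = idx // 32        # column 2
i = idx - (c * 32)   # cell 6 within that column
assert (c, i) == (2, 6)
assert c * 32 + i == idx   # round-trips through the global index formula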
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.'
def getSegmentOnCell(self, c, i, segIdx):
segList = self.cells4.getNonEmptySegList(c, i)
seg = self.cells4.getSegment(c, i, segList[segIdx])
numSyn = seg.size()
assert numSyn != 0

# Accumulate the segment header information
result = []
result.append([int(segIdx), bool(seg.isSequenceSegment()),
               seg.getPositiveActivations(),
               seg.getTotalActivations(),
               seg.getLastActiveIteration(),
               seg.getLastPosDutyCycle(),
               seg.getLastPosDutyCycleIteration()])

# Each synapse is appended as a (column, cellInColumn, permanence) triple
for s in xrange(numSyn):
  (sc, si) = self.getColCellIdx(seg.getSrcCellIdx(s))
  result.append([int(sc), int(si), seg.getPermanence(s)])

return result
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegments`.'
def getNumSegments(self):
return self.cells4.nSegments()
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSynapses`.'
def getNumSynapses(self):
return self.cells4.nSynapses()
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegmentsInCell`.'
def getNumSegmentsInCell(self, c, i):
return self.cells4.nSegmentsOnCell(c, i)
'Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentInfo`.'
def getSegmentInfo(self, collectActiveData=False):
assert collectActiveData == False

nSegments, nSynapses = self.getNumSegments(), self.cells4.nSynapses()
distSegSizes, distNSegsPerCell = {}, {}
nActiveSegs, nActiveSynapses = 0, 0
distPermValues = {}

# Build the histogram of segment ages, bucketed into 20 ranges
numAgeBuckets = 20
distAges = []
ageBucketSize = int((self.iterationIdx + 20) / 20)
for i in range(numAgeBuckets):
  distAges.append(['%d-%d' % (i * ageBucketSize,
                              (i + 1) * ageBucketSize - 1), 0])

for c in xrange(self.numberOfCols):
  for i in xrange(self.cellsPerColumn):
    nSegmentsThisCell = self.getNumSegmentsInCell(c, i)
    if nSegmentsThisCell > 0:
      # Update the histogram of segments per cell
      if distNSegsPerCell.has_key(nSegmentsThisCell):
        distNSegsPerCell[nSegmentsThisCell] += 1
      else:
        distNSegsPerCell[nSegmentsThisCell] = 1

      # Update the histograms of segment sizes, permanences and ages
      segList = self.cells4.getNonEmptySegList(c, i)
      for segIdx in xrange(nSegmentsThisCell):
        seg = self.getSegmentOnCell(c, i, segIdx)

        # The first entry of seg is the segment header
        nSynapsesThisSeg = len(seg) - 1
        if nSynapsesThisSeg > 0:
          if distSegSizes.has_key(nSynapsesThisSeg):
            distSegSizes[nSynapsesThisSeg] += 1
          else:
            distSegSizes[nSynapsesThisSeg] = 1

          # Accumulate permanence values, bucketed by tenths
          for syn in seg[1:]:
            p = int(syn[2] * 10)
            if distPermValues.has_key(p):
              distPermValues[p] += 1
            else:
              distPermValues[p] = 1

        segObj = self.cells4.getSegment(c, i, segList[segIdx])
        age = self.iterationIdx - segObj.getLastActiveIteration()
        ageBucket = int(age / ageBucketSize)
        distAges[ageBucket][1] += 1

return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
        distSegSizes, distNSegsPerCell, distPermValues, distAges)
'Wraps getRow() such that instances may be indexed by columnIndex.'
def __getitem__(self, columnIndex):
return super(_SparseMatrixCorticalColumnAdapter, self).getRow(columnIndex)
'Wraps replaceSparseRow()'
def replace(self, columnIndex, bitmap):
return super(_SparseMatrixCorticalColumnAdapter, self).replaceSparseRow(columnIndex, bitmap)
'Wraps setRowFromDense()'
def update(self, columnIndex, vector):
return super(_SparseMatrixCorticalColumnAdapter, self).setRowFromDense(columnIndex, vector)
':returns: (iter) the dimensions of the columns in the region'
def getColumnDimensions(self):
return self._columnDimensions
':returns: (iter) the dimensions of the input vector'
def getInputDimensions(self):
return self._inputDimensions
':returns: (int) the total number of columns'
def getNumColumns(self):
return self._numColumns
':returns: (int) the total number of inputs.'
def getNumInputs(self):
return self._numInputs
':returns: (float) the potential radius'
def getPotentialRadius(self):
return self._potentialRadius
':param potentialRadius: (float) value to set'
def setPotentialRadius(self, potentialRadius):
self._potentialRadius = potentialRadius
':returns: (float) the potential percent'
def getPotentialPct(self):
return self._potentialPct
':param potentialPct: (float) value to set'
def setPotentialPct(self, potentialPct):
self._potentialPct = potentialPct
':returns: (bool) whether global inhibition is enabled.'
def getGlobalInhibition(self):
return self._globalInhibition
':param globalInhibition: (bool) value to set.'
def setGlobalInhibition(self, globalInhibition):
self._globalInhibition = globalInhibition
':returns: (float) the number of active columns per inhibition area. Returns a value less than 0 if parameter is unused.'
def getNumActiveColumnsPerInhArea(self):
return self._numActiveColumnsPerInhArea
'Sets the number of active columns per inhibition area. Invalidates the ``localAreaDensity`` parameter :param numActiveColumnsPerInhArea: (float) value to set'
def setNumActiveColumnsPerInhArea(self, numActiveColumnsPerInhArea):
assert numActiveColumnsPerInhArea > 0
self._numActiveColumnsPerInhArea = numActiveColumnsPerInhArea
self._localAreaDensity = 0
':returns: (float) the local area density. Returns a value less than 0 if parameter is unused.'
def getLocalAreaDensity(self):
return self._localAreaDensity
'Sets the local area density. Invalidates the \'numActiveColumnsPerInhArea\' parameter :param localAreaDensity: (float) value to set'
def setLocalAreaDensity(self, localAreaDensity):
assert localAreaDensity > 0 and localAreaDensity <= 1
self._localAreaDensity = localAreaDensity
self._numActiveColumnsPerInhArea = 0
':returns: (int) the stimulus threshold'
def getStimulusThreshold(self):
return self._stimulusThreshold