desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def setStimulusThreshold(self, stimulusThreshold):
  """Set the stimulus threshold.

  :param stimulusThreshold: (float) value to set.
  """
  self._stimulusThreshold = stimulusThreshold
|
def getInhibitionRadius(self):
  """Return the current inhibition radius.

  :returns: (int) the inhibition radius
  """
  return self._inhibitionRadius
|
def setInhibitionRadius(self, inhibitionRadius):
  """Set the inhibition radius.

  :param inhibitionRadius: (int) value to set
  """
  self._inhibitionRadius = inhibitionRadius
|
def getDutyCyclePeriod(self):
  """Return the duty cycle period.

  :returns: (int) the duty cycle period
  """
  return self._dutyCyclePeriod
|
def setDutyCyclePeriod(self, dutyCyclePeriod):
  """Set the duty cycle period.

  :param dutyCyclePeriod: (int) value to set.
  """
  self._dutyCyclePeriod = dutyCyclePeriod
|
def getBoostStrength(self):
  """Return the maximum boost value used.

  :returns: (float) the maximum boost value
  """
  return self._boostStrength
|
def setBoostStrength(self, boostStrength):
  """Set the maximum boost value.

  :param boostStrength: (float) value to set
  """
  self._boostStrength = boostStrength
|
def getIterationNum(self):
  """Return the current iteration number.

  :returns: the iteration number
  """
  return self._iterationNum
|
def setIterationNum(self, iterationNum):
  """Set the iteration number.

  :param iterationNum: (int) value to set
  """
  self._iterationNum = iterationNum
|
def getIterationLearnNum(self):
  """Return how many iterations have been run with learning enabled.

  :returns: (int) the number of learning iterations
  """
  return self._iterationLearnNum
|
def setIterationLearnNum(self, iterationLearnNum):
  """Set the learning iteration counter.

  :param iterationLearnNum: (int) value to set
  """
  self._iterationLearnNum = iterationLearnNum
|
def getSpVerbosity(self):
  """Return the verbosity level; larger is more verbose.

  :returns: (int) the verbosity level
  """
  return self._spVerbosity
|
def setSpVerbosity(self, spVerbosity):
  """Set the verbosity level; larger is more verbose.

  :param spVerbosity: (int) value to set
  """
  self._spVerbosity = spVerbosity
|
def getUpdatePeriod(self):
  """Return the period at which active duty cycles are updated.

  :returns: (int) the update period
  """
  return self._updatePeriod
|
def setUpdatePeriod(self, updatePeriod):
  """Set the period at which active duty cycles are updated.

  :param updatePeriod: (int) value to set
  """
  self._updatePeriod = updatePeriod
|
def getSynPermTrimThreshold(self):
  """Return the permanence trim threshold. Sparsity is enforced by trimming
  out all permanence values below this value.

  :returns: (float) the permanence trim threshold
  """
  return self._synPermTrimThreshold
|
def setSynPermTrimThreshold(self, synPermTrimThreshold):
  """Set the permanence trim threshold. Sparsity is enforced by trimming
  out all permanence values below this value.

  :param synPermTrimThreshold: (float) the permanence trim threshold
  """
  self._synPermTrimThreshold = synPermTrimThreshold
|
def getSynPermActiveInc(self):
  """Return the permanence increment applied to synapses on active inputs.

  :returns: (float) the permanence increment amount
  """
  return self._synPermActiveInc
|
def setSynPermActiveInc(self, synPermActiveInc):
  """Set the permanence increment amount for active synapses.

  :param synPermActiveInc: (float) value to set.
  """
  self._synPermActiveInc = synPermActiveInc
|
def getSynPermInactiveDec(self):
  """Return the permanence decrement applied to inactive synapses.

  :returns: (float) the permanence decrement amount
  """
  return self._synPermInactiveDec
|
def setSynPermInactiveDec(self, synPermInactiveDec):
  """Set the permanence decrement amount for inactive synapses.

  :param synPermInactiveDec: (float) value to set.
  """
  self._synPermInactiveDec = synPermInactiveDec
|
def getSynPermBelowStimulusInc(self):
  """Return the permanence increment used for columns that have not been
  recently active.

  :returns: (float) the permanence increment amount
  """
  return self._synPermBelowStimulusInc
|
def setSynPermBelowStimulusInc(self, synPermBelowStimulusInc):
  """Set the permanence increment amount for columns that have not been
  recently active.

  :param synPermBelowStimulusInc: (float) value to set.
  """
  self._synPermBelowStimulusInc = synPermBelowStimulusInc
|
def getSynPermConnected(self):
  """Return the permanence amount that qualifies a synapse as connected.

  :returns: (float) the connection threshold
  """
  return self._synPermConnected
|
def setSynPermConnected(self, synPermConnected):
  """Set the permanence amount that qualifies a synapse as connected.

  :param synPermConnected: (float) value to set.
  """
  self._synPermConnected = synPermConnected
|
def getMinPctOverlapDutyCycles(self):
  """Return the minimum tolerated overlap, given as a percent of the
  neighbors' overlap score.

  :returns: (float) the minimum percent
  """
  return self._minPctOverlapDutyCycles
|
def setMinPctOverlapDutyCycles(self, minPctOverlapDutyCycles):
  """Set the minimum tolerated activity duty cycle, given as a percent of
  the neighbors' activity duty cycle.

  :param minPctOverlapDutyCycles: (float) value to set.
  """
  self._minPctOverlapDutyCycles = minPctOverlapDutyCycles
|
def getBoostFactors(self, boostFactors):
  """Copy the boost factors for all columns into the supplied buffer.

  :param boostFactors: (list) overwritten in place; size must match the
      number of columns.
  """
  boostFactors[:] = self._boostFactors
|
def setBoostFactors(self, boostFactors):
  """Set the boost factors for all columns. ``boostFactors`` size must
  match the number of columns.

  :param boostFactors: (iter) value to set.
  """
  self._boostFactors[:] = boostFactors[:]
|
def getOverlapDutyCycles(self, overlapDutyCycles):
  """Copy the overlap duty cycles for all columns into the supplied buffer.

  :param overlapDutyCycles: (list) overwritten in place; size must match
      the number of columns.
  """
  overlapDutyCycles[:] = self._overlapDutyCycles
|
def setOverlapDutyCycles(self, overlapDutyCycles):
  """Set the overlap duty cycles for all columns. ``overlapDutyCycles``
  size must match the number of columns.

  :param overlapDutyCycles: (list) value to set.
  """
  self._overlapDutyCycles[:] = overlapDutyCycles
|
def getActiveDutyCycles(self, activeDutyCycles):
  """Copy the activity duty cycles for all columns into the supplied buffer.

  :param activeDutyCycles: (list) overwritten in place; size must match
      the number of columns.
  """
  activeDutyCycles[:] = self._activeDutyCycles
|
def setActiveDutyCycles(self, activeDutyCycles):
  """Set the activity duty cycles for all columns. ``activeDutyCycles``
  size must match the number of columns.

  :param activeDutyCycles: (list) value to set.
  """
  self._activeDutyCycles[:] = activeDutyCycles
|
def getMinOverlapDutyCycles(self, minOverlapDutyCycles):
  """Copy the minimum overlap duty cycles for all columns into the supplied
  buffer.

  :param minOverlapDutyCycles: (list) overwritten in place; size must match
      the number of columns.
  """
  minOverlapDutyCycles[:] = self._minOverlapDutyCycles
|
def setMinOverlapDutyCycles(self, minOverlapDutyCycles):
  """Set the minimum overlap duty cycles for all columns.
  ``minOverlapDutyCycles`` size must match the number of columns.

  :param minOverlapDutyCycles: (iter) value to set.
  """
  self._minOverlapDutyCycles[:] = minOverlapDutyCycles[:]
|
def getPotential(self, columnIndex, potential):
  """Copy a column's potential mapping into the supplied buffer.

  :param columnIndex: (int) column index to get potential for.
  :param potential: (list) overwritten in place; size must match the
      number of inputs.
  """
  assert columnIndex < self._numColumns
  potential[:] = self._potentialPools[columnIndex]
|
def setPotential(self, columnIndex, potential):
  """Set the potential mapping for a given column. ``potential`` size must
  match the number of inputs, and the number of selected bits must be
  greater than ``stimulusThreshold``.

  :param columnIndex: (int) column index to set potential for.
  :param potential: (list) value to set.
  """
  assert columnIndex < self._numColumns
  potentialSparse = numpy.where(potential > 0)[0]
  # A pool smaller than the stimulus threshold could never reach the
  # minimum overlap and would be permanently inhibited.
  if len(potentialSparse) < self._stimulusThreshold:
    raise Exception("This is likely due to a value of stimulusThreshold "
                    "that is too large relative to the input size.")
  self._potentialPools.replace(columnIndex, potentialSparse)
|
def getPermanence(self, columnIndex, permanence):
  """Copy a column's permanence values into the supplied buffer.
  ``permanence`` size must match the number of inputs.

  :param columnIndex: (int) column index to get permanence for.
  :param permanence: (list) overwritten in place with permanences.
  """
  assert columnIndex < self._numColumns
  permanence[:] = self._permanences[columnIndex]
|
def setPermanence(self, columnIndex, permanence):
  """Set the permanence values for a given column. ``permanence`` size must
  match the number of inputs.

  :param columnIndex: (int) column index to set permanence for.
  :param permanence: (list) value to set.
  """
  assert columnIndex < self._numColumns
  # Route through the canonical updater (with raisePerm off) so trimming,
  # clipping and connected-synapse bookkeeping stay consistent.
  self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False)
|
def getConnectedSynapses(self, columnIndex, connectedSynapses):
  """Copy the connected synapses for a given column into the supplied
  buffer. ``connectedSynapses`` size must match the number of inputs.

  :param columnIndex: (int) column index to read.
  :param connectedSynapses: (list) overwritten in place.
  """
  assert columnIndex < self._numColumns
  connectedSynapses[:] = self._connectedSynapses[columnIndex]
|
def getConnectedCounts(self, connectedCounts):
  """Copy the number of connected synapses for all columns into the
  supplied buffer. ``connectedCounts`` size must match the number of
  columns.

  :param connectedCounts: (list) overwritten in place.
  """
  connectedCounts[:] = self._connectedCounts
|
def getOverlaps(self):
  """Return the overlap score for each column.

  :returns: (iter) per-column overlap scores
  """
  return self._overlaps
|
def getBoostedOverlaps(self):
  """Return the boosted overlap score for each column.

  :returns: (list) per-column boosted overlap scores
  """
  return self._boostedOverlaps
|
def compute(self, inputVector, learn, activeArray):
  """This is the primary public method of the SpatialPooler class. This
  function takes an input vector and outputs the indices of the active
  columns. If 'learn' is set to True, this method also updates the
  permanences of the columns.

  :param inputVector: A numpy array of 0's and 1's that comprises the input
      to the spatial pooler. The array is treated as one dimensional, so its
      shape need not match the dimensions given to the constructor, but its
      total number of bits must match the constructor's input size.
  :param learn: A boolean value indicating whether learning should be
      performed. Learning updates the permanence values of the synapses and
      hence modifies the 'state' of the model; setting it to False freezes
      the SP (e.g. to examine the SDRs produced for various inputs).
  :param activeArray: An array whose size is equal to the number of columns.
      Before the function returns this array will be populated with 1's at
      the indices of the active columns, and 0's everywhere else.
  """
  if not isinstance(inputVector, numpy.ndarray):
    raise TypeError("Input vector must be a numpy array, not %s" %
                    str(type(inputVector)))
  if inputVector.size != self._numInputs:
    # FIX: the original message swapped the expected and actual sizes.
    raise ValueError(
        "Input vector dimensions don't match. Expecting %s but got %s" %
        (self._numInputs, inputVector.size))
  self._updateBookeepingVars(learn)
  inputVector = numpy.array(inputVector, dtype=realDType)
  # FIX: reshape returns a new array; the original discarded the result,
  # so multi-dimensional inputs were never actually flattened as the
  # docstring promises.
  inputVector = inputVector.reshape(-1)
  self._overlaps = self._calculateOverlap(inputVector)
  # Boosting is applied only during learning.
  if learn:
    self._boostedOverlaps = self._boostFactors * self._overlaps
  else:
    self._boostedOverlaps = self._overlaps
  activeColumns = self._inhibitColumns(self._boostedOverlaps)
  if learn:
    self._adaptSynapses(inputVector, activeColumns)
    self._updateDutyCycles(self._overlaps, activeColumns)
    self._bumpUpWeakColumns()
    self._updateBoostFactors()
    # Expensive recalculations happen only every updatePeriod iterations.
    if self._isUpdateRound():
      self._updateInhibitionRadius()
      self._updateMinDutyCycles()
  activeArray.fill(0)
  activeArray[activeColumns] = 1
|
def stripUnlearnedColumns(self, activeArray):
  """Remove columns that have never been active from the set of active
  columns selected in the inhibition round. Such columns cannot represent a
  learned pattern and are therefore meaningless if only inference is
  required. This should not be done when using a random, unlearned SP,
  since it would leave no active columns.

  :param activeArray: An array whose size is equal to the number of
      columns. Any active column whose activeDutyCycle is 0 has never been
      activated through learning and is disabled (set to 0) in place.
  """
  unlearned = numpy.where(self._activeDutyCycles == 0)[0]
  activeArray[unlearned] = 0
|
def _updateMinDutyCycles(self):
  """Update the minimum duty cycles defining normal activity for a column.
  A column with an activity duty cycle below this minimum threshold is
  boosted.
  """
  useGlobal = (self._globalInhibition or
               self._inhibitionRadius > self._numInputs)
  if useGlobal:
    self._updateMinDutyCyclesGlobal()
  else:
    self._updateMinDutyCyclesLocal()
|
def _updateMinDutyCyclesGlobal(self):
  """Update the minimum duty cycles in a global fashion. Sets every
  column's minimum overlap duty cycle to a percent
  (minPctOverlapDutyCycles) of the maximum overlap duty cycle in the
  region. Functionally equivalent to _updateMinDutyCyclesLocal, but
  exploits the globality of the computation for efficiency.
  """
  floorValue = (self._minPctOverlapDutyCycles *
                self._overlapDutyCycles.max())
  self._minOverlapDutyCycles.fill(floorValue)
|
def _updateMinDutyCyclesLocal(self):
  """Update the minimum duty cycles locally. Each column's minimum overlap
  duty cycle is set to a percent of the maximum overlap duty cycle in its
  neighborhood. Unlike _updateMinDutyCyclesGlobal, the values can differ
  between columns.
  """
  for column in xrange(self._numColumns):
    neighborhood = self._getColumnNeighborhood(column)
    # FIX: removed the unused maxActiveDuty computation that the original
    # performed here and then never read.
    maxOverlapDuty = self._overlapDutyCycles[neighborhood].max()
    self._minOverlapDutyCycles[column] = (maxOverlapDuty *
                                          self._minPctOverlapDutyCycles)
|
def _updateDutyCycles(self, overlaps, activeColumns):
  """Update the duty cycles for each column. The OVERLAP duty cycle is a
  moving average of the number of inputs which overlapped with each column.
  The ACTIVITY duty cycle is a moving average of the frequency of
  activation for each column.

  :param overlaps: array of overlap scores, one per column (number of
      connected synapses attached to input bits which are on).
  :param activeColumns: array of indices of the columns that survived
      inhibition.
  """
  overlapIndicator = numpy.zeros(self._numColumns, dtype=realDType)
  overlapIndicator[overlaps > 0] = 1
  activeIndicator = numpy.zeros(self._numColumns, dtype=realDType)
  activeIndicator[activeColumns] = 1
  # During warm-up, average over the iterations seen so far rather than
  # the full period.
  period = min(self._dutyCyclePeriod, self._iterationNum)
  self._overlapDutyCycles = self._updateDutyCyclesHelper(
      self._overlapDutyCycles, overlapIndicator, period)
  self._activeDutyCycles = self._updateDutyCyclesHelper(
      self._activeDutyCycles, activeIndicator, period)
|
def _updateInhibitionRadius(self):
  """Update the inhibition radius: a measure of the square (or hypersquare)
  of columns that a column is "connected to" on average. Since columns are
  not connected to each other directly, this is derived from how many
  inputs a column connects to, multiplied by the number of columns per
  input, averaged over all dimensions. Meaningless when global inhibition
  is enabled.
  """
  if self._globalInhibition:
    self._inhibitionRadius = int(self._columnDimensions.max())
    return
  avgConnectedSpan = numpy.average(
      [self._avgConnectedSpanForColumnND(i)
       for i in xrange(self._numColumns)])
  diameter = avgConnectedSpan * self._avgColumnsPerInput()
  # Round to the nearest integer radius, never below 1.
  radius = max(1.0, (diameter - 1) / 2.0)
  self._inhibitionRadius = int(radius + 0.5)
|
def _avgColumnsPerInput(self):
  """Return the average number of columns per input, taking into account
  the topology of inputs and columns. Used to calculate the inhibition
  radius. Supports an arbitrary number of dimensions; if the column and
  input dimension counts differ, the missing (phantom) dimensions are
  treated as ones.
  """
  numDim = max(self._columnDimensions.size, self._inputDimensions.size)
  colDim = numpy.ones(numDim)
  colDim[:self._columnDimensions.size] = self._columnDimensions
  inputDim = numpy.ones(numDim)
  inputDim[:self._inputDimensions.size] = self._inputDimensions
  return numpy.average(colDim.astype(realDType) / inputDim)
|
def _avgConnectedSpanForColumn1D(self, columnIndex):
  """Return the range of connected synapses for a column. Used to calculate
  the inhibition radius. This variant only supports a 1-D column topology.

  :param columnIndex: index identifying a column in the permanence,
      potential and connectivity matrices
  """
  assert self._inputDimensions.size == 1
  connected = self._connectedSynapses[columnIndex].nonzero()[0]
  if connected.size == 0:
    return 0
  return max(connected) - min(connected) + 1
|
def _avgConnectedSpanForColumn2D(self, columnIndex):
  """Return the range of connected synapses per column, averaged over both
  dimensions. Used to calculate the inhibition radius. This variant only
  supports a 2-D column topology.

  :param columnIndex: index identifying a column in the permanence,
      potential and connectivity matrices
  """
  assert self._inputDimensions.size == 2
  connected = self._connectedSynapses[columnIndex]
  rows, cols = connected.reshape(self._inputDimensions).nonzero()
  if rows.size == 0 and cols.size == 0:
    return 0
  rowSpan = rows.max() - rows.min() + 1
  colSpan = cols.max() - cols.min() + 1
  return numpy.average([rowSpan, colSpan])
|
def _avgConnectedSpanForColumnND(self, columnIndex):
  """Return the range of connected synapses per column, averaged over each
  dimension. Used to calculate the inhibition radius. This variant supports
  arbitrary column dimensions.

  :param columnIndex: index identifying a column in the permanence,
      potential and connectivity matrices.
  """
  dimensions = self._inputDimensions
  connected = self._connectedSynapses[columnIndex].nonzero()[0]
  if connected.size == 0:
    return 0
  # Track the bounding box of connected inputs coordinate-wise.
  maxCoord = numpy.empty(dimensions.size)
  maxCoord.fill(-1)
  minCoord = numpy.empty(dimensions.size)
  minCoord.fill(max(dimensions))
  for flatIndex in connected:
    coord = numpy.unravel_index(flatIndex, dimensions)
    maxCoord = numpy.maximum(maxCoord, coord)
    minCoord = numpy.minimum(minCoord, coord)
  return numpy.average(maxCoord - minCoord + 1)
|
def _adaptSynapses(self, inputVector, activeColumns):
  """The primary method in charge of learning. Adapt the permanence values
  of the synapses based on the input vector and the columns chosen after
  inhibition. Permanences are increased for synapses connected to input
  bits that are on, and decreased for synapses connected to input bits that
  are off.

  :param inputVector: numpy array of 0's and 1's, one entry per input bit.
  :param activeColumns: array of indices of columns that survived
      inhibition.
  """
  activeInputs = numpy.where(inputVector > 0)[0]
  # Default every input to a decrement; active inputs get an increment.
  permChanges = numpy.zeros(self._numInputs, dtype=realDType)
  permChanges.fill(-1 * self._synPermInactiveDec)
  permChanges[activeInputs] = self._synPermActiveInc
  for columnIndex in activeColumns:
    perm = self._permanences[columnIndex]
    # Only synapses inside the column's potential pool are adapted.
    maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
    perm[maskPotential] += permChanges[maskPotential]
    self._updatePermanencesForColumn(perm, columnIndex, raisePerm=True)
|
def _bumpUpWeakColumns(self):
  """Increase the permanence values of synapses of columns whose activity
  level has been too low. Such columns are identified by an overlap duty
  cycle that drops too far below that of their peers.
  """
  weakColumns = numpy.where(
      self._overlapDutyCycles < self._minOverlapDutyCycles)[0]
  for columnIndex in weakColumns:
    perm = self._permanences[columnIndex].astype(realDType)
    maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
    perm[maskPotential] += self._synPermBelowStimulusInc
    self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)
|
def _raisePermanenceToThreshold(self, perm, mask):
  """Ensure a column has enough connections to input bits to allow it to
  become active. A column needs at least 'self._stimulusThreshold' overlaps
  to be considered during inhibition; columns with fewer connections could
  never reach that threshold even with all their inputs on, so their
  permanences are raised until the minimum number of connections exists.

  :param perm: dense array of permanence values for a column (one entry per
      input bit, including zeros).
  :param mask: the indices whose permanences may be raised.
  """
  if len(mask) < self._stimulusThreshold:
    raise Exception("This is likely due to a value of stimulusThreshold "
                    "that is too large relative to the input size. "
                    "[len(mask) < self._stimulusThreshold]")
  numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
  while True:
    numConnected = numpy.nonzero(
        perm > self._synPermConnected - PERMANENCE_EPSILON)[0].size
    if numConnected >= self._stimulusThreshold:
      return
    perm[mask] += self._synPermBelowStimulusInc
|
def _updatePermanencesForColumn(self, perm, columnIndex, raisePerm=True):
  """Update the permanence matrix with a column's new permanence values.
  The column is identified by its index (a row in the matrix), and the
  permanence is given in 'dense' form (zeros included). Implements
  'clipping' (permanences stay between 0 and 1) and 'trimming' (zeroing all
  permanences below '_synPermTrimThreshold'), and keeps
  'self._permanences', 'self._connectedSynapses' and
  'self._connectedCounts' consistent. Every method wishing to modify the
  permanence matrix should go through this method.

  :param perm: dense array of permanence values for the column.
  :param columnIndex: index identifying the column in the permanence,
      potential and connectivity matrices.
  :param raisePerm: whether permanences should first be raised until a
      minimum number of synapses are connected; use False for direct
      assignment.
  """
  maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
  if raisePerm:
    self._raisePermanenceToThreshold(perm, maskPotential)
  # Trim for sparsity, then clip into the legal permanence range.
  perm[perm < self._synPermTrimThreshold] = 0
  numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
  newConnected = numpy.where(
      perm >= self._synPermConnected - PERMANENCE_EPSILON)[0]
  self._permanences.update(columnIndex, perm)
  self._connectedSynapses.replace(columnIndex, newConnected)
  self._connectedCounts[columnIndex] = newConnected.size
|
def _initPermConnected(self):
  """Return a randomly generated permanence value for a synapse initialized
  in a connected state. Values are drawn close to synPermConnected so that
  a small number of learning steps can connect or disconnect the synapse.
  """
  span = self._synPermMax - self._synPermConnected
  p = self._synPermConnected + span * self._random.getReal64()
  # Truncate to 5 decimal places.
  return int(p * 100000) / 100000.0
|
def _initPermNonConnected(self):
  """Return a randomly generated permanence value for a synapse initialized
  in a non-connected state (below synPermConnected).
  """
  p = self._synPermConnected * self._random.getReal64()
  # Truncate to 5 decimal places.
  return int(p * 100000) / 100000.0
|
def _initPermanence(self, potential, connectedPct):
  """Initialize the permanences of a column. Returns a 1-D array the size
  of the input, where each entry is the initial permanence between the
  corresponding input bit and this column.

  :param potential: numpy array specifying the potential pool of the
      column; permanences are only generated where the mask value is 1.
  :param connectedPct: value between 0 and 1 giving the chance, for each
      permanence, that its initial value is considered connected.
  """
  perm = numpy.zeros(self._numInputs, dtype=realDType)
  for i in xrange(self._numInputs):
    if potential[i] < 1:
      continue
    if self._random.getReal64() <= connectedPct:
      perm[i] = self._initPermConnected()
    else:
      perm[i] = self._initPermNonConnected()
  # Enforce sparsity by trimming tiny permanence values to zero.
  perm[perm < self._synPermTrimThreshold] = 0
  return perm
|
def _mapColumn(self, index):
  """Map a column to its respective input index, keeping to the topology of
  the region: return the index of the flattened input vector that is to be
  the center of the column's potential pool, distributing the columns
  uniformly over the inputs. Examples:

  * 1-D topology, column index 0 -> input index 0; with 3 columns over 7
    inputs, column index 1 -> input index 3.
  * 2-D topology with column dimensions [3, 5] and input dimensions
    [7, 11], column index 3 -> input index 8.

  :param index: index identifying a column in the permanence, potential
      and connectivity matrices.
  :returns: (int) index of the input bit at the column's center.
  """
  columnCoords = numpy.array(
      numpy.unravel_index(index, self._columnDimensions), dtype=realDType)
  ratios = columnCoords / self._columnDimensions
  inputCoords = self._inputDimensions * ratios
  # Shift by half a column's share of the input so the column sits over
  # the center of its slice rather than its left edge.
  inputCoords += 0.5 * self._inputDimensions / self._columnDimensions
  inputCoords = inputCoords.astype(int)
  return numpy.ravel_multi_index(inputCoords, self._inputDimensions)
|
def _mapPotential(self, index):
  """Map a column to its input bits, i.e. determine the indices of the
  input vector located within the column's potential pool. Returns a dense
  0/1 array over the inputs. The base-class implementation centers the
  column over its uniform share of the input space (via _mapColumn), takes
  the surrounding input neighborhood, and randomly samples potentialPct of
  it. Examples:

  * If potentialRadius >= the largest input dimension, each column's
    neighborhood is the entire input.
  * In 1-D with potentialRadius 5, the neighborhood is the centered input
    plus 5 inputs on each side (wrapping if wrapAround is enabled).
  * In 2-D with potentialRadius 5, the neighborhood is an 11x11 square
    centered on the column's center input.

  :param index: index identifying a column in the permanence, potential
      and connectivity matrices.
  """
  centerInput = self._mapColumn(index)
  columnInputs = self._getInputNeighborhood(centerInput).astype(uintType)
  # Sample only potentialPct of the neighborhood (rounded to nearest).
  numPotential = int(columnInputs.size * self._potentialPct + 0.5)
  selectedInputs = numpy.empty(numPotential, dtype=uintType)
  self._random.sample(columnInputs, selectedInputs)
  potential = numpy.zeros(self._numInputs, dtype=uintType)
  potential[selectedInputs] = 1
  return potential
|
@staticmethod
def _updateDutyCyclesHelper(dutyCycles, newInput, period):
  """Update a duty cycle estimate with a new value, as an exponential
  moving average:

      dutyCycle := ((period - 1) * dutyCycle + newValue) / period

  Used for several duty cycle variables (overlap duty cycle, active duty
  cycle, etc.).

  :param dutyCycles: array of one or more duty cycle values to update
  :param newInput: new numerical value folded into the estimate
  :param period: the period of the duty cycle
  :returns: the updated duty cycles
  """
  assert period >= 1
  return (dutyCycles * (period - 1.0) + newInput) / period
|
def _updateBoostFactors(self):
  """Update the boost factors for all columns. Boost factors increase the
  overlap of inactive columns to improve their chance of becoming active,
  encouraging participation of more columns in learning. The boosting
  function is:

      boostFactors = exp(-boostStrength * (dutyCycle - targetDensity))

  Columns active at the target level get a factor of 1 (no boost); columns
  whose active duty cycle falls below their neighbors' are boosted in
  proportion to how rarely they fire, and columns above the target level
  get a factor below 1 (suppressed).
  """
  if self._globalInhibition:
    self._updateBoostFactorsGlobal()
  else:
    self._updateBoostFactorsLocal()
|
def _updateBoostFactorsGlobal(self):
  """Update boost factors when global inhibition is used."""
  if self._localAreaDensity > 0:
    targetDensity = self._localAreaDensity
  else:
    # Derive the target density from numActiveColumnsPerInhArea and the
    # current inhibition area, capped at 50%.
    inhibitionArea = min(
        self._numColumns,
        (2 * self._inhibitionRadius + 1) ** self._columnDimensions.size)
    targetDensity = min(
        float(self._numActiveColumnsPerInhArea) / inhibitionArea, 0.5)
  self._boostFactors = numpy.exp(
      (targetDensity - self._activeDutyCycles) * self._boostStrength)
|
def _updateBoostFactorsLocal(self):
  """Update boost factors when local inhibition is used."""
  # Each column's target density is its neighborhood's mean activity.
  targetDensity = numpy.zeros(self._numColumns, dtype=realDType)
  for i in xrange(self._numColumns):
    neighbors = self._getColumnNeighborhood(i)
    targetDensity[i] = numpy.mean(self._activeDutyCycles[neighbors])
  self._boostFactors = numpy.exp(
      (targetDensity - self._activeDutyCycles) * self._boostStrength)
|
def _updateBookeepingVars(self, learn):
  """Update counter instance variables each round.

  :param learn: boolean indicating whether learning is being performed
      this round; the learning counter advances only when it is True.
  """
  self._iterationNum += 1
  if learn:
    self._iterationLearnNum += 1
|
def _calculateOverlap(self, inputVector):
  """Determine each column's overlap with the current input vector: the
  number of connected synapses (permanence above '_synPermConnected') on
  input bits which are turned on. Uses the sparse connectivity matrix's
  fast row-sum primitive.

  :param inputVector: numpy array of 0's and 1's comprising the input to
      the spatial pooler.
  """
  overlaps = numpy.zeros(self._numColumns, dtype=realDType)
  self._connectedSynapses.rightVecSumAtNZ_fast(
      inputVector.astype(realDType), overlaps)
  return overlaps
|
def _inhibitColumns(self, overlaps):
  """Perform inhibition: compute the target active density and delegate the
  choice of active columns to the global or local helper.

  :param overlaps: array of overlap scores, one per column (number of
      connected synapses on input bits which are turned on).
  """
  if self._localAreaDensity > 0:
    density = self._localAreaDensity
  else:
    # Derive the density from numActiveColumnsPerInhArea, capped at 50%.
    inhibitionArea = min(
        self._numColumns,
        (2 * self._inhibitionRadius + 1) ** self._columnDimensions.size)
    density = min(
        float(self._numActiveColumnsPerInhArea) / inhibitionArea, 0.5)
  useGlobal = (self._globalInhibition or
               self._inhibitionRadius > max(self._columnDimensions))
  if useGlobal:
    return self._inhibitColumnsGlobal(overlaps, density)
  return self._inhibitColumnsLocal(overlaps, density)
|
'Perform global inhibition. Performing global inhibition entails picking the
top \'numActive\' columns with the highest overlap score in the entire
region. At most half of the columns in a local neighborhood are allowed to
be active. Columns with an overlap score below the \'stimulusThreshold\' are
always inhibited.
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
:param density: The fraction of columns to survive inhibition.
@return list with indices of the winning columns'
def _inhibitColumnsGlobal(self, overlaps, density):
    """
    Perform global inhibition: pick the top 'numActive' columns by overlap
    score across the entire region. Columns whose overlap is below
    'stimulusThreshold' are always inhibited.

    :param overlaps: array of per-column overlap scores.
    :param density: fraction of columns that should survive inhibition.
    :returns: indices of the winning columns, highest overlap first.
    """
    numActive = int(density * self._numColumns)
    # Stable sort so ties are broken deterministically by column index.
    ascending = numpy.argsort(overlaps, kind='mergesort')
    # Candidate winners are the last numActive entries; advance past any
    # whose overlap falls below the stimulus threshold.
    cut = len(ascending) - numActive
    while cut < len(ascending):
        if overlaps[ascending[cut]] >= self._stimulusThreshold:
            break
        cut += 1
    return ascending[cut:][::-1]
|
'Performs local inhibition. Local inhibition is performed on a column by
column basis. Each column observes the overlaps of its neighbors and is
selected if its overlap score is within the top \'numActive\' in its local
neighborhood. At most half of the columns in a local neighborhood are
allowed to be active. Columns with an overlap score below the
\'stimulusThreshold\' are always inhibited.
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
:param density: The fraction of columns to survive inhibition. This
value is only an intended target. Since the surviving
columns are picked in a local fashion, the exact fraction
of surviving columns is likely to vary.
@return list with indices of the winning columns'
def _inhibitColumnsLocal(self, overlaps, density):
    """
    Perform local inhibition on a column-by-column basis. A column wins if
    its overlap is within the top 'numActive' of its neighborhood. Columns
    below 'stimulusThreshold' are always inhibited.

    :param overlaps: array of per-column overlap scores.
    :param density: intended fraction of surviving columns; the exact
                    fraction varies because selection is local.
    :returns: indices of the winning columns.
    """
    active = numpy.zeros(self._numColumns, dtype='bool')
    for col, score in enumerate(overlaps):
        if score < self._stimulusThreshold:
            continue
        hood = self._getColumnNeighborhood(col)
        hoodScores = overlaps[hood]
        numBigger = numpy.count_nonzero(hoodScores > score)
        # Ties lost to neighbors that were already selected earlier in
        # this pass (lower column index wins ties).
        tiedWinners = numpy.count_nonzero(active[hood[hoodScores == score]])
        numActive = int(0.5 + density * len(hood))
        if numBigger + tiedWinners < numActive:
            active[col] = True
    return active.nonzero()[0]
|
'returns true if enough rounds have passed to warrant updates of
duty cycles'
def _isUpdateRound(self):
    """Return True when enough rounds have passed to warrant a duty-cycle
    update (i.e. the iteration count is a multiple of the update period)."""
    remainder = self._iterationNum % self._updatePeriod
    return remainder == 0
|
'Gets a neighborhood of columns.
Simply calls topology.neighborhood or topology.wrappingNeighborhood
A subclass can insert different topology behavior by overriding this method.
:param centerColumn (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The columns in the neighborhood.'
def _getColumnNeighborhood(self, centerColumn):
    """
    Get a neighborhood of columns around a center column. Delegates to
    topology.wrappingNeighborhood or topology.neighborhood depending on the
    wrap-around setting; subclasses may override for different topologies.

    :param centerColumn: (int) center of the neighborhood.
    :returns: 1D numpy array of column indices in the neighborhood.
    """
    hoodFn = (topology.wrappingNeighborhood if self._wrapAround
              else topology.neighborhood)
    return hoodFn(centerColumn, self._inhibitionRadius, self._columnDimensions)
|
'Gets a neighborhood of inputs.
Simply calls topology.wrappingNeighborhood or topology.neighborhood.
A subclass can insert different topology behavior by overriding this method.
:param centerInput (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The inputs in the neighborhood.'
def _getInputNeighborhood(self, centerInput):
    """
    Get a neighborhood of inputs around a center input. Delegates to
    topology.wrappingNeighborhood or topology.neighborhood depending on the
    wrap-around setting; subclasses may override for different topologies.

    :param centerInput: (int) center of the neighborhood.
    :returns: 1D numpy array of input indices in the neighborhood.
    """
    hoodFn = (topology.wrappingNeighborhood if self._wrapAround
              else topology.neighborhood)
    return hoodFn(centerInput, self._potentialRadius, self._inputDimensions)
|
'Initialize the random seed'
def _seed(self, seed=-1):
    """Initialize the random number generator; a seed of -1 means use a
    nondeterministic default seed."""
    self._random = NupicRandom(seed) if seed != -1 else NupicRandom()
|
'Initialize class properties from stored values.'
def __setstate__(self, state):
    """
    Initialize instance attributes from a pickled state dict, upgrading
    older serialization formats to the current VERSION.

    :param state: (dict) attribute dict produced by pickling.
    """
    if state['_version'] < 2:
        # _wrapAround was introduced in version 2; version-1 SPs always
        # wrapped around.
        state['_wrapAround'] = True
    if state['_version'] < 3:
        # The overlap buffers were added in version 3. Read the column
        # count from the state dict: during unpickling __setstate__ runs on
        # a bare instance, so self._numColumns does not exist yet.
        state['_overlaps'] = numpy.zeros(state['_numColumns'], dtype=realDType)
        state['_boostedOverlaps'] = numpy.zeros(state['_numColumns'],
                                                dtype=realDType)
    state['_version'] = VERSION
    self.__dict__.update(state)
|
'Useful for debugging.'
def printParameters(self):
    """Debug helper: dump the SP's current parameter values to stdout.

    NOTE: uses Python 2 print statements, consistent with the rest of this
    module.
    """
    print '------------PY  SpatialPooler Parameters ------------------'
    print 'numInputs                  = ', self.getNumInputs()
    print 'numColumns                 = ', self.getNumColumns()
    print 'columnDimensions           = ', self._columnDimensions
    print 'numActiveColumnsPerInhArea = ', self.getNumActiveColumnsPerInhArea()
    print 'potentialPct               = ', self.getPotentialPct()
    print 'globalInhibition           = ', self.getGlobalInhibition()
    print 'localAreaDensity           = ', self.getLocalAreaDensity()
    print 'stimulusThreshold          = ', self.getStimulusThreshold()
    print 'synPermActiveInc           = ', self.getSynPermActiveInc()
    print 'synPermInactiveDec         = ', self.getSynPermInactiveDec()
    print 'synPermConnected           = ', self.getSynPermConnected()
    print 'minPctOverlapDutyCycle     = ', self.getMinPctOverlapDutyCycles()
    print 'dutyCyclePeriod            = ', self.getDutyCyclePeriod()
    print 'boostStrength              = ', self.getBoostStrength()
    print 'spVerbosity                = ', self.getSpVerbosity()
    print 'version                    = ', self._version
|
'NOTE: Anomaly likelihood scores are reported at a flat 0.5 for
learningPeriod + estimationSamples iterations.
claLearningPeriod and learningPeriod are specifying the same variable,
although claLearningPeriod is a deprecated name for it.
:param learningPeriod: (claLearningPeriod: deprecated) - (int) the number of
iterations required for the algorithm to learn the basic patterns in the
dataset and for the anomaly score to \'settle down\'. The default is based
on empirical observations but in reality this could be larger for more
complex domains. The downside if this is too large is that real anomalies
might get ignored and not flagged.
:param estimationSamples: (int) the number of reasonable anomaly scores
required for the initial estimate of the Gaussian. The default of 100
records is reasonable - we just need sufficient samples to get a decent
estimate for the Gaussian. It\'s unlikely you will need to tune this since
the Gaussian is re-estimated every 10 iterations by default.
:param historicWindowSize: (int) size of sliding window of historical
data points to maintain for periodic reestimation of the Gaussian. Note:
the default of 8640 is based on a month\'s worth of history at 5-minute
intervals.
:param reestimationPeriod: (int) how often we re-estimate the Gaussian
distribution. The ideal is to re-estimate every iteration but this is a
performance hit. In general the system is not very sensitive to this
number as long as it is small relative to the total number of records
processed.'
| def __init__(self, claLearningPeriod=None, learningPeriod=288, estimationSamples=100, historicWindowSize=8640, reestimationPeriod=100):
| if (historicWindowSize < estimationSamples):
raise ValueError('estimationSamples exceeds historicWindowSize')
self._iteration = 0
self._historicalScores = collections.deque(maxlen=historicWindowSize)
self._distribution = None
if (claLearningPeriod != None):
print 'claLearningPeriod is deprecated, use learningPeriod instead.'
self._learningPeriod = claLearningPeriod
else:
self._learningPeriod = learningPeriod
self._probationaryPeriod = (self._learningPeriod + estimationSamples)
self._reestimationPeriod = reestimationPeriod
|
'Compute a log scale representation of the likelihood value. Since the
likelihood computations return low probabilities that often go into four 9\'s
or five 9\'s, a log value is more useful for visualization, thresholding,
etc.'
@staticmethod
def computeLogLikelihood(likelihood):
    """
    Map a likelihood value to a log-scale score. Likelihoods crowd into
    four or five 9's, so a log representation is more useful for
    visualization and thresholding.

    :param likelihood: (float) likelihood in [0, 1].
    :returns: (float) log-scale score.
    """
    # The tiny epsilon guards against log(0); the divisor is ln(1e-10),
    # so this equals -log10(1 - likelihood) / 10.
    return math.log(1.0000000001 - likelihood) / -23.02585084720009
|
'Return the value of skipRecords for passing to estimateAnomalyLikelihoods
If `windowSize` is very large (bigger than the amount of data) then this
could just return `learningPeriod`. But when some values have fallen out of
the historical sliding window of anomaly records, then we have to take those
into account as well so we return the `learningPeriod` minus the number
shifted out.
:param numIngested - (int) number of data points that have been added to the
sliding window of historical data points.
:param windowSize - (int) size of sliding window of historical data points.
:param learningPeriod - (int) the number of iterations required for the
algorithm to learn the basic patterns in the dataset and for the anomaly
score to \'settle down\'.'
@staticmethod
def _calcSkipRecords(numIngested, windowSize, learningPeriod):
    """
    Return the skipRecords value to pass to estimateAnomalyLikelihoods.

    Records that have already slid out of the historical window no longer
    need to be skipped, so the learning period is reduced by the number of
    points shifted out, and the result never exceeds the points available.

    :param numIngested: (int) points added to the sliding history window.
    :param windowSize: (int) size of the sliding history window.
    :param learningPeriod: (int) iterations needed for the anomaly score to
        settle down.
    """
    numShiftedOut = max(0, numIngested - windowSize)
    remainingToSkip = max(0, learningPeriod - numShiftedOut)
    return min(numIngested, remainingToSkip)
|
'capnp deserialization method for the anomaly likelihood object
:param proto: (Object) capnp proto object specified in
nupic.regions.AnomalyLikelihoodRegion.capnp
:returns: (Object) the deserialized AnomalyLikelihood object'
@classmethod
def read(cls, proto):
    """
    capnp deserialization method for the anomaly likelihood object.

    :param proto: (Object) capnp proto object specified in
                  nupic.regions.AnomalyLikelihoodRegion.capnp
    :returns: (Object) the deserialized AnomalyLikelihood object
    """
    # Bypass __init__: every attribute is restored directly from the proto.
    anomalyLikelihood = object.__new__(cls)
    anomalyLikelihood._iteration = proto.iteration
    anomalyLikelihood._historicalScores = collections.deque(maxlen=proto.historicWindowSize)
    # Historical scores are rebuilt as (index, value, anomalyScore) triples.
    for (i, score) in enumerate(proto.historicalScores):
        anomalyLikelihood._historicalScores.append((i, score.value, score.anomalyScore))
    # An empty distribution name marks "no distribution estimated yet".
    if proto.distribution.name:
        # NOTE(review): fields are stored flat here ('name', 'mean', ...),
        # but write() reads them from a nested 'distributionParams' dict --
        # confirm which layout updateAnomalyLikelihoods() actually expects.
        anomalyLikelihood._distribution = {}
        anomalyLikelihood._distribution['name'] = proto.distribution.name
        anomalyLikelihood._distribution['mean'] = proto.distribution.mean
        anomalyLikelihood._distribution['variance'] = proto.distribution.variance
        anomalyLikelihood._distribution['stdev'] = proto.distribution.stdev
        anomalyLikelihood._distribution['movingAverage'] = {}
        anomalyLikelihood._distribution['movingAverage']['windowSize'] = proto.distribution.movingAverage.windowSize
        anomalyLikelihood._distribution['movingAverage']['historicalValues'] = []
        for value in proto.distribution.movingAverage.historicalValues:
            anomalyLikelihood._distribution['movingAverage']['historicalValues'].append(value)
        anomalyLikelihood._distribution['movingAverage']['total'] = proto.distribution.movingAverage.total
        anomalyLikelihood._distribution['historicalLikelihoods'] = []
        for likelihood in proto.distribution.historicalLikelihoods:
            anomalyLikelihood._distribution['historicalLikelihoods'].append(likelihood)
    else:
        anomalyLikelihood._distribution = None
    anomalyLikelihood._probationaryPeriod = proto.probationaryPeriod
    anomalyLikelihood._learningPeriod = proto.learningPeriod
    anomalyLikelihood._reestimationPeriod = proto.reestimationPeriod
    return anomalyLikelihood
|
'capnp serialization method for the anomaly likelihood object
:param proto: (Object) capnp proto object specified in
nupic.regions.AnomalyLikelihoodRegion.capnp'
def write(self, proto):
    """
    capnp serialization method for the anomaly likelihood object.

    :param proto: (Object) capnp proto object specified in
                  nupic.regions.AnomalyLikelihoodRegion.capnp
    """
    proto.iteration = self._iteration
    # Serialize the (timestamp, value, anomalyScore) history; the timestamp
    # element is not persisted (read() regenerates indices).
    pHistScores = proto.init('historicalScores', len(self._historicalScores))
    for (i, score) in enumerate(list(self._historicalScores)):
        (_, value, anomalyScore) = score
        record = pHistScores[i]
        record.value = float(value)
        record.anomalyScore = float(anomalyScore)
    if self._distribution:
        # NOTE(review): this reads from a nested 'distributionParams' dict,
        # while read() stores the same fields flat on _distribution --
        # a round-trip through read() then write() would raise KeyError.
        # Confirm the expected layout against estimateAnomalyLikelihoods().
        proto.distribution.name = self._distribution['distributionParams']['name']
        proto.distribution.mean = self._distribution['distributionParams']['mean']
        proto.distribution.variance = self._distribution['distributionParams']['variance']
        proto.distribution.stdev = self._distribution['distributionParams']['stdev']
        proto.distribution.movingAverage.windowSize = self._distribution['movingAverage']['windowSize']
        historicalValues = self._distribution['movingAverage']['historicalValues']
        pHistValues = proto.distribution.movingAverage.init('historicalValues', len(historicalValues))
        for (i, value) in enumerate(historicalValues):
            pHistValues[i] = float(value)
        # NOTE(review): this direct assignment overwrites the per-element
        # copy performed just above -- one of the two is redundant.
        proto.distribution.movingAverage.historicalValues = self._distribution['movingAverage']['historicalValues']
        proto.distribution.movingAverage.total = self._distribution['movingAverage']['total']
        historicalLikelihoods = self._distribution['historicalLikelihoods']
        pHistLikelihoods = proto.distribution.init('historicalLikelihoods', len(historicalLikelihoods))
        for (i, likelihood) in enumerate(historicalLikelihoods):
            pHistLikelihoods[i] = float(likelihood)
    proto.probationaryPeriod = self._probationaryPeriod
    proto.learningPeriod = self._learningPeriod
    proto.reestimationPeriod = self._reestimationPeriod
    proto.historicWindowSize = self._historicalScores.maxlen
|
'Compute the probability that the current value plus anomaly score represents
an anomaly given the historical distribution of anomaly scores. The closer
the number is to 1, the higher the chance it is an anomaly.
:param value: the current metric ("raw") input value, eg. "orange", or
\'21.2\' (deg. Celsius), ...
:param anomalyScore: the current anomaly score
:param timestamp: [optional] timestamp of the ocurrence,
default (None) results in using iteration step.
:returns: the anomalyLikelihood for this record.'
def anomalyProbability(self, value, anomalyScore, timestamp=None):
    """
    Compute the probability that the current value plus anomaly score
    represents an anomaly, given the historical distribution of anomaly
    scores. The closer to 1, the more anomalous.

    :param value: the current metric ("raw") input value, e.g. "orange" or
                  '21.2' (deg. Celsius), ...
    :param anomalyScore: the current anomaly score.
    :param timestamp: [optional] timestamp of the occurrence; default (None)
                      uses the iteration step instead.
    :returns: the anomalyLikelihood for this record.
    """
    if timestamp is None:
        timestamp = self._iteration
    dataPoint = (timestamp, value, anomalyScore)

    if self._iteration < self._probationaryPeriod:
        # Not enough history yet -- report a neutral likelihood.
        likelihood = 0.5
    else:
        # Periodically (and on first use) re-estimate the Gaussian from the
        # historical window, skipping still-settling early records.
        needsReestimate = (self._distribution is None or
                           self._iteration % self._reestimationPeriod == 0)
        if needsReestimate:
            numSkipRecords = self._calcSkipRecords(
                numIngested=self._iteration,
                windowSize=self._historicalScores.maxlen,
                learningPeriod=self._learningPeriod)
            _, _, self._distribution = estimateAnomalyLikelihoods(
                self._historicalScores, skipRecords=numSkipRecords)
        likelihoods, _, self._distribution = updateAnomalyLikelihoods(
            [dataPoint], self._distribution)
        likelihood = 1.0 - likelihoods[0]

    # Record the point and advance, whether or not we were in probation.
    self._historicalScores.append(dataPoint)
    self._iteration += 1
    return likelihood
|
'@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this trace
@param title (string) Title'
def __init__(self, monitor, title):
    """
    @param monitor (MonitorMixinBase) Monitor mixin instance that generated
                   this trace
    @param title   (string) Title
    """
    self.data = []
    self.title = title
    self.monitor = monitor
|
'@param datum (object) Datum from `self.data` to pretty-print
@return (string) Pretty-printed datum'
@staticmethod
def prettyPrintDatum(datum):
    """
    @param datum (object) Datum from `self.data` to pretty-print
    @return (string) Pretty-printed datum ('' for None)
    """
    if datum is None:
        return ''
    return str(datum)
|
'@return (CountsTrace) A new Trace made up of counts of this trace\'s indices.'
def makeCountsTrace(self):
    """
    @return (CountsTrace) A new trace whose data are the lengths of this
    trace's index collections.
    """
    countsTrace = CountsTrace(self.monitor, '# {0}'.format(self.title))
    countsTrace.data = [len(indexList) for indexList in self.data]
    return countsTrace
|
'@return (CountsTrace) A new Trace made up of cumulative counts of this
trace\'s indices.'
def makeCumCountsTrace(self):
    """
    @return (CountsTrace) A new trace whose data are the cumulative counts
    of this trace's indices.
    """
    cumTrace = CountsTrace(self.monitor, '# (cumulative) {0}'.format(self.title))
    counts = self.makeCountsTrace().data
    # Running-total accumulation (itertools.accumulate is unavailable in
    # the Python version this module targets).
    runningTotals = []
    total = 0
    for count in counts:
        total += count
        runningTotals.append(total)
    cumTrace.data = runningTotals
    return cumTrace
|
'@return (Trace) Trace of active columns'
def mmGetTraceActiveColumns(self):
    """
    @return (Trace) Trace of active columns
    """
    traces = self._mmTraces
    return traces['activeColumns']
|
'@return (Trace) Trace of predictive cells'
def mmGetTracePredictiveCells(self):
    """
    @return (Trace) Trace of predictive cells
    """
    traces = self._mmTraces
    return traces['predictiveCells']
|
'@return (Trace) Trace of # segments'
def mmGetTraceNumSegments(self):
    """
    @return (Trace) Trace of # segments
    """
    traces = self._mmTraces
    return traces['numSegments']
|
'@return (Trace) Trace of # synapses'
def mmGetTraceNumSynapses(self):
    """
    @return (Trace) Trace of # synapses
    """
    traces = self._mmTraces
    return traces['numSynapses']
|
'@return (Trace) Trace of sequence labels'
def mmGetTraceSequenceLabels(self):
    """
    @return (Trace) Trace of sequence labels
    """
    traces = self._mmTraces
    return traces['sequenceLabels']
|
'@return (Trace) Trace of resets'
def mmGetTraceResets(self):
    """
    @return (Trace) Trace of resets
    """
    traces = self._mmTraces
    return traces['resets']
|
'@return (Trace) Trace of predicted => active cells'
def mmGetTracePredictedActiveCells(self):
    """
    @return (Trace) Trace of predicted => active cells
    """
    # Transition traces are computed lazily; refresh them first.
    self._mmComputeTransitionTraces()
    traces = self._mmTraces
    return traces['predictedActiveCells']
|
'@return (Trace) Trace of predicted => inactive cells'
def mmGetTracePredictedInactiveCells(self):
    """
    @return (Trace) Trace of predicted => inactive cells
    """
    # Transition traces are computed lazily; refresh them first.
    self._mmComputeTransitionTraces()
    traces = self._mmTraces
    return traces['predictedInactiveCells']
|
'@return (Trace) Trace of predicted => active columns'
def mmGetTracePredictedActiveColumns(self):
    """
    @return (Trace) Trace of predicted => active columns
    """
    # Transition traces are computed lazily; refresh them first.
    self._mmComputeTransitionTraces()
    traces = self._mmTraces
    return traces['predictedActiveColumns']
|
'@return (Trace) Trace of predicted => inactive columns'
def mmGetTracePredictedInactiveColumns(self):
    """
    @return (Trace) Trace of predicted => inactive columns
    """
    # Transition traces are computed lazily; refresh them first.
    self._mmComputeTransitionTraces()
    traces = self._mmTraces
    return traces['predictedInactiveColumns']
|
'@return (Trace) Trace of unpredicted => active columns'
def mmGetTraceUnpredictedActiveColumns(self):
    """
    @return (Trace) Trace of unpredicted => active columns
    """
    # Transition traces are computed lazily; refresh them first.
    self._mmComputeTransitionTraces()
    traces = self._mmTraces
    return traces['unpredictedActiveColumns']
|
'Convenience method to compute a metric over an indices trace, excluding
resets.
@param (IndicesTrace) Trace of indices
@return (Metric) Metric over trace excluding resets'
def mmGetMetricFromTrace(self, trace):
    """
    Convenience method to compute a metric over an indices trace, excluding
    resets.

    @param trace (IndicesTrace) Trace of indices
    @return (Metric) Metric over the trace excluding resets
    """
    countsTrace = trace.makeCountsTrace()
    resetsTrace = self.mmGetTraceResets()
    return Metric.createFromTrace(countsTrace, excludeResets=resetsTrace)
|