'inputRecord - dict containing the input to the sensor Return a \'SensorInput\' object, which represents the \'parsed\' representation of the input record'
def _getSensorInputRecord(self, inputRecord):
    sensor = self._getSensorRegion()
    dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut'))
    dataDict = copy.deepcopy(inputRecord)
    inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings')
    inputRecordCategory = int(sensor.getOutputData('categoryOut')[0])
    resetOut = sensor.getOutputData('resetOut')[0]

    return SensorInput(dataRow=dataRow,
                       dataDict=dataDict,
                       dataEncodings=inputRecordEncodings,
                       sequenceReset=resetOut,
                       category=inputRecordCategory)
'inputRecord - dict containing the input to the sensor Return a \'ClassifierInput\' object, which contains the mapped bucket index for input Record'
def _getClassifierInputRecord(self, inputRecord):
    absoluteValue = None
    bucketIdx = None

    if self._predictedFieldName is not None and self._classifierInputEncoder is not None:
        absoluteValue = inputRecord[self._predictedFieldName]
        bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]

    return ClassifierInput(dataRow=absoluteValue, bucketIndex=bucketIdx)
'Compute Anomaly score, if required'
def _anomalyCompute(self):
    inferenceType = self.getInferenceType()
    inferences = {}
    sp = self._getSPRegion()
    score = None

    if inferenceType == InferenceType.NontemporalAnomaly:
        score = sp.getOutputData('anomalyScore')[0]
    elif inferenceType == InferenceType.TemporalAnomaly:
        tm = self._getTPRegion()

        if sp is not None:
            activeColumns = sp.getOutputData('bottomUpOut').nonzero()[0]
        else:
            sensor = self._getSensorRegion()
            activeColumns = sensor.getOutputData('dataOut').nonzero()[0]

        if self._predictedFieldName not in self._input:
            raise ValueError("Expected predicted field '%s' in input row, but was not found!"
                             % self._predictedFieldName)

        score = tm.getOutputData('anomalyScore')[0]

        if sp is not None:
            self._getAnomalyClassifier().setParameter('activeColumnCount', len(activeColumns))
            self._getAnomalyClassifier().prepareInputs()
            self._getAnomalyClassifier().compute()

            labels = self._getAnomalyClassifier().getSelf().getLabelResults()
            inferences[InferenceElement.anomalyLabel] = '%s' % labels

    inferences[InferenceElement.anomalyScore] = score
    return inferences
'Handle the CLA Classifier compute logic when implementing multi-step prediction. This is where the patternNZ is associated with one of the other fields from the dataset 0 to N steps in the future. This method is used by each type of network (encoder only, SP only, SP +TM) to handle the compute logic through the CLA Classifier. It fills in the inference dict with the results of the compute. Parameters: patternNZ: The input to the CLA Classifier as a list of active input indices inputTSRecordIdx: The index of the record as computed from the timestamp and aggregation interval. This normally increments by 1 each time unless there are missing records. If there is no aggregation interval or timestamp in the data, this will be None. rawInput: The raw input to the sensor, as a dict.'
def _handleSDRClassifierMultiStep(self, patternNZ, inputTSRecordIdx, rawInput):
    inferenceArgs = self.getInferenceArgs()
    predictedFieldName = inferenceArgs.get('predictedField', None)
    if predictedFieldName is None:
        raise ValueError('No predicted field was enabled! Did you call enableInference()?')
    self._predictedFieldName = predictedFieldName

    classifier = self._getClassifierRegion()
    if not self._hasCL or classifier is None:
        # No classifier, so return an empty inference dict.
        return {}

    sensor = self._getSensorRegion()
    minLikelihoodThreshold = self._minLikelihoodThreshold
    maxPredictionsPerStep = self._maxPredictionsPerStep
    needLearning = self.isLearningEnabled()
    inferences = {}

    # Set up the classifier input encoder the first time through.
    if self._classifierInputEncoder is None:
        if predictedFieldName is None:
            raise RuntimeError("This experiment description is missing the "
                               "'predictedField' in its config, which is required "
                               "for multi-step prediction inference.")
        encoderList = sensor.getSelf().encoder.getEncoderList()
        self._numFields = len(encoderList)

        fieldNames = sensor.getSelf().encoder.getScalarNames()
        if predictedFieldName in fieldNames:
            self._predictedFieldIdx = fieldNames.index(predictedFieldName)
        else:
            # Predicted field is not fed to the bottom of the network; it only
            # goes to the classifier.
            self._predictedFieldIdx = None

        # Prefer the classifier-only (disabled) encoder for the predicted field;
        # fall back to the regular sensor encoder.
        if sensor.getSelf().disabledEncoder is not None:
            encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
        else:
            encoderList = []
        if len(encoderList) >= 1:
            fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
            self._classifierInputEncoder = encoderList[fieldNames.index(predictedFieldName)]
        else:
            encoderList = sensor.getSelf().encoder.getEncoderList()
            self._classifierInputEncoder = encoderList[self._predictedFieldIdx]

    if predictedFieldName not in rawInput:
        raise ValueError("Input row does not contain a value for the predicted "
                         "field configured for this model. Missing value for '%s'"
                         % predictedFieldName)

    absoluteValue = rawInput[predictedFieldName]
    bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]

    # A delta encoder learns and predicts the delta between the current and
    # previous absolute values.
    if isinstance(self._classifierInputEncoder, DeltaEncoder):
        if not hasattr(self, '_ms_prevVal'):
            self._ms_prevVal = absoluteValue
        prevValue = self._ms_prevVal
        self._ms_prevVal = absoluteValue
        actualValue = absoluteValue - prevValue
    else:
        actualValue = absoluteValue

    if isinstance(actualValue, float) and math.isnan(actualValue):
        actualValue = SENTINEL_VALUE_FOR_MISSING_DATA

    # Pass the input to the classifier.
    classifier.setParameter('inferenceMode', True)
    classifier.setParameter('learningMode', needLearning)
    classificationIn = {'bucketIdx': bucketIdx, 'actValue': actualValue}

    # Use the timestamp-derived record index when available so that missing
    # records are handled correctly.
    if inputTSRecordIdx is not None:
        recordNum = inputTSRecordIdx
    else:
        recordNum = self.__numRunCalls
    clResults = classifier.getSelf().customCompute(recordNum=recordNum,
                                                   patternNZ=patternNZ,
                                                   classification=classificationIn)

    # Collect the predictions for every step count the classifier was trained on.
    predictionSteps = classifier.getParameter('steps')
    predictionSteps = [int(x) for x in predictionSteps.split(',')]

    inferences[InferenceElement.multiStepPredictions] = dict()
    inferences[InferenceElement.multiStepBestPredictions] = dict()
    inferences[InferenceElement.multiStepBucketLikelihoods] = dict()

    for steps in predictionSteps:
        likelihoodsVec = clResults[steps]
        bucketValues = clResults['actualValues']

        # Accumulate likelihoods per unique bucket value and track the best one.
        likelihoodsDict = dict()
        bestActValue = None
        bestProb = None
        for (actValue, prob) in zip(bucketValues, likelihoodsVec):
            if actValue in likelihoodsDict:
                likelihoodsDict[actValue] += prob
            else:
                likelihoodsDict[actValue] = prob
            if bestProb is None or likelihoodsDict[actValue] > bestProb:
                bestProb = likelihoodsDict[actValue]
                bestActValue = actValue

        likelihoodsDict = HTMPredictionModel._removeUnlikelyPredictions(
            likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)

        # Likelihood per bucket index.
        bucketLikelihood = {}
        for k in likelihoodsDict.keys():
            bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = likelihoodsDict[k]

        # For a delta encoder the predictions are deltas; reconstruct absolute
        # values by offsetting from the current value plus the history of best
        # predicted deltas.
        if isinstance(self._classifierInputEncoder, DeltaEncoder):
            if not hasattr(self, '_ms_predHistories'):
                self._ms_predHistories = dict()
            predHistories = self._ms_predHistories
            if steps not in predHistories:
                predHistories[steps] = deque()
            predHistory = predHistories[steps]

            sumDelta = sum(predHistory)
            offsetDict = dict()
            for (k, v) in likelihoodsDict.iteritems():
                if k is not None:
                    offsetDict[absoluteValue + float(k) + sumDelta] = v

            bucketLikelihoodOffset = {}
            for k in offsetDict.keys():
                bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = offsetDict[k]

            # Push the current best delta onto the history buffer and trim it.
            if bestActValue is not None:
                predHistory.append(bestActValue)
            if len(predHistory) >= steps:
                predHistory.popleft()

            if len(offsetDict) > 0:
                inferences[InferenceElement.multiStepPredictions][steps] = offsetDict
                inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset
            else:
                inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
                inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood

            if bestActValue is None:
                inferences[InferenceElement.multiStepBestPredictions][steps] = None
            else:
                inferences[InferenceElement.multiStepBestPredictions][steps] = (
                    absoluteValue + sumDelta + bestActValue)

        # Normal case, no delta encoder.
        else:
            inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
            inferences[InferenceElement.multiStepBestPredictions][steps] = bestActValue
            inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood

    return inferences
'Remove entries with 0 likelihood or likelihood less than minLikelihoodThreshold, but don\'t leave an empty dict.'
@classmethod
def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
                               maxPredictionsPerStep):
    maxVal = (None, None)
    for (k, v) in likelihoodsDict.items():
        if len(likelihoodsDict) <= 1:
            break
        if maxVal[0] is None or v >= maxVal[1]:
            if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
                del likelihoodsDict[maxVal[0]]
            maxVal = (k, v)
        elif v < minLikelihoodThreshold:
            del likelihoodsDict[k]
    likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
                                  key=itemgetter(1),
                                  reverse=True)[:maxPredictionsPerStep])
    return likelihoodsDict
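A standalone sketch of the same pruning rule (illustration only, not the class method itself): entries below the threshold are dropped, the most likely entry always survives so the dict never becomes empty, and the result is capped at maxPredictionsPerStep entries.
from operator import itemgetter

def prune_predictions(likelihoods, minLikelihoodThreshold, maxPredictionsPerStep):
    # Keep entries at or above the threshold; never let the dict go empty.
    best = max(likelihoods.items(), key=itemgetter(1))
    kept = dict((v, p) for v, p in likelihoods.items() if p >= minLikelihoodThreshold)
    if not kept:
        kept = {best[0]: best[1]}
    # Cap the number of surviving predictions.
    return dict(sorted(kept.items(), key=itemgetter(1), reverse=True)[:maxPredictionsPerStep])

# prune_predictions({5.0: 0.6, 7.0: 0.3, 9.0: 0.06, 11.0: 0.04}, 0.05, 2)
#   -> {5.0: 0.6, 7.0: 0.3}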
'Only returns data for a stat called ``numRunCalls``. :return: (dict) runtime statistics, including ``numRunCalls``'
def getRuntimeStats(self):
    ret = {'numRunCalls': self.__numRunCalls}
    temporalStats = dict()
    if self._hasTP:
        for stat in self._netInfo.statsCollectors:
            sdict = stat.getStats()
            temporalStats.update(sdict)
    ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats
    return ret
'Get the logger for this object. This is a protected method that is used by the Model to access the logger created by the subclass return: A logging.Logger object. Should not be None'
def _getLogger(self):
return self.__logger
'Returns reference to the network\'s SP region'
def _getSPRegion(self):
return self._netInfo.net.regions.get('SP', None)
'Returns reference to the network\'s TM region'
def _getTPRegion(self):
return self._netInfo.net.regions.get('TM', None)
'Returns reference to the network\'s Sensor region'
def _getSensorRegion(self):
return self._netInfo.net.regions['sensor']
'Returns reference to the network\'s Classifier region'
def _getClassifierRegion(self):
    if self._netInfo.net is not None and 'Classifier' in self._netInfo.net.regions:
        return self._netInfo.net.regions['Classifier']
    else:
        return None
'Returns: sensor region\'s encoder for the given network'
def _getEncoder(self):
return self._getSensorRegion().getSelf().encoder
'Returns: sensor region\'s encoder that is sent only to the classifier, not to the bottom of the network'
def _getClassifierOnlyEncoder(self):
return self._getSensorRegion().getSelf().disabledEncoder
'Returns: data source that we installed in sensor region'
def _getDataSource(self):
return self._getSensorRegion().getSelf().dataSource
'Create a CLA network and return it. description: HTMPredictionModel description dictionary (TODO: define schema) Returns: NetworkInfo instance;'
def __createHTMNetwork(self, sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable, clParams, anomalyParams):
    n = Network()

    # Add the sensor region.
    n.addRegion('sensor', 'py.RecordSensor',
                json.dumps(dict(verbosity=sensorParams['verbosity'])))
    sensor = n.regions['sensor'].getSelf()

    # Enabled encoders feed the bottom of the network; classifier-only encoders
    # go into a separate "disabled" MultiEncoder that feeds only the classifier.
    enabledEncoders = copy.deepcopy(sensorParams['encoders'])
    for (name, params) in enabledEncoders.items():
        if params is not None:
            classifierOnly = params.pop('classifierOnly', False)
            if classifierOnly:
                enabledEncoders.pop(name)

    disabledEncoders = copy.deepcopy(sensorParams['encoders'])
    for (name, params) in disabledEncoders.items():
        if params is None:
            disabledEncoders.pop(name)
        else:
            classifierOnly = params.pop('classifierOnly', False)
            if not classifierOnly:
                disabledEncoders.pop(name)

    encoder = MultiEncoder(enabledEncoders)
    sensor.encoder = encoder
    sensor.disabledEncoder = MultiEncoder(disabledEncoders)
    sensor.dataSource = DataBuffer()

    prevRegion = 'sensor'
    prevRegionWidth = encoder.getWidth()

    if spEnable:
        spParams = spParams.copy()
        spParams['inputWidth'] = prevRegionWidth
        self.__logger.debug('Adding SPRegion; spParams: %r' % spParams)
        n.addRegion('SP', 'py.SPRegion', json.dumps(spParams))

        n.link('sensor', 'SP', 'UniformLink', '')
        n.link('sensor', 'SP', 'UniformLink', '',
               srcOutput='resetOut', destInput='resetIn')
        n.link('SP', 'sensor', 'UniformLink', '',
               srcOutput='spatialTopDownOut', destInput='spatialTopDownIn')
        n.link('SP', 'sensor', 'UniformLink', '',
               srcOutput='temporalTopDownOut', destInput='temporalTopDownIn')

        prevRegion = 'SP'
        prevRegionWidth = spParams['columnCount']

    if tmEnable:
        tmParams = tmParams.copy()
        if prevRegion == 'sensor':
            tmParams['inputWidth'] = tmParams['columnCount'] = prevRegionWidth
        else:
            assert tmParams['columnCount'] == prevRegionWidth
            tmParams['inputWidth'] = tmParams['columnCount']

        self.__logger.debug('Adding TMRegion; tmParams: %r' % tmParams)
        n.addRegion('TM', 'py.TMRegion', json.dumps(tmParams))

        n.link(prevRegion, 'TM', 'UniformLink', '')
        if prevRegion != 'sensor':
            n.link('TM', prevRegion, 'UniformLink', '',
                   srcOutput='topDownOut', destInput='topDownIn')
        else:
            n.link('TM', prevRegion, 'UniformLink', '',
                   srcOutput='topDownOut', destInput='temporalTopDownIn')
        n.link('sensor', 'TM', 'UniformLink', '',
               srcOutput='resetOut', destInput='resetIn')

        prevRegion = 'TM'
        prevRegionWidth = tmParams['inputWidth']

    if clEnable and clParams is not None:
        clParams = clParams.copy()
        clRegionName = clParams.pop('regionName')
        self.__logger.debug('Adding %s; clParams: %r' % (clRegionName, clParams))
        n.addRegion('Classifier', 'py.%s' % str(clRegionName), json.dumps(clParams))

        # SDR Classifier-specific links.
        if str(clRegionName) == 'SDRClassifierRegion':
            n.link('sensor', 'Classifier', 'UniformLink', '',
                   srcOutput='actValueOut', destInput='actValueIn')
            n.link('sensor', 'Classifier', 'UniformLink', '',
                   srcOutput='bucketIdxOut', destInput='bucketIdxIn')

        n.link('sensor', 'Classifier', 'UniformLink', '',
               srcOutput='categoryOut', destInput='categoryIn')
        n.link(prevRegion, 'Classifier', 'UniformLink', '')

    if self.getInferenceType() == InferenceType.TemporalAnomaly:
        anomalyClParams = dict(
            trainRecords=anomalyParams.get('autoDetectWaitRecords', None),
            cacheSize=anomalyParams.get('anomalyCacheRecords', None))
        self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tmEnable)

    n.initialize()

    return NetworkInfo(net=n, statsCollectors=[])
'Return serializable state. This function will return a version of the __dict__ with data that shouldn\'t be pickled stripped out. In particular, the CLA Network is stripped out because it has its own serialization mechanism. See also: _serializeExtraData()'
def __getstate__(self):
    state = self.__dict__.copy()
    state['_netInfo'] = NetworkInfo(net=None,
                                    statsCollectors=self._netInfo.statsCollectors)
    for ephemeral in [self.__manglePrivateMemberName('__restoringFromState'),
                      self.__manglePrivateMemberName('__logger')]:
        state.pop(ephemeral)
    return state
'Set the state of ourself from a serialized state. See also: _deSerializeExtraData'
def __setstate__(self, state):
    self.__dict__.update(state)
    self.__restoringFromState = True
    self.__logger = initLogger(self)

    if not hasattr(self, '_Model__inferenceType'):
        self.__restoringFromV1 = True
        self._hasSP = True
        if self.__temporalNetInfo is not None:
            self._Model__inferenceType = InferenceType.TemporalNextStep
            self._netInfo = self.__temporalNetInfo
            self._hasTP = True
        else:
            raise RuntimeError('The Nontemporal inference type is not supported')

        self._Model__inferenceArgs = {}
        self._Model__learningEnabled = True
        self._Model__inferenceEnabled = True

        self.__dict__.pop('_HTMPredictionModel__encoderNetInfo', None)
        self.__dict__.pop('_HTMPredictionModel__nonTemporalNetInfo', None)
        self.__dict__.pop('_HTMPredictionModel__temporalNetInfo', None)

    if not hasattr(self, '_netInfo'):
        self._hasSP = False
        self._hasTP = False
        if self.__encoderNetInfo is not None:
            self._netInfo = self.__encoderNetInfo
        elif self.__nonTemporalNetInfo is not None:
            self._netInfo = self.__nonTemporalNetInfo
            self._hasSP = True
        else:
            self._netInfo = self.__temporalNetInfo
            self._hasSP = True
            self._hasTP = True

        self.__dict__.pop('_HTMPredictionModel__encoderNetInfo', None)
        self.__dict__.pop('_HTMPredictionModel__nonTemporalNetInfo', None)
        self.__dict__.pop('_HTMPredictionModel__temporalNetInfo', None)

    self._classifierInputEncoder = None

    if not hasattr(self, '_minLikelihoodThreshold'):
        self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLD
    if not hasattr(self, '_maxPredictionsPerStep'):
        self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEP
    if not hasattr(self, '_hasCL'):
        self._hasCL = (self._getClassifierRegion() is not None)

    self.__logger.debug('Restoring %s from state...' % self.__class__.__name__)
':param proto: capnp HTMPredictionModelProto message builder'
def write(self, proto):
    super(HTMPredictionModel, self).writeBaseToProto(proto.modelBase)
    proto.numRunCalls = self.__numRunCalls
    proto.minLikelihoodThreshold = self._minLikelihoodThreshold
    proto.maxPredictionsPerStep = self._maxPredictionsPerStep
    self._netInfo.net.write(proto.network)
':param proto: capnp HTMPredictionModelProto message reader'
@classmethod
def read(cls, proto):
    network = Network.read(proto.network)
    spEnable = ('SP' in network.regions)
    tmEnable = ('TM' in network.regions)
    clEnable = ('Classifier' in network.regions)
    model = cls(spEnable=spEnable, tmEnable=tmEnable, clEnable=clEnable,
                network=network, baseProto=proto.modelBase)
    model.__numRunCalls = proto.numRunCalls
    model._minLikelihoodThreshold = proto.minLikelihoodThreshold
    model._maxPredictionsPerStep = proto.maxPredictionsPerStep
    model._getSensorRegion().getSelf().dataSource = DataBuffer()
    model._netInfo.net.initialize()
    model.__restoringFromState = False
    return model
'[virtual method override] This method is called during serialization with an external directory path that can be used to bypass pickle for saving large binary states. extraDataDir: Model\'s extra data directory path'
def _serializeExtraData(self, extraDataDir):
    makeDirectoryFromAbsolutePath(extraDataDir)
    outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
    self.__logger.debug('Serializing network...')
    self._netInfo.net.save(outputDir)
    self.__logger.debug('Finished serializing network')
    return
'[virtual method override] This method is called during deserialization (after __setstate__) with an external directory path that can be used to bypass pickle for loading large binary states. extraDataDir: Model\'s extra data directory path'
def _deSerializeExtraData(self, extraDataDir):
    assert self.__restoringFromState
    assert self._netInfo.net is None, 'Network was already unpickled'

    stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
    self.__logger.debug('(%s) De-serializing network...', self)
    self._netInfo.net = Network(stateDir)
    self.__logger.debug('(%s) Finished de-serializing network', self)
    self._netInfo.net.initialize()

    if self.getInferenceType() == InferenceType.TemporalAnomaly:
        classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__
        # Migrate an old KNNClassifierRegion-based anomaly classifier to the
        # newer KNNAnomalyClassifierRegion.
        if classifierType == 'KNNClassifierRegion':
            anomalyClParams = dict(
                trainRecords=self._classifier_helper._autoDetectWaitRecords,
                cacheSize=self._classifier_helper._history_length)
            spEnable = (self._getSPRegion() is not None)
            tmEnable = True

            knnRegion = self._getAnomalyClassifier().getSelf()
            self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,
                                             spEnable, tmEnable)
            self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls
            self._getAnomalyClassifier().getSelf()._recordsCache = self._classifier_helper.saved_states
            self._getAnomalyClassifier().getSelf().saved_categories = self._classifier_helper.saved_categories
            self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion
            self._getTPRegion().setParameter('anomalyMode', True)
            del self._classifier_helper
            self._netInfo.net.initialize()

    self.__restoringFromState = False
    self.__logger.debug('(%s) Finished restoring from state', self)
    return
'Attaches an \'AnomalyClassifier\' region to the network. Will remove current \'AnomalyClassifier\' region if it exists. Parameters network - network to add the AnomalyClassifier region params - parameters to pass to the region spEnable - True if network has an SP region tmEnable - True if network has a TM region; Currently requires True'
def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable):
    allParams = copy.deepcopy(params)
    knnParams = dict(k=1,
                     distanceMethod='rawOverlap',
                     distanceNorm=1,
                     doBinarization=1,
                     replaceDuplicates=0,
                     maxStoredPatterns=1000)
    allParams.update(knnParams)

    if allParams['trainRecords'] is None:
        allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS
    if allParams['cacheSize'] is None:
        allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE

    if (self._netInfo is not None and self._netInfo.net is not None
            and self._getAnomalyClassifier() is not None):
        self._netInfo.net.removeRegion('AnomalyClassifier')

    network.addRegion('AnomalyClassifier',
                      'py.KNNAnomalyClassifierRegion',
                      json.dumps(allParams))

    if spEnable:
        network.link('SP', 'AnomalyClassifier', 'UniformLink', '',
                     srcOutput='bottomUpOut', destInput='spBottomUpOut')
    else:
        network.link('sensor', 'AnomalyClassifier', 'UniformLink', '',
                     srcOutput='dataOut', destInput='spBottomUpOut')

    if tmEnable:
        network.link('TM', 'AnomalyClassifier', 'UniformLink', '',
                     srcOutput='topDownOut', destInput='tpTopDownOut')
        network.link('TM', 'AnomalyClassifier', 'UniformLink', '',
                     srcOutput='lrnActiveStateT', destInput='tpLrnActiveStateT')
    else:
        raise RuntimeError('TemporalAnomaly models require a TM region.')
'extraDataDir: Model\'s extra data directory path Returns: Absolute directory path for saving CLA Network'
def __getNetworkStateDirectory(self, extraDataDir):
    if self.__restoringFromV1:
        if self.getInferenceType() == InferenceType.TemporalNextStep:
            leafName = 'temporal' + '-network.nta'
        else:
            leafName = 'nonTemporal' + '-network.nta'
    else:
        leafName = InferenceType.getLabel(self.getInferenceType()) + '-network.nta'
    path = os.path.join(extraDataDir, leafName)
    path = os.path.abspath(path)
    return path
'Mangles the given private member name; a private member name is one that begins with two or more underscores and ends with at most one underscore. privateMemberName: The private member name (e.g., "__logger") skipCheck: Pass True to skip the test for presence of the mangled member in our instance. Returns: The mangled member name (e.g., "_HTMPredictionModel__logger")'
def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):
    assert privateMemberName.startswith('__'), (
        "%r doesn't start with __" % privateMemberName)
    assert not privateMemberName.startswith('___'), (
        '%r starts with ___' % privateMemberName)
    assert not privateMemberName.endswith('__'), (
        '%r ends with more than one underscore' % privateMemberName)

    realName = '_' + self.__myClassName.lstrip('_') + privateMemberName

    if not skipCheck:
        getattr(self, realName)

    return realName
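The helper reproduces CPython's class-private name mangling; a small standalone illustration of that rule (not part of the model code):
class Demo(object):
    def __init__(self):
        self.__secret = 42        # stored on the instance as '_Demo__secret'

d = Demo()
assert '_Demo__secret' in vars(d)
assert getattr(d, '_Demo__secret') == 42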
'Initialize interpreter with blacklisted nodes removed from supported nodes.'
def __init__(self, *args, **kwargs):
    self.supported_nodes = tuple(set(self.supported_nodes) - self.blacklisted_nodes)
    asteval.Interpreter.__init__(self, *args, **kwargs)
'Validates control dictionary for the experiment context'
def __validateExperimentControl(self, control):
    taskList = control.get('tasks', None)
    if taskList is not None:
        taskLabelsList = []
        for task in taskList:
            validateOpfJsonValue(task, 'opfTaskSchema.json')
            validateOpfJsonValue(task['taskControl'], 'opfTaskControlSchema.json')
            taskLabel = task['taskLabel']
            assert isinstance(taskLabel, types.StringTypes), (
                'taskLabel type: %r' % type(taskLabel))
            assert len(taskLabel) > 0, 'empty string taskLabel is not allowed'
            taskLabelsList.append(taskLabel.lower())

        taskLabelDuplicates = filter(lambda x: taskLabelsList.count(x) > 1,
                                     taskLabelsList)
        assert len(taskLabelDuplicates) == 0, (
            'Duplicate task labels are not allowed: %s' % taskLabelDuplicates)
    return
'Validates control dictionary for the nupic engine context'
def __validateNupicControl(self, control):
validateOpfJsonValue(control, 'nupicControlSchema.json')
'TODO: document :param stream:'
def normalizeStreamSource(self, stream):
    source = stream['source'][len(FILE_SCHEME):]
    if os.path.isabs(source):
        sourcePath = source
    else:
        sourcePath = resource_filename('nupic.datafiles', source)
    if not os.path.exists(sourcePath):
        sourcePath = os.path.join(os.getcwd(), source)
    stream['source'] = FILE_SCHEME + sourcePath
'TODO: document'
def normalizeStreamSources(self):
    task = dict(self.__control)
    if 'dataset' in task:
        for stream in task['dataset']['streams']:
            self.normalizeStreamSource(stream)
    else:
        for subtask in task['tasks']:
            for stream in subtask['dataset']['streams']:
                self.normalizeStreamSource(stream)
'TODO: document'
def convertNupicEnvToOPF(self):
    task = dict(self.__control)
    task.pop('environment')
    inferenceArgs = task.pop('inferenceArgs')
    task['taskLabel'] = 'DefaultTask'

    iterationCount = task.get('iterationCount', -1)
    iterationCountInferOnly = task.pop('iterationCountInferOnly', 0)
    if iterationCountInferOnly == -1:
        iterationCycle = [IterationPhaseSpecInferOnly(1000, inferenceArgs=inferenceArgs)]
    elif iterationCountInferOnly > 0:
        assert iterationCount > 0, (
            'When iterationCountInferOnly is specified, iterationCount must '
            'also be specified and not be -1')
        iterationCycle = [
            IterationPhaseSpecLearnAndInfer(iterationCount - iterationCountInferOnly,
                                            inferenceArgs=inferenceArgs),
            IterationPhaseSpecInferOnly(iterationCountInferOnly,
                                        inferenceArgs=inferenceArgs)]
    else:
        iterationCycle = [IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=inferenceArgs)]

    taskControl = dict(metrics=task.pop('metrics'),
                       loggedMetrics=task.pop('loggedMetrics'),
                       iterationCycle=iterationCycle)
    task['taskControl'] = taskControl
    self.__control = dict(environment=OpfEnvironment.Nupic, tasks=[task])
'requestedActivities: a sequence of PeriodicActivityRequest elements'
def __init__(self, requestedActivities=[]):
    self.__activities = []
    self.__appendActivities(requestedActivities)
    return
'Adds activities periodicActivities: A sequence of PeriodicActivityRequest elements'
def addActivities(self, periodicActivities):
    self.__appendActivities(periodicActivities)
    return
'Activity tick handler; services all activities Returns: True if controlling iterator says it\'s okay to keep going; False to stop'
def tick(self):
    for act in self.__activities:
        if not act.iteratorHolder[0]:
            continue
        try:
            next(act.iteratorHolder[0])
        except StopIteration:
            act.cb()
            if act.repeating:
                act.iteratorHolder[0] = iter(xrange(act.period - 1))
            else:
                act.iteratorHolder[0] = None
    return True
'periodicActivities: A sequence of PeriodicActivityRequest elements'
def __appendActivities(self, periodicActivities):
    for req in periodicActivities:
        act = self.Activity(repeating=req.repeating,
                            period=req.period,
                            cb=req.cb,
                            iteratorHolder=[iter(xrange(req.period - 1))])
        self.__activities.append(act)
    return
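A hedged usage sketch, assuming the surrounding class is the OPF PeriodicActivityMgr and that PeriodicActivityRequest is its (repeating, period, cb) request tuple (both names are assumptions here): each activity's callback fires every `period` ticks.
from nupic.frameworks.opf.opf_task_driver import (PeriodicActivityMgr,
                                                  PeriodicActivityRequest)  # assumed import path

def logProgress():
    pass   # e.g. emit metrics

mgr = PeriodicActivityMgr(requestedActivities=[
    PeriodicActivityRequest(repeating=True, period=100, cb=logProgress)])

for _ in xrange(1000):   # one tick per processed record
    mgr.tick()           # logProgress() fires on ticks 100, 200, ..., 1000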
'Add the label labelName to each record with record ROWID in range from start to end, noninclusive of end. This will recalculate all points from end to the last record stored in the internal cache of this classifier.'
def addLabel(self, start, end, labelName):
    if len(self.saved_states) == 0:
        raise HTMPredictionModelInvalidRangeError(
            "Invalid supplied range for 'addLabel'. Model has no saved records.")

    startID = self.saved_states[0].ROWID
    clippedStart = max(0, start - startID)
    clippedEnd = max(0, min(len(self.saved_states), end - startID))

    if clippedEnd <= clippedStart:
        raise HTMPredictionModelInvalidRangeError(
            "Invalid supplied range for 'addLabel'.",
            debugInfo={
                'requestRange': {'startRecordID': start, 'endRecordID': end},
                'clippedRequestRange': {'startRecordID': clippedStart,
                                        'endRecordID': clippedEnd},
                'validRange': {'startRecordID': startID,
                               'endRecordID': self.saved_states[len(self.saved_states) - 1].ROWID},
                'numRecordsStored': len(self.saved_states)})

    for state in self.saved_states[clippedStart:clippedEnd]:
        if labelName not in state.anomalyLabel:
            state.anomalyLabel.append(labelName)
            state.setByUser = True
            self._addRecordToKNN(state)

    assert len(self.saved_categories) > 0

    for state in self.saved_states[clippedEnd:]:
        self._updateState(state)
'Remove labels from each record with record ROWID in range from start to end, noninclusive of end. Removes all labels if labelFilter is None, otherwise only removes the labels equal to labelFilter. This will recalculate all points from end to the last record stored in the internal cache of this classifier.'
def removeLabels(self, start=None, end=None, labelFilter=None):
    if len(self.saved_states) == 0:
        raise HTMPredictionModelInvalidRangeError(
            "Invalid supplied range for 'removeLabels'. Model has no saved records.")

    startID = self.saved_states[0].ROWID
    clippedStart = 0 if start is None else max(0, start - startID)
    clippedEnd = (len(self.saved_states) if end is None
                  else max(0, min(len(self.saved_states), end - startID)))

    if clippedEnd <= clippedStart:
        raise HTMPredictionModelInvalidRangeError(
            "Invalid supplied range for 'removeLabels'.",
            debugInfo={
                'requestRange': {'startRecordID': start, 'endRecordID': end},
                'clippedRequestRange': {'startRecordID': clippedStart,
                                        'endRecordID': clippedEnd},
                'validRange': {'startRecordID': startID,
                               'endRecordID': self.saved_states[len(self.saved_states) - 1].ROWID},
                'numRecordsStored': len(self.saved_states)})

    recordsToDelete = []
    for state in self.saved_states[clippedStart:clippedEnd]:
        if labelFilter is not None:
            if labelFilter in state.anomalyLabel:
                state.anomalyLabel.remove(labelFilter)
        else:
            state.anomalyLabel = []
        state.setByUser = False
        recordsToDelete.append(state)
    self._deleteRecordsFromKNN(recordsToDelete)

    self._deleteRangeFromKNN(start, end)

    for state in self.saved_states[clippedEnd:]:
        self._updateState(state)

    return {'status': 'success'}
'This method will add the record to the KNN classifier.'
def _addRecordToKNN(self, record):
    classifier = self.htm_prediction_model._getAnomalyClassifier()
    knn = classifier.getSelf()._knn
    prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
    category = self._labelListToCategoryNumber(record.anomalyLabel)

    if record.ROWID in prototype_idx:
        knn.prototypeSetCategory(record.ROWID, category)
        return

    pattern = self._getStateAnomalyVector(record)
    rowID = record.ROWID
    knn.learn(pattern, category, rowID=rowID)
'This method will remove the given records from the classifier. parameters recordsToDelete - list of records to delete from the classififier'
def _deleteRecordsFromKNN(self, recordsToDelete):
    classifier = self.htm_prediction_model._getAnomalyClassifier()
    knn = classifier.getSelf()._knn
    prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
    idsToDelete = [r.ROWID for r in recordsToDelete
                   if not r.setByUser and r.ROWID in prototype_idx]
    nProtos = knn._numPatterns
    knn.removeIds(idsToDelete)
    assert knn._numPatterns == nProtos - len(idsToDelete)
'This method will remove any stored records within the range from start to end. Noninclusive of end. parameters start - integer representing the ROWID of the start of the deletion range, end - integer representing the ROWID of the end of the deletion range; if None, it will default to one past the last stored prototype ROWID.'
def _deleteRangeFromKNN(self, start=0, end=None):
    classifier = self.htm_prediction_model._getAnomalyClassifier()
    knn = classifier.getSelf()._knn
    prototype_idx = numpy.array(classifier.getSelf().getParameter('categoryRecencyList'))
    if end is None:
        end = prototype_idx.max() + 1
    idsIdxToDelete = numpy.logical_and(prototype_idx >= start, prototype_idx < end)
    idsToDelete = prototype_idx[idsIdxToDelete]
    nProtos = knn._numPatterns
    knn.removeIds(idsToDelete.tolist())
    assert knn._numPatterns == nProtos - len(idsToDelete)
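A small numpy illustration of the range mask built above: prototype ROWIDs falling in [start, end) are the ones selected for removal (the values here are made up).
import numpy

prototype_idx = numpy.array([3, 7, 12, 20])
mask = numpy.logical_and(prototype_idx >= 5, prototype_idx < 15)
idsToDelete = prototype_idx[mask]     # -> array([ 7, 12])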
'return the classified labeling of record'
def _recomputeRecordFromKNN(self, record):
    inputs = {'categoryIn': [None],
              'bottomUpIn': self._getStateAnomalyVector(record)}
    outputs = {'categoriesOut': numpy.zeros((1,)),
               'bestPrototypeIndices': numpy.zeros((1,)),
               'categoryProbabilitiesOut': numpy.zeros((1,))}

    classifier = self.htm_prediction_model._getAnomalyClassifier()
    knn = classifier.getSelf()._knn

    classifier_indexes = numpy.array(classifier.getSelf().getParameter('categoryRecencyList'))
    valid_idx = numpy.where(
        (classifier_indexes >= self._autoDetectWaitRecords) &
        (classifier_indexes < record.ROWID))[0].tolist()
    if len(valid_idx) == 0:
        return None

    classifier.setParameter('inferenceMode', True)
    classifier.setParameter('learningMode', False)
    classifier.getSelf().compute(inputs, outputs)
    classifier.setParameter('learningMode', True)

    classifier_distances = classifier.getSelf().getLatestDistances()
    valid_distances = classifier_distances[valid_idx]
    if valid_distances.min() <= self._classificationMaxDist:
        classifier_indexes_prev = classifier_indexes[valid_idx]
        rowID = classifier_indexes_prev[valid_distances.argmin()]
        indexID = numpy.where(classifier_indexes == rowID)[0][0]
        category = classifier.getSelf().getCategoryList()[indexID]
        return category
    return None
'Construct a _HTMClassificationRecord based on the current state of the htm_prediction_model of this classifier. ***This will look into the internals of the model and may depend on the SP, TM, and KNNClassifier***'
def _constructClassificationRecord(self):
    model = self.htm_prediction_model
    sp = model._getSPRegion()
    tm = model._getTPRegion()
    tpImp = tm.getSelf()._tfdr

    activeColumns = sp.getOutputData('bottomUpOut').nonzero()[0]
    score = numpy.in1d(activeColumns, self._prevPredictedColumns).sum()
    score = (self._activeColumnCount - score) / float(self._activeColumnCount)

    spSize = sp.getParameter('activeOutputCount')
    tpSize = tm.getParameter('cellsPerColumn') * tm.getParameter('columnCount')

    classificationVector = numpy.array([])
    if self._vectorType == 'tpc':
        classificationVector = numpy.zeros(tpSize)
        activeCellMatrix = tpImp.getLearnActiveStateT().reshape(tpSize, 1)
        activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
        if activeCellIdx.shape[0] > 0:
            classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
    elif self._vectorType == 'sp_tpe':
        classificationVector = numpy.zeros(spSize + spSize)
        if activeColumns.shape[0] > 0:
            classificationVector[activeColumns] = 1.0
        errorColumns = numpy.setdiff1d(self._prevPredictedColumns, activeColumns)
        if errorColumns.shape[0] > 0:
            errorColumnIndexes = numpy.array(errorColumns, dtype=numpy.uint16) + spSize
            classificationVector[errorColumnIndexes] = 1.0
    else:
        raise TypeError("Classification vector type must be either 'tpc' or "
                        "'sp_tpe', current value is %s" % self._vectorType)

    numPredictedCols = len(self._prevPredictedColumns)
    predictedColumns = tm.getOutputData('topDownOut').nonzero()[0]
    self._prevPredictedColumns = copy.deepcopy(predictedColumns)

    if self._anomalyVectorLength is None:
        self._anomalyVectorLength = len(classificationVector)

    result = _CLAClassificationRecord(
        ROWID=int(model.getParameter('__numRunCalls') - 1),
        anomalyScore=score,
        anomalyVector=classificationVector.nonzero()[0].tolist(),
        anomalyLabel=[])
    return result
'Run an iteration of this anomaly classifier'
def compute(self):
    result = self._constructClassificationRecord()

    if result.ROWID >= self._autoDetectWaitRecords:
        self._updateState(result)

    self.saved_states.append(result)
    if len(self.saved_states) > self._history_length:
        self.saved_states.pop(0)

    return result
'Sets the autoDetectWaitRecords.'
def setAutoDetectWaitRecords(self, waitRecords):
    if not isinstance(waitRecords, int):
        raise HTMPredictionModelInvalidArgument(
            "Invalid argument type '%s'. WaitRecord must be a number." % type(waitRecords))
    if len(self.saved_states) > 0 and waitRecords < self.saved_states[0].ROWID:
        raise HTMPredictionModelInvalidArgument(
            'Invalid value. autoDetectWaitRecord value must be valid record within '
            'output stream. Current minimum ROWID in output stream is %d.'
            % self.saved_states[0].ROWID)
    self._autoDetectWaitRecords = waitRecords
    for state in self.saved_states:
        self._updateState(state)
'Return the autoDetectWaitRecords.'
def getAutoDetectWaitRecords(self):
return self._autoDetectWaitRecords
'Sets the autoDetectThreshold. TODO: Ensure previously classified points outside of classifier are valid.'
def setAutoDetectThreshold(self, threshold):
    if not (isinstance(threshold, float) or isinstance(threshold, int)):
        raise HTMPredictionModelInvalidArgument(
            "Invalid argument type '%s'. threshold must be a number." % type(threshold))
    self._autoDetectThreshold = threshold
    for state in self.saved_states:
        self._updateState(state)
'Return the autoDetectThreshold.'
def getAutoDetectThreshold(self):
return self._autoDetectThreshold
'Since the KNN Classifier stores categories as numbers, we must store each label as a number. This method converts from a label to a unique number. Each label is assigned a unique bit so multiple labels may be assigned to a single record.'
def _labelToCategoryNumber(self, label):
    if label not in self.saved_categories:
        self.saved_categories.append(label)
    return pow(2, self.saved_categories.index(label))
'This method takes a list of labels and returns a unique category number. This enables this class to store a list of categories for each point since the KNN classifier only stores a single number category for each record.'
def _labelListToCategoryNumber(self, labelList):
    categoryNumber = 0
    for label in labelList:
        categoryNumber += self._labelToCategoryNumber(label)
    return categoryNumber
'Converts a category number into a list of labels'
def _categoryToLabelList(self, category):
    if category is None:
        return []
    labelList = []
    labelNum = 0
    while category > 0:
        if category % 2 == 1:
            labelList.append(self.saved_categories[labelNum])
        labelNum += 1
        category = category >> 1
    return labelList
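A standalone illustration of the one-bit-per-label encoding shared by the three methods above (illustration only, not the class methods themselves): each label owns a power of two, so a single category number can carry several labels at once.
saved_categories = []

def label_to_number(label):
    if label not in saved_categories:
        saved_categories.append(label)
    return 2 ** saved_categories.index(label)

def number_to_labels(category):
    labels, bit = [], 0
    while category > 0:
        if category % 2 == 1:
            labels.append(saved_categories[bit])
        bit += 1
        category >>= 1
    return labels

# label_to_number('spike') -> 1, label_to_number('drift') -> 2
# number_to_labels(3)      -> ['spike', 'drift']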
'Returns a state\'s anomaly vector, converting it from sparse to dense.'
def _getStateAnomalyVector(self, state):
    vector = numpy.zeros(self._anomalyVectorLength)
    vector[state.anomalyVector] = 1
    return vector
'Create a new MovingAverage instance so that the .next() method can be used. @param windowSize - length of the sliding window @param existingHistoricalValues - optional list of existing values to seed the sliding window with.'
def __init__(self, windowSize, existingHistoricalValues=None):
    if not isinstance(windowSize, numbers.Integral):
        raise TypeError('MovingAverage - windowSize must be integer type')
    if windowSize <= 0:
        raise ValueError('MovingAverage - windowSize must be >0')

    self.windowSize = windowSize
    if existingHistoricalValues is not None:
        self.slidingWindow = existingHistoricalValues[
            len(existingHistoricalValues) - windowSize:]
    else:
        self.slidingWindow = []
    self.total = float(sum(self.slidingWindow))
'Routine for computing a moving average. @param slidingWindow a list of previous values to use in computation that will be modified and returned @param total the sum of the values in slidingWindow to be used in the calculation of the moving average @param newVal a new number compute the new windowed average @param windowSize how many values to use in the moving window @returns an updated windowed average, the modified input slidingWindow list, and the new total sum of the sliding window'
@staticmethod
def compute(slidingWindow, total, newVal, windowSize):
    if len(slidingWindow) == windowSize:
        total -= slidingWindow.pop(0)
    slidingWindow.append(newVal)
    total += newVal
    return float(total) / len(slidingWindow), slidingWindow, total
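A hedged usage sketch of the static helper above, assuming the nupic.utils import path: the caller owns the window list and the running total and threads both through successive calls.
from nupic.utils import MovingAverage   # assumed import path

window, total = [], 0.0
for value in (3.0, 5.0, 7.0, 9.0):
    avg, window, total = MovingAverage.compute(window, total, value, windowSize=3)
# window is now [5.0, 7.0, 9.0] and avg == 7.0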
'Instance method wrapper around compute.'
def next(self, newValue):
    newAverage, self.slidingWindow, self.total = self.compute(
        self.slidingWindow, self.total, newValue, self.windowSize)
    return newAverage
'get current average'
def getCurrentAvg(self):
return (float(self.total) / len(self.slidingWindow))
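The equivalent instance-level usage via next() and getCurrentAvg(), with illustrative values:
from nupic.utils import MovingAverage   # assumed import path

ma = MovingAverage(windowSize=3)
for value in (3.0, 5.0, 7.0, 9.0):
    avg = ma.next(value)
# ma.getCurrentAvg() == 7.0; the window holds the last three values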
'for loading this object'
def __setstate__(self, state):
    self.__dict__.update(state)
    if not hasattr(self, 'slidingWindow'):
        self.slidingWindow = []
    if not hasattr(self, 'total'):
        # Rebuild the running total from the restored window.
        self.total = float(sum(self.slidingWindow))
'Get Cap\'n Proto schema. .. warning:: This is an abstract method. Per abc protocol, attempts to subclass without overriding will fail. @returns Cap\'n Proto schema'
@classmethod
@abstractmethod
def getSchema(cls):
pass
'Create a new object initialized from Cap\'n Proto obj. Note: This is an abstract method. Per abc protocol, attempts to subclass without overriding will fail. :param proto: Cap\'n Proto obj :return: Obj initialized from proto'
@classmethod
@abstractmethod
def read(cls, proto):
pass
'Write obj instance to Cap\'n Proto object. .. warning:: This is an abstract method. Per abc protocol, attempts to subclass without overriding will fail. :param proto: Cap\'n Proto obj'
@abstractmethod
def write(self, proto):
pass
'Read serialized object from file. :param f: input file :param packed: If true, will assume content is packed :return: first-class instance initialized from proto obj'
@classmethod
def readFromFile(cls, f, packed=True):
    schema = cls.getSchema()
    if packed:
        proto = schema.read_packed(f)
    else:
        proto = schema.read(f)
    return cls.read(proto)
'Write serialized object to file. :param f: output file :param packed: If true, will pack contents.'
def writeToFile(self, f, packed=True):
    schema = self.getSchema()
    proto = schema.new_message()
    self.write(proto)
    if packed:
        proto.write_packed(f)
    else:
        proto.write(f)
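A hedged round-trip sketch for the two file helpers above, assuming `model` is an instance of a concrete Serializable subclass (for example a TemporalMemory built with Cap'n Proto support):
with open('model.bin', 'wb') as f:
    model.writeToFile(f, packed=True)

with open('model.bin', 'rb') as f:
    restored = type(model).readFromFile(f, packed=True)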
'Create a :class:`~nupic.algorithms.connections.Connections` instance. :class:`TemporalMemory` subclasses may override this method to choose a different :class:`~nupic.algorithms.connections.Connections` implementation, or to augment the instance otherwise returned by the default :class:`~nupic.algorithms.connections.Connections` implementation. See :class:`~nupic.algorithms.connections.Connections` for constructor signature and usage. :returns: :class:`~nupic.algorithms.connections.Connections` instance'
@staticmethod
def connectionsFactory(*args, **kwargs):
return Connections(*args, **kwargs)
'Perform one time step of the Temporal Memory algorithm. This method calls :meth:`activateCells`, then calls :meth:`activateDendrites`. Using :class:`TemporalMemory` via its :meth:`compute` method ensures that you\'ll always be able to call :meth:`getPredictiveCells` to get predictions for the next time step. :param activeColumns: (iter) Indices of active columns. :param learn: (bool) Whether or not learning is enabled.'
def compute(self, activeColumns, learn=True):
    self.activateCells(sorted(activeColumns), learn)
    self.activateDendrites(learn)
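A hedged usage sketch of the compute()/getPredictiveCells() cycle described above; the parameters, column indices, and import path are illustrative assumptions only.
from nupic.algorithms.temporal_memory import TemporalMemory   # assumed import path

tm = TemporalMemory(columnDimensions=(2048,), cellsPerColumn=32)

sequence = [[3, 17, 98], [42, 666, 1023], [3, 17, 98]]
for activeColumns in sequence:
    tm.compute(activeColumns, learn=True)
    predicted = tm.getPredictiveCells()   # predictions for the next time step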
'Calculate the active cells, using the current active columns and dendrite segments. Grow and reinforce synapses. :param activeColumns: (iter) A sorted list of active column indices. :param learn: (bool) If true, reinforce / punish / grow synapses. **Pseudocode:** for each column if column is active and has active distal dendrite segments call activatePredictedColumn if column is active and doesn\'t have active distal dendrite segments call burstColumn if column is inactive and has matching distal dendrite segments call punishPredictedColumn'
def activateCells(self, activeColumns, learn=True):
    prevActiveCells = self.activeCells
    prevWinnerCells = self.winnerCells
    self.activeCells = []
    self.winnerCells = []

    segToCol = lambda segment: int(segment.cell / self.cellsPerColumn)
    identity = lambda x: x

    for columnData in groupby2(activeColumns, identity,
                               self.activeSegments, segToCol,
                               self.matchingSegments, segToCol):
        (column,
         activeColumns,
         columnActiveSegments,
         columnMatchingSegments) = columnData
        if activeColumns is not None:
            if columnActiveSegments is not None:
                cellsToAdd = self.activatePredictedColumn(column,
                                                          columnActiveSegments,
                                                          columnMatchingSegments,
                                                          prevActiveCells,
                                                          prevWinnerCells,
                                                          learn)
                self.activeCells += cellsToAdd
                self.winnerCells += cellsToAdd
            else:
                (cellsToAdd,
                 winnerCell) = self.burstColumn(column,
                                                columnMatchingSegments,
                                                prevActiveCells,
                                                prevWinnerCells,
                                                learn)
                self.activeCells += cellsToAdd
                self.winnerCells.append(winnerCell)
        elif learn:
            self.punishPredictedColumn(column,
                                       columnActiveSegments,
                                       columnMatchingSegments,
                                       prevActiveCells,
                                       prevWinnerCells)
'Calculate dendrite segment activity, using the current active cells. :param learn: (bool) If true, segment activations will be recorded. This information is used during segment cleanup. **Pseudocode:** for each distal dendrite segment with activity >= activationThreshold mark the segment as active for each distal dendrite segment with unconnected activity >= minThreshold mark the segment as matching'
def activateDendrites(self, learn=True):
    (numActiveConnected,
     numActivePotential) = self.connections.computeActivity(
         self.activeCells, self.connectedPermanence)

    activeSegments = (
        self.connections.segmentForFlatIdx(i)
        for i in xrange(len(numActiveConnected))
        if numActiveConnected[i] >= self.activationThreshold)

    matchingSegments = (
        self.connections.segmentForFlatIdx(i)
        for i in xrange(len(numActivePotential))
        if numActivePotential[i] >= self.minThreshold)

    self.activeSegments = sorted(activeSegments,
                                 key=self.connections.segmentPositionSortKey)
    self.matchingSegments = sorted(matchingSegments,
                                   key=self.connections.segmentPositionSortKey)
    self.numActiveConnectedSynapsesForSegment = numActiveConnected
    self.numActivePotentialSynapsesForSegment = numActivePotential

    if learn:
        for segment in self.activeSegments:
            self.lastUsedIterationForSegment[segment.flatIdx] = self.iteration
        self.iteration += 1
'Indicates the start of a new sequence. Clears any predictions and makes sure synapses don\'t grow to the currently active cells in the next time step.'
def reset(self):
    self.activeCells = []
    self.winnerCells = []
    self.activeSegments = []
    self.matchingSegments = []
'Determines which cells in a predicted column should be added to winner cells list, and learns on the segments that correctly predicted this column. :param column: (int) Index of bursting column. :param columnActiveSegments: (iter) Active segments in this column. :param columnMatchingSegments: (iter) Matching segments in this column. :param prevActiveCells: (list) Active cells in ``t-1``. :param prevWinnerCells: (list) Winner cells in ``t-1``. :param learn: (bool) If true, grow and reinforce synapses. :returns: (list) A list of predicted cells that will be added to active cells and winner cells.'
def activatePredictedColumn(self, column, columnActiveSegments, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn):
return self._activatePredictedColumn(self.connections, self._random, columnActiveSegments, prevActiveCells, prevWinnerCells, self.numActivePotentialSynapsesForSegment, self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement, self.permanenceDecrement, self.maxSynapsesPerSegment, learn)
'Activates all of the cells in an unpredicted active column, chooses a winner cell, and, if learning is turned on, learns on one segment, growing a new segment if necessary. :param column: (int) Index of bursting column. :param columnMatchingSegments: (iter) Matching segments in this column, or None if there aren\'t any. :param prevActiveCells: (list) Active cells in ``t-1``. :param prevWinnerCells: (list) Winner cells in ``t-1``. :param learn: (bool) Whether or not learning is enabled. :returns: (tuple) Contains (``cells`` [iter], ``winnerCell`` [int])'
def burstColumn(self, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn):
    start = self.cellsPerColumn * column
    cellsForColumn = xrange(start, start + self.cellsPerColumn)
    return self._burstColumn(self.connections, self._random,
                             self.lastUsedIterationForSegment, column,
                             columnMatchingSegments, prevActiveCells,
                             prevWinnerCells, cellsForColumn,
                             self.numActivePotentialSynapsesForSegment,
                             self.iteration, self.maxNewSynapseCount,
                             self.initialPermanence, self.permanenceIncrement,
                             self.permanenceDecrement, self.maxSegmentsPerCell,
                             self.maxSynapsesPerSegment, learn)
'Punishes the Segments that incorrectly predicted a column to be active. :param column: (int) Index of bursting column. :param columnActiveSegments: (iter) Active segments for this column, or None if there aren\'t any. :param columnMatchingSegments: (iter) Matching segments for this column, or None if there aren\'t any. :param prevActiveCells: (list) Active cells in ``t-1``. :param prevWinnerCells: (list) Winner cells in ``t-1``.'
def punishPredictedColumn(self, column, columnActiveSegments, columnMatchingSegments, prevActiveCells, prevWinnerCells):
self._punishPredictedColumn(self.connections, columnMatchingSegments, prevActiveCells, self.predictedSegmentDecrement)
'Create a :class:`~nupic.algorithms.connections.Segment` on the specified cell. This method calls :meth:`~nupic.algorithms.connections.Connections.createSegment` on the underlying :class:`~nupic.algorithms.connections.Connections`, and it does some extra bookkeeping. Unit tests should call this method, and not :meth:`~nupic.algorithms.connections.Connections.createSegment`. :param cell: (int) Index of cell to create a segment on. :returns: (:class:`~nupic.algorithms.connections.Segment`) The created segment.'
def createSegment(self, cell):
return self._createSegment(self.connections, self.lastUsedIterationForSegment, cell, self.iteration, self.maxSegmentsPerCell)
':param connections: (Object) Connections for the TM. Gets mutated. :param random: (Object) Random number generator. Gets mutated. :param columnActiveSegments: (iter) Active segments in this column. :param prevActiveCells: (list) Active cells in `t-1`. :param prevWinnerCells: (list) Winner cells in `t-1`. :param numActivePotentialSynapsesForSegment: (list) Number of active potential synapses per segment, indexed by the segment\'s flatIdx. :param maxNewSynapseCount: (int) The maximum number of synapses added to a segment during learning :param initialPermanence: (float) Initial permanence of a new synapse. @permanenceIncrement (float) Amount by which permanences of synapses are incremented during learning. @permanenceDecrement (float) Amount by which permanences of synapses are decremented during learning. :param maxSynapsesPerSegment: (int) The maximum number of synapses per segment. :param learn: (bool) If true, grow and reinforce synapses. :returns: cellsToAdd (list) A list of predicted cells that will be added to active cells and winner cells. Pseudocode: for each cell in the column that has an active distal dendrite segment mark the cell as active mark the cell as a winner cell (learning) for each active distal dendrite segment strengthen active synapses weaken inactive synapses grow synapses to previous winner cells'
@classmethod
def _activatePredictedColumn(cls, connections, random, columnActiveSegments,
                             prevActiveCells, prevWinnerCells,
                             numActivePotentialSynapsesForSegment,
                             maxNewSynapseCount, initialPermanence,
                             permanenceIncrement, permanenceDecrement,
                             maxSynapsesPerSegment, learn):
    cellsToAdd = []
    previousCell = None
    for segment in columnActiveSegments:
        if segment.cell != previousCell:
            cellsToAdd.append(segment.cell)
            previousCell = segment.cell

        if learn:
            cls._adaptSegment(connections, segment, prevActiveCells,
                              permanenceIncrement, permanenceDecrement)

            active = numActivePotentialSynapsesForSegment[segment.flatIdx]
            nGrowDesired = maxNewSynapseCount - active

            if nGrowDesired > 0:
                cls._growSynapses(connections, random, segment, nGrowDesired,
                                  prevWinnerCells, initialPermanence,
                                  maxSynapsesPerSegment)

    return cellsToAdd
':param connections: (Object) Connections for the TM. Gets mutated. :param random: (Object) Random number generator. Gets mutated. :param lastUsedIterationForSegment: (list) Last used iteration for each segment, indexed by the segment\'s flatIdx. Gets mutated. :param column: (int) Index of bursting column. :param columnMatchingSegments: (iter) Matching segments in this column. :param prevActiveCells: (list) Active cells in `t-1`. :param prevWinnerCells: (list) Winner cells in `t-1`. :param cellsForColumn: (sequence) Range of cell indices on which to operate. :param numActivePotentialSynapsesForSegment: (list) Number of active potential synapses per segment, indexed by the segment\'s flatIdx. :param iteration: (int) The current timestep. :param maxNewSynapseCount: (int) The maximum number of synapses added to a segment during learning. :param initialPermanence: (float) Initial permanence of a new synapse. :param permanenceIncrement: (float) Amount by which permanences of synapses are incremented during learning. :param permanenceDecrement: (float) Amount by which permanences of synapses are decremented during learning. :param maxSegmentsPerCell: (int) The maximum number of segments per cell. :param maxSynapsesPerSegment: (int) The maximum number of synapses per segment. :param learn: (bool) Whether or not learning is enabled. :returns: (tuple) Contains: `cells` (iter), `winnerCell` (int), Pseudocode: mark all cells as active if there are any matching distal dendrite segments find the most active matching segment mark its cell as a winner cell (learning) grow and reinforce synapses to previous winner cells else find the cell with the least segments, mark it as a winner cell (learning) (optimization) if there are prev winner cells add a segment to this winner cell grow synapses to previous winner cells'
@classmethod
def _burstColumn(cls, connections, random, lastUsedIterationForSegment, column,
                 columnMatchingSegments, prevActiveCells, prevWinnerCells,
                 cellsForColumn, numActivePotentialSynapsesForSegment,
                 iteration, maxNewSynapseCount, initialPermanence,
                 permanenceIncrement, permanenceDecrement, maxSegmentsPerCell,
                 maxSynapsesPerSegment, learn):
    if columnMatchingSegments is not None:
        numActive = lambda s: numActivePotentialSynapsesForSegment[s.flatIdx]
        bestMatchingSegment = max(columnMatchingSegments, key=numActive)
        winnerCell = bestMatchingSegment.cell

        if learn:
            cls._adaptSegment(connections, bestMatchingSegment, prevActiveCells,
                              permanenceIncrement, permanenceDecrement)

            nGrowDesired = maxNewSynapseCount - numActive(bestMatchingSegment)

            if nGrowDesired > 0:
                cls._growSynapses(connections, random, bestMatchingSegment,
                                  nGrowDesired, prevWinnerCells,
                                  initialPermanence, maxSynapsesPerSegment)
    else:
        winnerCell = cls._leastUsedCell(random, cellsForColumn, connections)
        if learn:
            nGrowExact = min(maxNewSynapseCount, len(prevWinnerCells))
            if nGrowExact > 0:
                segment = cls._createSegment(connections,
                                             lastUsedIterationForSegment,
                                             winnerCell, iteration,
                                             maxSegmentsPerCell)
                cls._growSynapses(connections, random, segment, nGrowExact,
                                  prevWinnerCells, initialPermanence,
                                  maxSynapsesPerSegment)

    return (cellsForColumn, winnerCell)
':param connections: (Object) Connections for the TM. Gets mutated. :param columnMatchingSegments: (iter) Matching segments for this column. :param prevActiveCells: (list) Active cells in `t-1`. :param predictedSegmentDecrement: (float) Amount by which segments are punished for incorrect predictions. Pseudocode: for each matching segment in the column weaken active synapses'
@classmethod
def _punishPredictedColumn(cls, connections, columnMatchingSegments,
                           prevActiveCells, predictedSegmentDecrement):
    if predictedSegmentDecrement > 0.0 and columnMatchingSegments is not None:
        for segment in columnMatchingSegments:
            cls._adaptSegment(connections, segment, prevActiveCells,
                              -predictedSegmentDecrement, 0.0)
'Create a segment on the connections, enforcing the maxSegmentsPerCell parameter.'
@classmethod
def _createSegment(cls, connections, lastUsedIterationForSegment, cell,
                   iteration, maxSegmentsPerCell):
    while connections.numSegments(cell) >= maxSegmentsPerCell:
        leastRecentlyUsedSegment = min(
            connections.segmentsForCell(cell),
            key=lambda segment: lastUsedIterationForSegment[segment.flatIdx])
        connections.destroySegment(leastRecentlyUsedSegment)

    segment = connections.createSegment(cell)
    if segment.flatIdx == len(lastUsedIterationForSegment):
        lastUsedIterationForSegment.append(iteration)
    elif segment.flatIdx < len(lastUsedIterationForSegment):
        lastUsedIterationForSegment[segment.flatIdx] = iteration
    else:
        raise AssertionError('All segments should be created with the TM createSegment method.')

    return segment
'Destroy nDestroy synapses on the specified segment, but don\'t destroy synapses to the "excludeCells".'
@classmethod
def _destroyMinPermanenceSynapses(cls, connections, random, segment, nDestroy,
                                  excludeCells):
    destroyCandidates = sorted(
        (synapse for synapse in connections.synapsesForSegment(segment)
         if synapse.presynapticCell not in excludeCells),
        key=lambda s: s._ordinal)

    for _ in xrange(nDestroy):
        if len(destroyCandidates) == 0:
            break

        minSynapse = None
        minPermanence = float('inf')
        for synapse in destroyCandidates:
            if synapse.permanence < minPermanence - EPSILON:
                minSynapse = synapse
                minPermanence = synapse.permanence

        connections.destroySynapse(minSynapse)
        destroyCandidates.remove(minSynapse)
'Gets the cell with the smallest number of segments. Break ties randomly. :param random: (Object) Random number generator. Gets mutated. :param cells: (list) Indices of cells. :param connections: (Object) Connections instance for the TM. :returns: (int) Cell index.'
@classmethod
def _leastUsedCell(cls, random, cells, connections):
    leastUsedCells = []
    minNumSegments = float('inf')
    for cell in cells:
        numSegments = connections.numSegments(cell)
        if numSegments < minNumSegments:
            minNumSegments = numSegments
            leastUsedCells = []
        if numSegments == minNumSegments:
            leastUsedCells.append(cell)

    i = random.getUInt32(len(leastUsedCells))
    return leastUsedCells[i]
'Creates nDesiredNewSynapes synapses on the segment passed in if possible, choosing random cells from the previous winner cells that are not already on the segment. :param connections: (Object) Connections instance for the tm :param random: (Object) TM object used to generate random numbers :param segment: (int) Segment to grow synapses on. :param nDesiredNewSynapes: (int) Desired number of synapses to grow :param prevWinnerCells: (list) Winner cells in `t-1` :param initialPermanence: (float) Initial permanence of a new synapse.'
@classmethod
def _growSynapses(cls, connections, random, segment, nDesiredNewSynapes,
                  prevWinnerCells, initialPermanence, maxSynapsesPerSegment):
    candidates = list(prevWinnerCells)
    for synapse in connections.synapsesForSegment(segment):
        i = binSearch(candidates, synapse.presynapticCell)
        if i != -1:
            del candidates[i]

    nActual = min(nDesiredNewSynapes, len(candidates))

    overrun = connections.numSynapses(segment) + nActual - maxSynapsesPerSegment
    if overrun > 0:
        cls._destroyMinPermanenceSynapses(connections, random, segment, overrun,
                                          prevWinnerCells)

    nActual = min(nActual,
                  maxSynapsesPerSegment - connections.numSynapses(segment))

    for _ in range(nActual):
        i = random.getUInt32(len(candidates))
        connections.createSynapse(segment, candidates[i], initialPermanence)
        del candidates[i]
'Updates synapses on segment. Strengthens active synapses; weakens inactive synapses. :param connections: (Object) Connections instance for the tm :param segment: (int) Segment to adapt :param prevActiveCells: (list) Active cells in `t-1` :param permanenceIncrement: (float) Amount to increment active synapses :param permanenceDecrement: (float) Amount to decrement inactive synapses'
@classmethod
def _adaptSegment(cls, connections, segment, prevActiveCells,
                  permanenceIncrement, permanenceDecrement):
    synapsesToDestroy = []

    for synapse in connections.synapsesForSegment(segment):
        permanence = synapse.permanence
        if binSearch(prevActiveCells, synapse.presynapticCell) != -1:
            permanence += permanenceIncrement
        else:
            permanence -= permanenceDecrement

        permanence = max(0.0, min(1.0, permanence))

        if permanence < EPSILON:
            synapsesToDestroy.append(synapse)
        else:
            connections.updateSynapsePermanence(synapse, permanence)

    for synapse in synapsesToDestroy:
        connections.destroySynapse(synapse)

    if connections.numSynapses(segment) == 0:
        connections.destroySegment(segment)
'Returns the index of the column that a cell belongs to. :param cell: (int) Cell index :returns: (int) Column index'
def columnForCell(self, cell):
    self._validateCell(cell)
    return int(cell / self.cellsPerColumn)
'Returns the indices of cells that belong to a column. :param column: (int) Column index :returns: (list) Cell indices'
def cellsForColumn(self, column):
    self._validateColumn(column)
    start = self.cellsPerColumn * column
    end = start + self.cellsPerColumn
    return range(start, end)
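A tiny sanity check of the cell/column index math above, using an illustrative layer with 32 cells per column (import path assumed as before):
from nupic.algorithms.temporal_memory import TemporalMemory   # assumed import path

tm = TemporalMemory(columnDimensions=(2048,), cellsPerColumn=32)
assert tm.columnForCell(70) == 2                          # 70 // 32
assert list(tm.cellsForColumn(2)) == list(range(64, 96))  # column 2 owns cells 64..95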
'Returns the number of columns in this layer. :returns: (int) Number of columns'
def numberOfColumns(self):
return reduce(mul, self.columnDimensions, 1)
'Returns the number of cells in this layer. :returns: (int) Number of cells'
def numberOfCells(self):
return (self.numberOfColumns() * self.cellsPerColumn)
'Maps cells to the columns they belong to. :param cells: (set) Cells :returns: (dict) Mapping from columns to their cells in `cells`'
def mapCellsToColumns(self, cells):
    cellsForColumns = defaultdict(set)
    for cell in cells:
        column = self.columnForCell(cell)
        cellsForColumns[column].add(cell)
    return cellsForColumns
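An illustration of the mapping above with the same 32-cells-per-column layout: cells 0 and 1 share column 0, while cell 64 belongs to column 2.
from nupic.algorithms.temporal_memory import TemporalMemory   # assumed import path

tm = TemporalMemory(columnDimensions=(2048,), cellsPerColumn=32)
tm.mapCellsToColumns(set([0, 1, 64]))   # -> {0: set([0, 1]), 2: set([64])}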
'Returns the indices of the active cells. :returns: (list) Indices of active cells.'
def getActiveCells(self):
return self.getCellIndices(self.activeCells)
'Returns the indices of the predictive cells. :returns: (list) Indices of predictive cells.'
def getPredictiveCells(self):
    previousCell = None
    predictiveCells = []
    for segment in self.activeSegments:
        if segment.cell != previousCell:
            predictiveCells.append(segment.cell)
            previousCell = segment.cell
    return predictiveCells
'Returns the indices of the winner cells. :returns: (list) Indices of winner cells.'
def getWinnerCells(self):
return self.getCellIndices(self.winnerCells)
'Returns the active segments. :returns: (list) Active segments'
def getActiveSegments(self):
return self.activeSegments
'Returns the matching segments. :returns: (list) Matching segments'
def getMatchingSegments(self):
return self.matchingSegments
'Returns the number of cells per column. :returns: (int) The number of cells per column.'
def getCellsPerColumn(self):
return self.cellsPerColumn
'Returns the dimensions of the columns in the region. :returns: (tuple) Column dimensions'
def getColumnDimensions(self):
return self.columnDimensions
'Returns the activation threshold. :returns: (int) The activation threshold.'
def getActivationThreshold(self):
return self.activationThreshold
'Sets the activation threshold. :param activationThreshold: (int) activation threshold.'
def setActivationThreshold(self, activationThreshold):
self.activationThreshold = activationThreshold
'Get the initial permanence. :returns: (float) The initial permanence.'
def getInitialPermanence(self):
return self.initialPermanence
'Sets the initial permanence. :param initialPermanence: (float) The initial permanence.'
def setInitialPermanence(self, initialPermanence):
self.initialPermanence = initialPermanence
'Returns the min threshold. :returns: (int) The min threshold.'
def getMinThreshold(self):
return self.minThreshold
'Sets the min threshold. :param minThreshold: (int) min threshold.'
def setMinThreshold(self, minThreshold):
self.minThreshold = minThreshold
'Returns the max new synapse count. :returns: (int) The max new synapse count.'
def getMaxNewSynapseCount(self):
return self.maxNewSynapseCount
'Sets the max new synapse count. :param maxNewSynapseCount: (int) Max new synapse count.'
def setMaxNewSynapseCount(self, maxNewSynapseCount):
self.maxNewSynapseCount = maxNewSynapseCount
'Get the permanence increment. :returns: (float) The permanence increment.'
def getPermanenceIncrement(self):
return self.permanenceIncrement
'Sets the permanence increment. :param permanenceIncrement: (float) The permanence increment.'
def setPermanenceIncrement(self, permanenceIncrement):
self.permanenceIncrement = permanenceIncrement
'Get the permanence decrement. :returns: (float) The permanence decrement.'
def getPermanenceDecrement(self):
return self.permanenceDecrement