'Test the distribution of predictions with overlapping input SDRs. Here, we intend the classifier to learn the associations: SDR1 => bucketIdx 0 (30%), bucketIdx 1 (30%), bucketIdx 2 (40%); SDR2 => bucketIdx 1 (50%), bucketIdx 3 (50%). SDR1 and SDR2 have 10% overlap (2 bits out of 20). The classifier should get the distribution almost right despite the overlap.'
def testPredictionDistributionOverlap(self):
  c = self._classifier([0], 0.0005, 0.1, 0)
  recordNum = 0
  SDR1 = numpy.arange(0, 39, step=2)
  SDR2 = numpy.arange(1, 40, step=2)
  SDR2[3] = SDR1[5]
  SDR2[5] = SDR1[11]
  random.seed(42)
  for _ in xrange(5000):
    randomNumber = random.random()
    if randomNumber < 0.3:
      bucketIdx = 0
    elif randomNumber < 0.6:
      bucketIdx = 1
    else:
      bucketIdx = 2
    c.compute(recordNum=recordNum, patternNZ=SDR1,
              classification={'bucketIdx': bucketIdx, 'actValue': bucketIdx},
              learn=True, infer=False)
    recordNum += 1

    randomNumber = random.random()
    if randomNumber < 0.5:
      bucketIdx = 1
    else:
      bucketIdx = 3
    c.compute(recordNum=recordNum, patternNZ=SDR2,
              classification={'bucketIdx': bucketIdx, 'actValue': bucketIdx},
              learn=True, infer=False)
    recordNum += 1

  result1 = c.compute(recordNum=recordNum, patternNZ=SDR1,
                      classification=None, learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result1[0][0], 0.3, places=1)
  self.assertAlmostEqual(result1[0][1], 0.3, places=1)
  self.assertAlmostEqual(result1[0][2], 0.4, places=1)

  result2 = c.compute(recordNum=recordNum, patternNZ=SDR2,
                      classification=None, learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result2[0][1], 0.5, places=1)
  self.assertAlmostEqual(result2[0][3], 0.5, places=1)
'Test the distribution of predictions. Here, we intend the classifier to learn the associations: [1,3,5] => bucketIdx 0 & 1; [2,4,6] => bucketIdx 2 & 3. The classifier should get the distribution almost right given enough repetitions and a small learning rate.'
def testPredictionMultipleCategories(self):
  c = self._classifier([0], 0.001, 0.1, 0)
  SDR1 = [1, 3, 5]
  SDR2 = [2, 4, 6]
  recordNum = 0
  random.seed(42)
  for _ in xrange(5000):
    c.compute(recordNum=recordNum, patternNZ=SDR1,
              classification={'bucketIdx': [0, 1], 'actValue': [0, 1]},
              learn=True, infer=False)
    recordNum += 1
    c.compute(recordNum=recordNum, patternNZ=SDR2,
              classification={'bucketIdx': [2, 3], 'actValue': [2, 3]},
              learn=True, infer=False)
    recordNum += 1

  result1 = c.compute(recordNum=recordNum, patternNZ=SDR1,
                      classification=None, learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result1[0][0], 0.5, places=1)
  self.assertAlmostEqual(result1[0][1], 0.5, places=1)

  result2 = c.compute(recordNum=recordNum, patternNZ=SDR2,
                      classification=None, learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result2[0][2], 0.5, places=1)
  self.assertAlmostEqual(result2[0][3], 0.5, places=1)
'Test continuous learning. First, we intend the classifier to learn the associations: SDR1 => bucketIdx 0 (30%), bucketIdx 1 (30%), bucketIdx 2 (40%); SDR2 => bucketIdx 1 (50%), bucketIdx 3 (50%). After 20000 iterations, we change the associations to SDR1 => bucketIdx 0 (30%), bucketIdx 1 (30%), bucketIdx 3 (40%), with no further training for SDR2. The classifier should adapt continuously and learn the new associations for SDR1, but at the same time remember the old association for SDR2.'
def testPredictionDistributionContinuousLearning(self):
  c = self._classifier([0], 0.001, 0.1, 0)
  recordNum = 0
  SDR1 = [1, 3, 5]
  SDR2 = [2, 4, 6]
  random.seed(42)
  for _ in xrange(10000):
    randomNumber = random.random()
    if randomNumber < 0.3:
      bucketIdx = 0
    elif randomNumber < 0.6:
      bucketIdx = 1
    else:
      bucketIdx = 2
    c.compute(recordNum=recordNum, patternNZ=SDR1,
              classification={'bucketIdx': bucketIdx, 'actValue': bucketIdx},
              learn=True, infer=False)
    recordNum += 1

    randomNumber = random.random()
    if randomNumber < 0.5:
      bucketIdx = 1
    else:
      bucketIdx = 3
    c.compute(recordNum=recordNum, patternNZ=SDR2,
              classification={'bucketIdx': bucketIdx, 'actValue': bucketIdx},
              learn=True, infer=True)
    recordNum += 1

  result1 = c.compute(recordNum=recordNum, patternNZ=SDR1,
                      classification={'bucketIdx': 0, 'actValue': 0},
                      learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result1[0][0], 0.3, places=1)
  self.assertAlmostEqual(result1[0][1], 0.3, places=1)
  self.assertAlmostEqual(result1[0][2], 0.4, places=1)

  result2 = c.compute(recordNum=recordNum, patternNZ=SDR2,
                      classification={'bucketIdx': 0, 'actValue': 0},
                      learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result2[0][1], 0.5, places=1)
  self.assertAlmostEqual(result2[0][3], 0.5, places=1)

  for _ in xrange(20000):
    randomNumber = random.random()
    if randomNumber < 0.3:
      bucketIdx = 0
    elif randomNumber < 0.6:
      bucketIdx = 1
    else:
      bucketIdx = 3
    c.compute(recordNum=recordNum, patternNZ=SDR1,
              classification={'bucketIdx': bucketIdx, 'actValue': bucketIdx},
              learn=True, infer=False)
    recordNum += 1

  result1new = c.compute(recordNum=recordNum, patternNZ=SDR1,
                         classification=None, learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result1new[0][0], 0.3, places=1)
  self.assertAlmostEqual(result1new[0][1], 0.3, places=1)
  self.assertAlmostEqual(result1new[0][3], 0.4, places=1)

  result2new = c.compute(recordNum=recordNum, patternNZ=SDR2,
                         classification=None, learn=False, infer=True)
  recordNum += 1
  self.assertSequenceEqual(list(result2[0]), list(result2new[0]))
'Test multi-step predictions We train the 0-step and the 1-step classifiers simultaneously on data stream (SDR1, bucketIdx0) (SDR2, bucketIdx1) (SDR1, bucketIdx0) (SDR2, bucketIdx1) We intend the 0-step classifier to learn the associations: SDR1 => bucketIdx 0 SDR2 => bucketIdx 1 and the 1-step classifier to learn the associations SDR1 => bucketIdx 1 SDR2 => bucketIdx 0'
def testMultiStepPredictions(self):
  c = self._classifier([0, 1], 1.0, 0.1, 0)
  SDR1 = [1, 3, 5]
  SDR2 = [2, 4, 6]
  recordNum = 0
  for _ in xrange(100):
    c.compute(recordNum=recordNum, patternNZ=SDR1,
              classification={'bucketIdx': 0, 'actValue': 0},
              learn=True, infer=False)
    recordNum += 1
    c.compute(recordNum=recordNum, patternNZ=SDR2,
              classification={'bucketIdx': 1, 'actValue': 1},
              learn=True, infer=False)
    recordNum += 1

  result1 = c.compute(recordNum=recordNum, patternNZ=SDR1,
                      classification=None, learn=False, infer=True)
  result2 = c.compute(recordNum=recordNum, patternNZ=SDR2,
                      classification=None, learn=False, infer=True)
  self.assertAlmostEqual(result1[0][0], 1.0, places=1)
  self.assertAlmostEqual(result1[0][1], 0.0, places=1)
  self.assertAlmostEqual(result2[0][0], 0.0, places=1)
  self.assertAlmostEqual(result2[0][1], 1.0, places=1)
'Test creation, pickling, and basic run of learning and inference.'
def _basicTest(self, tm=None):
  trainingSet = _getSimplePatterns(10, 10)
  for _ in range(2):
    for seq in trainingSet[0:5]:
      for _ in range(10):
        tm.learn(seq)
      tm.reset()
  print 'Learning completed'

  print 'Running inference'
  tm.collectStats = True
  for seq in trainingSet[0:5]:
    tm.reset()
    tm.resetStats()
    for _ in range(10):
      tm.infer(seq)
      if VERBOSITY > 1:
        print
        _printOneTrainingVector(seq)
        tm.printStates(False, False)
        print
        print
    if VERBOSITY > 1:
      print tm.getStats()
    self.assertGreater(tm.getStats()['predictionScoreAvg2'], 0.8)
    print ("tm.getStats()['predictionScoreAvg2'] = ",
           tm.getStats()['predictionScoreAvg2'])
  print 'TMConstant basicTest ok'
'Test cumulative anomaly scores.'
def testAnomalyCumulative(self):
  anomalyComputer = anomaly.Anomaly(slidingWindowSize=3)
  predicted = (array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]),
               array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]),
               array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]))
  actual = (array([1, 2, 6]), array([1, 2, 6]), array([1, 4, 6]),
            array([10, 11, 6]), array([10, 11, 12]), array([10, 11, 12]),
            array([10, 11, 12]), array([1, 2, 6]), array([1, 2, 6]))
  anomalyExpected = (0.0, 0.0, 1.0 / 9.0, 3.0 / 9.0, 2.0 / 3.0, 8.0 / 9.0,
                     1.0, 2.0 / 3.0, 1.0 / 3.0)
  for (act, pred, expected) in zip(actual, predicted, anomalyExpected):
    score = anomalyComputer.compute(act, pred)
    self.assertAlmostEqual(
        score, expected, places=5,
        msg="Anomaly score of %f doesn't match expected of %f" %
            (score, expected))
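The expected values above can be reproduced by hand. A minimal sketch, assuming the raw anomaly score is the fraction of active columns that were not predicted and that the reported score is the mean of the raw scores inside the sliding window:

# Sketch only: reproduces anomalyExpected under the stated assumptions,
# reusing the `actual` and `predicted` tuples from the test above.
def rawAnomaly(act, pred):
  act, pred = set(act), set(pred)
  return len(act - pred) / float(len(act))

window = []
for act, pred in zip(actual, predicted):
  window = (window + [rawAnomaly(act, pred)])[-3:]  # slidingWindowSize=3
  print(sum(window) / len(window))  # 0, 0, 1/9, 3/9, 2/3, 8/9, 1, 2/3, 1/3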
'serialization using pickle'
def testSerialization(self):
  aDef = Anomaly()
  aLike = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
  aWeig = Anomaly(mode=Anomaly.MODE_WEIGHTED)
  aAll = Anomaly(mode=Anomaly.MODE_LIKELIHOOD, slidingWindowSize=5)
  inst = [aDef, aLike, aWeig, aAll]
  for a in inst:
    stored = pickle.dumps(a)
    restored = pickle.loads(stored)
    self.assertEqual(a, restored)
'Feed in some vectors and retrieve outputs. Ensure the right number of columns win, that we always get binary outputs, and that nothing crashes.'
def basicComputeLoop(self, imp, params, inputSize, columnDimensions, seed=None):
  sp = CreateSP(imp, params)
  numRecords = 100
  randomState = getNumpyRandomGenerator(seed)
  inputMatrix = (randomState.rand(numRecords, inputSize) > 0.8).astype(uintType)
  y = numpy.zeros(columnDimensions, dtype=uintType)
  dutyCycles = numpy.zeros(columnDimensions, dtype=uintType)

  for v in inputMatrix:
    y.fill(0)
    sp.compute(v, True, y)
    self.assertEqual(sp.getNumActiveColumnsPerInhArea(), y.sum())
    self.assertEqual(0, y.min())
    self.assertEqual(1, y.max())

  for v in inputMatrix:
    y.fill(0)
    sp.compute(v, False, y)
    self.assertEqual(sp.getNumActiveColumnsPerInhArea(), y.sum())
    self.assertEqual(0, y.min())
    self.assertEqual(1, y.max())
'Run basicComputeLoop with mostly default parameters'
def testBasicCompute1(self):
  inputSize = 30
  columnDimensions = 50
  params = {
      'inputDimensions': [inputSize],
      'columnDimensions': [columnDimensions],
      'potentialRadius': inputSize,
      'globalInhibition': True,
      'seed': int((time.time() % 10000) * 10),
  }
  print 'testBasicCompute1, SP seed set to:', params['seed']
  self.basicComputeLoop('py', params, inputSize, columnDimensions)
  self.basicComputeLoop('cpp', params, inputSize, columnDimensions)
'Run basicComputeLoop with learning turned off.'
def testBasicCompute2(self):
  inputSize = 100
  columnDimensions = 100
  params = {
      'inputDimensions': [inputSize],
      'columnDimensions': [columnDimensions],
      'potentialRadius': inputSize,
      'globalInhibition': True,
      'synPermActiveInc': 0.0,
      'synPermInactiveDec': 0.0,
      'seed': int((time.time() % 10000) * 10),
  }
  print 'testBasicCompute2, SP seed set to:', params['seed']
  self.basicComputeLoop('py', params, inputSize, columnDimensions)
  self.basicComputeLoop('cpp', params, inputSize, columnDimensions)
'Checks that feeding in the same input vector leads to polarized permanence values: either zeros or ones, but no fractions'
def testCompute1(self):
  sp = SpatialPooler(inputDimensions=[9],
                     columnDimensions=[5],
                     potentialRadius=3,
                     potentialPct=0.5,
                     globalInhibition=False,
                     localAreaDensity=-1.0,
                     numActiveColumnsPerInhArea=3,
                     stimulusThreshold=1,
                     synPermInactiveDec=0.1,
                     synPermActiveInc=0.1,
                     synPermConnected=0.1,
                     minPctOverlapDutyCycle=0.1,
                     dutyCyclePeriod=10,
                     boostStrength=10.0,
                     seed=getSeed(),
                     spVerbosity=0)
  sp._potentialPools = BinaryCorticalColumns(
      numpy.ones([sp._numColumns, sp._numInputs]))
  sp._inhibitColumns = Mock(return_value=numpy.array(range(5)))
  inputVector = numpy.array([1, 0, 1, 0, 1, 0, 0, 1, 1])
  activeArray = numpy.zeros(5)
  for i in xrange(20):
    sp.compute(inputVector, True, activeArray)
  for i in xrange(sp._numColumns):
    perm = sp._permanences.getRow(i)
    self.assertEqual(list(perm), list(inputVector))
'Checks that columns only change the permanence values for inputs that are within their potential pool'
def testCompute2(self):
  sp = SpatialPooler(inputDimensions=[10],
                     columnDimensions=[5],
                     potentialRadius=3,
                     potentialPct=0.5,
                     globalInhibition=False,
                     localAreaDensity=-1.0,
                     numActiveColumnsPerInhArea=3,
                     stimulusThreshold=1,
                     synPermInactiveDec=0.01,
                     synPermActiveInc=0.1,
                     synPermConnected=0.1,
                     minPctOverlapDutyCycle=0.1,
                     dutyCyclePeriod=10,
                     boostStrength=10.0,
                     seed=getSeed(),
                     spVerbosity=0)
  sp._inhibitColumns = Mock(return_value=numpy.array(range(5)))
  inputVector = numpy.ones(sp._numInputs)
  activeArray = numpy.zeros(5)
  for i in xrange(20):
    sp.compute(inputVector, True, activeArray)
  for columnIndex in xrange(sp._numColumns):
    potential = sp._potentialPools[columnIndex]
    perm = sp._permanences.getRow(columnIndex)
    self.assertEqual(list(perm), list(potential))
'When stimulusThreshold is 0, allow columns without any overlap to become active. This test focuses on the global inhibition code path.'
def testZeroOverlap_NoStimulusThreshold_GlobalInhibition(self):
  inputSize = 10
  nColumns = 20
  sp = SpatialPooler(inputDimensions=[inputSize],
                     columnDimensions=[nColumns],
                     potentialRadius=10,
                     globalInhibition=True,
                     numActiveColumnsPerInhArea=3,
                     stimulusThreshold=0,
                     seed=getSeed())
  inputVector = numpy.zeros(inputSize)
  activeArray = numpy.zeros(nColumns)
  sp.compute(inputVector, True, activeArray)
  self.assertEqual(3, len(activeArray.nonzero()[0]))
'When stimulusThreshold is > 0, don\'t allow columns without any overlap to become active. This test focuses on the global inhibition code path.'
def testZeroOverlap_StimulusThreshold_GlobalInhibition(self):
  inputSize = 10
  nColumns = 20
  sp = SpatialPooler(inputDimensions=[inputSize],
                     columnDimensions=[nColumns],
                     potentialRadius=10,
                     globalInhibition=True,
                     numActiveColumnsPerInhArea=3,
                     stimulusThreshold=1,
                     seed=getSeed())
  inputVector = numpy.zeros(inputSize)
  activeArray = numpy.zeros(nColumns)
  sp.compute(inputVector, True, activeArray)
  self.assertEqual(0, len(activeArray.nonzero()[0]))
'When stimulusThreshold is 0, allow columns without any overlap to become active. This test focuses on the local inhibition code path.'
def testZeroOverlap_NoStimulusThreshold_LocalInhibition(self):
  inputSize = 10
  nColumns = 20
  sp = SpatialPooler(inputDimensions=[inputSize],
                     columnDimensions=[nColumns],
                     potentialRadius=5,
                     globalInhibition=False,
                     numActiveColumnsPerInhArea=1,
                     stimulusThreshold=0,
                     seed=getSeed())
  sp.setInhibitionRadius(2)
  inputVector = numpy.zeros(inputSize)
  activeArray = numpy.zeros(nColumns)
  sp.compute(inputVector, True, activeArray)
  self.assertEqual(len(activeArray.nonzero()[0]), 6)
'When stimulusThreshold is > 0, don\'t allow columns without any overlap to become active. This test focuses on the local inhibition code path.'
def testZeroOverlap_StimulusThreshold_LocalInhibition(self):
  inputSize = 10
  nColumns = 20
  sp = SpatialPooler(inputDimensions=[inputSize],
                     columnDimensions=[nColumns],
                     potentialRadius=10,
                     globalInhibition=False,
                     numActiveColumnsPerInhArea=3,
                     stimulusThreshold=1,
                     seed=getSeed())
  inputVector = numpy.zeros(inputSize)
  activeArray = numpy.zeros(nColumns)
  sp.compute(inputVector, True, activeArray)
  self.assertEqual(0, len(activeArray.nonzero()[0]))
'Checks that overlaps and boostedOverlaps are correctly returned'
def testOverlapsOutput(self):
  sp = SpatialPooler(inputDimensions=[5],
                     columnDimensions=[3],
                     potentialRadius=5,
                     numActiveColumnsPerInhArea=5,
                     globalInhibition=True,
                     seed=1,
                     synPermActiveInc=0.1,
                     synPermInactiveDec=0.1)
  inputVector = numpy.ones(5)
  activeArray = numpy.zeros(3)
  expOutput = numpy.array([2, 0, 0], dtype=realDType)
  boostFactors = 2.0 * numpy.ones(3)
  sp.setBoostFactors(boostFactors)
  sp.compute(inputVector, True, activeArray)
  overlaps = sp.getOverlaps()
  boostedOverlaps = sp.getBoostedOverlaps()
  for i in range(sp.getNumColumns()):
    self.assertEqual(overlaps[i], expOutput[i])
  for i in range(sp.getNumColumns()):
    self.assertEqual(boostedOverlaps[i], 2 * expOutput[i])
'Given a specific input and initialization params, the SP should return this exact output. Previously, output varied between platforms (OS X, Linux, etc.).'
def testExactOutput(self):
  expectedOutput = [57, 80, 135, 215, 281, 350, 431, 534, 556, 565,
                    574, 595, 663, 759, 777, 823, 932, 933, 1031, 1126,
                    1184, 1262, 1468, 1479, 1516, 1531, 1585, 1672, 1793, 1807,
                    1906, 1927, 1936, 1939, 1940, 1944, 1957, 1978, 2040, 2047]
  sp = SpatialPooler(inputDimensions=[1, 188],
                     columnDimensions=[2048, 1],
                     potentialRadius=94,
                     potentialPct=0.5,
                     globalInhibition=1,
                     localAreaDensity=-1.0,
                     numActiveColumnsPerInhArea=40.0,
                     stimulusThreshold=0,
                     synPermInactiveDec=0.01,
                     synPermActiveInc=0.1,
                     synPermConnected=0.1,
                     minPctOverlapDutyCycle=0.001,
                     dutyCyclePeriod=1000,
                     boostStrength=10.0,
                     seed=1956,
                     spVerbosity=0)
  inputVector = [
      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
      1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
      1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
      1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
  inputArray = numpy.array(inputVector).astype(realDType)
  activeArray = numpy.zeros(2048)
  sp.compute(inputArray, 1, activeArray)
  spOutput = [i for (i, v) in enumerate(activeArray) if v != 0]
  self.assertEqual(sorted(spOutput), expectedOutput)
'Test that column computes overlap and percent overlap correctly.'
def testCalculateOverlap(self):
  sp = SpatialPooler(inputDimensions=[10], columnDimensions=[5])
  sp._connectedSynapses = BinaryCorticalColumns(
      [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
       [0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
       [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
       [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
       [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
  sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
  inputVector = numpy.zeros(sp._numInputs, dtype='float32')
  overlaps = sp._calculateOverlap(inputVector)
  overlapsPct = sp._calculateOverlapPct(overlaps)
  trueOverlaps = list(numpy.array([0, 0, 0, 0, 0], dtype=realDType))
  trueOverlapsPct = list(numpy.array([0, 0, 0, 0, 0]))
  self.assertListEqual(list(overlaps), trueOverlaps)
  self.assertListEqual(list(overlapsPct), trueOverlapsPct)

  sp._connectedSynapses = BinaryCorticalColumns(
      [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
       [0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
       [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
       [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
       [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
  sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
  inputVector = numpy.ones(sp._numInputs, dtype='float32')
  overlaps = sp._calculateOverlap(inputVector)
  overlapsPct = sp._calculateOverlapPct(overlaps)
  trueOverlaps = list(numpy.array([10, 8, 6, 4, 2], dtype=realDType))
  trueOverlapsPct = list(numpy.array([1, 1, 1, 1, 1]))
  self.assertListEqual(list(overlaps), trueOverlaps)
  self.assertListEqual(list(overlapsPct), trueOverlapsPct)

  sp._connectedSynapses = BinaryCorticalColumns(
      [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
       [0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
       [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
       [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
       [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
  sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
  inputVector = numpy.zeros(sp._numInputs, dtype='float32')
  inputVector[9] = 1
  overlaps = sp._calculateOverlap(inputVector)
  overlapsPct = sp._calculateOverlapPct(overlaps)
  trueOverlaps = list(numpy.array([1, 1, 1, 1, 1], dtype=realDType))
  trueOverlapsPct = list(numpy.array([0.1, 0.125, 1.0 / 6, 0.25, 0.5]))
  self.assertListEqual(list(overlaps), trueOverlaps)
  self.assertListEqual(list(overlapsPct), trueOverlapsPct)

  sp._connectedSynapses = BinaryCorticalColumns(
      [[1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
       [0, 1, 0, 0, 0, 0, 1, 0, 0, 0],
       [0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
       [0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
       [0, 0, 0, 0, 1, 0, 0, 0, 0, 1]])
  sp._connectedCounts = numpy.array([2.0, 2.0, 2.0, 2.0, 2.0])
  inputVector = numpy.zeros(sp._numInputs, dtype='float32')
  inputVector[range(0, 10, 2)] = 1
  overlaps = sp._calculateOverlap(inputVector)
  overlapsPct = sp._calculateOverlapPct(overlaps)
  trueOverlaps = list(numpy.array([1, 1, 1, 1, 1], dtype=realDType))
  trueOverlapsPct = list(numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]))
  self.assertListEqual(list(overlaps), trueOverlaps)
  self.assertListEqual(list(overlapsPct), trueOverlapsPct)
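The quantities checked above follow directly from the connected-synapse matrix: a column's overlap is the number of ON input bits landing on its connected synapses, and its percent overlap divides that by the column's connected count. A minimal sketch with a hypothetical helper (not the SpatialPooler API):

# Sketch only: overlap is the connected matrix dotted with the binary input;
# percent overlap normalizes by each column's connected-synapse count.
import numpy

def overlapAndPct(connectedSynapses, connectedCounts, inputVector):
  overlaps = connectedSynapses.dot(inputVector)
  return overlaps, overlaps / connectedCounts

connected = numpy.array([[1] * 10,
                         [0] * 2 + [1] * 8,
                         [0] * 4 + [1] * 6,
                         [0] * 6 + [1] * 4,
                         [0] * 8 + [1] * 2], dtype=float)
inputVector = numpy.zeros(10)
inputVector[9] = 1
print(overlapAndPct(connected, connected.sum(axis=1), inputVector))
# -> overlaps [1, 1, 1, 1, 1], pct [0.1, 0.125, 1/6, 0.25, 0.5]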
'Test initial permanence generation. Ensure that the correct number of synapses is initialized in a connected state, with permanence values drawn from the correct ranges.'
def testInitPermanence1(self):
  sp = self._sp
  sp._inputDimensions = numpy.array([10])
  sp._numInputs = 10
  sp._raisePermanenceToThreshold = Mock()
  sp._potentialRadius = 2
  connectedPct = 1
  mask = numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1])
  perm = sp._initPermanence(mask, connectedPct)
  connected = (perm >= sp._synPermConnected).astype(int)
  numcon = connected.nonzero()[0].size
  self.assertEqual(numcon, 5)

  connectedPct = 0
  perm = sp._initPermanence(mask, connectedPct)
  connected = (perm >= sp._synPermConnected).astype(int)
  numcon = connected.nonzero()[0].size
  self.assertEqual(numcon, 0)

  connectedPct = 0.5
  sp._potentialRadius = 100
  sp._numInputs = 100
  mask = numpy.ones(100)
  perm = sp._initPermanence(mask, connectedPct)
  connected = (perm >= sp._synPermConnected).astype(int)
  numcon = connected.nonzero()[0].size
  self.assertGreater(numcon, 0)
  self.assertLess(numcon, sp._numInputs)
  minThresh = 0.0
  maxThresh = sp._synPermMax
  self.assertEqual(numpy.logical_and((perm >= minThresh),
                                     (perm <= maxThresh)).all(), True)
'Test initial permanence generation. Ensure that permanence values are only assigned to bits within a column\'s potential pool.'
def testInitPermanence2(self):
  sp = self._sp
  sp._raisePermanenceToThreshold = Mock()

  sp._numInputs = 10
  connectedPct = 1
  mask = numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
  perm = sp._initPermanence(mask, connectedPct)
  connected = list((perm > 0).astype(int))
  trueConnected = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
  self.assertListEqual(connected, trueConnected)

  sp._numInputs = 10
  connectedPct = 1
  mask = numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
  perm = sp._initPermanence(mask, connectedPct)
  connected = list((perm > 0).astype(int))
  trueConnected = [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]
  self.assertListEqual(connected, trueConnected)

  sp._numInputs = 10
  connectedPct = 1
  mask = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
  perm = sp._initPermanence(mask, connectedPct)
  connected = list((perm > 0).astype(int))
  trueConnected = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
  self.assertListEqual(connected, trueConnected)

  sp._numInputs = 10
  connectedPct = 1
  mask = numpy.array([1, 1, 1, 1, 1, 1, 1, 0, 1, 1])
  perm = sp._initPermanence(mask, connectedPct)
  connected = list((perm > 0).astype(int))
  trueConnected = [1, 1, 1, 1, 1, 1, 1, 0, 1, 1]
  self.assertListEqual(connected, trueConnected)
'Tests that duty cycles are updated properly according to the mathematical formula. Also checks the effects of supplying a maxPeriod to the function.'
def testUpdateDutyCycleHelper(self):
  dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
  period = 1000
  newvals = numpy.zeros(5)
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  trueNewDc = [999, 999, 999, 999, 999]
  self.assertListEqual(list(newDc), trueNewDc)

  dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
  period = 1000
  newvals = numpy.zeros(5)
  newvals.fill(1000)
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  trueNewDc = list(dc)
  self.assertListEqual(list(newDc), trueNewDc)

  dc = numpy.array([1000, 1000, 1000, 1000, 1000])
  newvals = numpy.array([2000, 4000, 5000, 6000, 7000])
  period = 1000
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  trueNewDc = [1001, 1003, 1004, 1005, 1006]
  self.assertListEqual(list(newDc), trueNewDc)

  dc = numpy.array([1000, 800, 600, 400, 2000])
  newvals = numpy.zeros(5)
  period = 2
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  trueNewDc = [500, 400, 300, 200, 1000]
  self.assertListEqual(list(newDc), trueNewDc)
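For reference, every expected value above is an instance of one moving-average step. A sketch of the formula the helper is being tested against:

# Sketch only: dc' = (dc * (period - 1) + newValue) / period
import numpy

def updateDutyCyclesHelper(dutyCycles, newValues, period):
  return (dutyCycles * (period - 1.0) + newValues) / period

print(updateDutyCyclesHelper(numpy.full(5, 1000.0), numpy.zeros(5), 1000))
# -> [999. 999. 999. 999. 999.]
print(updateDutyCyclesHelper(numpy.array([1000., 800., 600., 400., 2000.]),
                             numpy.zeros(5), 2))
# -> [500. 400. 300. 200. 1000.]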
'Tests that global inhibition correctly picks the correct top number of overlap scores as winning columns.'
def testInhibitColumnsGlobal(self):
  sp = self._sp
  density = 0.3
  sp._numColumns = 10
  overlaps = numpy.array([1, 2, 1, 4, 8, 3, 12, 5, 4, 1], dtype=realDType)
  active = list(sp._inhibitColumnsGlobal(overlaps, density))
  trueActive = [4, 6, 7]
  self.assertListEqual(trueActive, sorted(active))

  density = 0.5
  sp._numColumns = 10
  overlaps = numpy.array(range(10), dtype=realDType)
  active = list(sp._inhibitColumnsGlobal(overlaps, density))
  trueActive = range(5, 10)
  self.assertListEqual(trueActive, sorted(active))
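Global inhibition as exercised here reduces to a top-N selection: with density 0.3 over 10 columns, the three columns with the highest overlaps win. A sketch with a hypothetical helper (tie-breaking in the real implementation may differ):

# Sketch only: activate the top `density * numColumns` columns by overlap.
import numpy

def inhibitColumnsGlobal(overlaps, density):
  numActive = int(density * overlaps.size)
  return numpy.argsort(overlaps)[-numActive:]  # indices of the largest overlaps

overlaps = numpy.array([1, 2, 1, 4, 8, 3, 12, 5, 4, 1], dtype=float)
print(sorted(inhibitColumnsGlobal(overlaps, 0.3)))  # -> [4, 6, 7]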
'After feeding in a record, the number of active columns should always equal numActiveColumnsPerInhArea.'
@unittest.skip('Ported from the removed FlatSpatialPooler but fails. See: https://github.com/numenta/nupic/issues/1897')
def testActiveColumnsEqualNumActive(self):
  for i in [1, 10, 50]:
    numActive = i
    inputShape = 10
    sp = SpatialPooler(inputDimensions=[inputShape],
                       columnDimensions=[100],
                       numActiveColumnsPerInhArea=numActive)
    inputArray = (numpy.random.rand(inputShape) > 0.5).astype(uintDType)
    inputArray2 = (numpy.random.rand(inputShape) > 0.8).astype(uintDType)
    activeArray = numpy.zeros(sp._numColumns).astype(realDType)
    sp.compute(inputArray, True, activeArray)
    sp.compute(inputArray2, True, activeArray)
    self.assertEqual(sum(activeArray), numActive)
    sp.compute(inputArray, False, activeArray)
    sp.compute(inputArray2, False, activeArray)
    self.assertEqual(sum(activeArray), numActive)
'Creates a segment, destroys it, and makes sure it got destroyed along with all of its synapses.'
def testDestroySegment(self):
  connections = Connections(1024)
  connections.createSegment(10)
  segment2 = connections.createSegment(20)
  connections.createSegment(30)
  connections.createSegment(40)
  connections.createSynapse(segment2, 80, 0.85)
  connections.createSynapse(segment2, 81, 0.85)
  connections.createSynapse(segment2, 82, 0.15)
  self.assertEqual(4, connections.numSegments())
  self.assertEqual(3, connections.numSynapses())
  connections.destroySegment(segment2)
  self.assertEqual(3, connections.numSegments())
  self.assertEqual(0, connections.numSynapses())
  (numActiveConnected, numActivePotential) = connections.computeActivity(
      [80, 81, 82], 0.5)
  self.assertEqual(0, numActiveConnected[segment2.flatIdx])
  self.assertEqual(0, numActivePotential[segment2.flatIdx])
'Creates a segment, creates a number of synapses on it, destroys a synapse, and makes sure it got destroyed.'
def testDestroySynapse(self):
  connections = Connections(1024)
  segment = connections.createSegment(20)
  synapse1 = connections.createSynapse(segment, 80, 0.85)
  synapse2 = connections.createSynapse(segment, 81, 0.85)
  synapse3 = connections.createSynapse(segment, 82, 0.15)
  self.assertEqual(3, connections.numSynapses())
  connections.destroySynapse(synapse2)
  self.assertEqual(2, connections.numSynapses())
  self.assertEqual(set([synapse1, synapse3]),
                   connections.synapsesForSegment(segment))
  (numActiveConnected, numActivePotential) = connections.computeActivity(
      [80, 81, 82], 0.5)
  self.assertEqual(1, numActiveConnected[segment.flatIdx])
  self.assertEqual(2, numActivePotential[segment.flatIdx])
'Creates segments and synapses, then destroys segments and synapses on either side of them and verifies that existing Segment and Synapse instances still point to the same segment / synapse as before.'
def testPathsNotInvalidatedByOtherDestroys(self):
  connections = Connections(1024)
  segment1 = connections.createSegment(11)
  connections.createSegment(12)
  segment3 = connections.createSegment(13)
  connections.createSegment(14)
  segment5 = connections.createSegment(15)
  synapse1 = connections.createSynapse(segment3, 201, 0.85)
  synapse2 = connections.createSynapse(segment3, 202, 0.85)
  synapse3 = connections.createSynapse(segment3, 203, 0.85)
  synapse4 = connections.createSynapse(segment3, 204, 0.85)
  synapse5 = connections.createSynapse(segment3, 205, 0.85)
  self.assertEqual(203, synapse3.presynapticCell)
  connections.destroySynapse(synapse1)
  self.assertEqual(203, synapse3.presynapticCell)
  connections.destroySynapse(synapse5)
  self.assertEqual(203, synapse3.presynapticCell)
  connections.destroySegment(segment1)
  self.assertEqual(set([synapse2, synapse3, synapse4]),
                   connections.synapsesForSegment(segment3))
  connections.destroySegment(segment5)
  self.assertEqual(set([synapse2, synapse3, synapse4]),
                   connections.synapsesForSegment(segment3))
  self.assertEqual(203, synapse3.presynapticCell)
'Destroy a segment that has a destroyed synapse and a non-destroyed synapse. Make sure nothing gets double-destroyed.'
def testDestroySegmentWithDestroyedSynapses(self):
  connections = Connections(1024)
  segment1 = connections.createSegment(11)
  segment2 = connections.createSegment(12)
  connections.createSynapse(segment1, 101, 0.85)
  synapse2a = connections.createSynapse(segment2, 201, 0.85)
  connections.createSynapse(segment2, 202, 0.85)
  self.assertEqual(3, connections.numSynapses())
  connections.destroySynapse(synapse2a)
  self.assertEqual(2, connections.numSegments())
  self.assertEqual(2, connections.numSynapses())
  connections.destroySegment(segment2)
  self.assertEqual(1, connections.numSegments())
  self.assertEqual(1, connections.numSynapses())
'Destroy a segment that has a destroyed synapse and a non-destroyed synapse. Create a new segment in the same place. Make sure its synapse count is correct.'
def testReuseSegmentWithDestroyedSynapses(self):
  connections = Connections(1024)
  segment = connections.createSegment(11)
  synapse1 = connections.createSynapse(segment, 201, 0.85)
  connections.createSynapse(segment, 202, 0.85)
  connections.destroySynapse(synapse1)
  self.assertEqual(1, connections.numSynapses(segment))
  connections.destroySegment(segment)
  reincarnated = connections.createSegment(11)
  self.assertEqual(0, connections.numSynapses(reincarnated))
  self.assertEqual(0, len(connections.synapsesForSegment(reincarnated)))
'Creates a synapse and updates its permanence, and makes sure that its data was correctly updated.'
def testUpdateSynapsePermanence(self):
  connections = Connections(1024)
  segment = connections.createSegment(10)
  synapse = connections.createSynapse(segment, 50, 0.34)
  connections.updateSynapsePermanence(synapse, 0.21)
  synapseData = connections.dataForSynapse(synapse)
  self.assertAlmostEqual(synapseData.permanence, 0.21)
'Creates a sample set of connections, and makes sure that computing the activity for a collection of cells with no activity returns the right activity data.'
def testComputeActivity(self):
  connections = Connections(1024)
  segment1a = connections.createSegment(10)
  connections.createSynapse(segment1a, 150, 0.85)
  connections.createSynapse(segment1a, 151, 0.15)
  segment2a = connections.createSegment(20)
  connections.createSynapse(segment2a, 80, 0.85)
  connections.createSynapse(segment2a, 81, 0.85)
  synapse = connections.createSynapse(segment2a, 82, 0.85)
  connections.updateSynapsePermanence(synapse, 0.15)
  inputVec = [50, 52, 53, 80, 81, 82, 150, 151]
  (numActiveConnected, numActivePotential) = connections.computeActivity(
      inputVec, 0.5)
  self.assertEqual(1, numActiveConnected[segment1a.flatIdx])
  self.assertEqual(2, numActivePotential[segment1a.flatIdx])
  self.assertEqual(2, numActiveConnected[segment2a.flatIdx])
  self.assertEqual(3, numActivePotential[segment2a.flatIdx])
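The split the assertions rely on: a synapse counts toward numActiveConnected only when its permanence reaches the connected threshold (0.5 in this call), while numActivePotential counts every synapse whose presynaptic cell is active. A minimal sketch of that counting rule (hypothetical stand-in, not the Connections API):

# Sketch only: per segment, count active synapses, split by whether their
# permanence reaches the connected threshold.
def computeActivitySketch(synapsesBySegment, activeInput, connectedThreshold):
  activeInput = set(activeInput)
  connected, potential = {}, {}
  for segment, synapses in synapsesBySegment.items():
    activePerms = [p for (cell, p) in synapses if cell in activeInput]
    potential[segment] = len(activePerms)
    connected[segment] = sum(1 for p in activePerms if p >= connectedThreshold)
  return connected, potential

segments = {'segment1a': [(150, 0.85), (151, 0.15)],
            'segment2a': [(80, 0.85), (81, 0.85), (82, 0.15)]}
print(computeActivitySketch(segments, [50, 52, 53, 80, 81, 82, 150, 151], 0.5))
# -> ({'segment1a': 1, 'segment2a': 2}, {'segment1a': 2, 'segment2a': 3})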
'Run the PY and CPP implementations side by side on random inputs. If seed is None, a random seed is chosen based on time; otherwise the fixed seed is used. If learnMode is None, learning is randomly turned on and off; if it is False or True, it is set accordingly. If convertEveryIteration is True, the CPP instance is synchronized from the PY instance on every iteration.'
def runSideBySide(self, params, seed=None, learnMode=None, convertEveryIteration=False):
  randomState = getNumpyRandomGenerator(seed)
  cppSp = CreateSP('cpp', params)
  pySp = CreateSP('py', params)
  self.compare(pySp, cppSp)
  numColumns = pySp.getNumColumns()
  numInputs = pySp.getNumInputs()
  threshold = 0.8
  numRecords = 100  # Assumed record count; `numRecords` was undefined in the
                    # original snippet.
  inputMatrix = (randomState.rand(numRecords, numInputs) >
                 threshold).astype(uintType)
  for i in xrange(numRecords):
    if learnMode is None:
      learn = (randomState.rand() > 0.5)
    else:
      learn = learnMode
    if self.verbosity > 1:
      print 'Iteration:', i, 'learn=', learn
    PyActiveArray = numpy.zeros(numColumns).astype(uintType)
    CppActiveArray = numpy.zeros(numColumns).astype(uintType)
    inputVector = inputMatrix[i, :]
    pySp.compute(inputVector, learn, PyActiveArray)
    cppSp.compute(inputVector, learn, CppActiveArray)
    self.assertListEqual(list(PyActiveArray), list(CppActiveArray))
    self.compare(pySp, cppSp)
    cppBoostFactors = numpy.zeros(numColumns, dtype=realType)
    cppSp.getBoostFactors(cppBoostFactors)
    pySp.setBoostFactors(cppBoostFactors)
    if convertEveryIteration or ((i + 1) % 10) == 0:
      convertPermanences(pySp, cppSp)
'Check SP implementations have same behavior with 1D input.'
@unittest.skip('Currently fails due to non-fixed randomness in C++ SP.')
def testCompatibilityCppPyDirectCall1D(self):
  pySp = PySpatialPooler(inputDimensions=[121], columnDimensions=[300])
  cppSp = CPPSpatialPooler(inputDimensions=[121], columnDimensions=[300])
  data = numpy.zeros([121], dtype=uintType)
  for i in xrange(21):
    data[i] = 1
  nCols = 300
  d1 = numpy.zeros(nCols, dtype=uintType)
  d2 = numpy.zeros(nCols, dtype=uintType)
  pySp.compute(data, True, d1)
  cppSp.compute(data, True, d2)
  d1 = d1.nonzero()[0].tolist()
  d2 = d2.nonzero()[0].tolist()
  self.assertListEqual(
      d1, d2, 'SP outputs are not equal: \n%s \n%s' % (str(d1), str(d2)))
'Check SP implementations have same behavior with 2D input.'
@unittest.skip('Currently fails due to non-fixed randomness in C++ SP.')
def testCompatibilityCppPyDirectCall2D(self):
  pySp = PySpatialPooler(inputDimensions=[121, 1], columnDimensions=[30, 30])
  cppSp = CPPSpatialPooler(inputDimensions=[121, 1], columnDimensions=[30, 30])
  data = numpy.zeros([121, 1], dtype=uintType)
  for i in xrange(21):
    data[i][0] = 1
  nCols = 900
  d1 = numpy.zeros(nCols, dtype=uintType)
  d2 = numpy.zeros(nCols, dtype=uintType)
  pySp.compute(data, True, d1)
  cppSp.compute(data, True, d2)
  d1 = d1.nonzero()[0].tolist()
  d2 = d2.nonzero()[0].tolist()
  self.assertListEqual(
      d1, d2, 'SP outputs are not equal: \n%s \n%s' % (str(d1), str(d2)))
'Tests standard learning case for raw overlap'
def testOverlapDistanceMethodStandard(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  dimensionality = 40
  a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
  b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
  numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
  self.assertEquals(numPatterns, 1)
  numPatterns = classifier.learn(b, 1, isSparse=dimensionality)
  self.assertEquals(numPatterns, 2)
  denseA = np.zeros(dimensionality)
  denseA[a] = 1.0
  (cat, _, _, _) = classifier.infer(denseA)
  self.assertEquals(cat, 0)
  denseB = np.zeros(dimensionality)
  denseB[b] = 1.0
  (cat, _, _, _) = classifier.infer(denseB)
  self.assertEquals(cat, 1)
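Under 'rawOverlap', the nearest stored pattern is effectively the one sharing the most ON bits with the query. A minimal sketch of that matching rule (hypothetical helper, not the KNNClassifier API):

# Sketch only: pick the category of the stored sparse pattern with the
# largest bit overlap with the query.
import numpy as np

def classifyByOverlap(storedPatterns, categories, querySparse):
  overlaps = [len(np.intersect1d(p, querySparse)) for p in storedPatterns]
  return categories[int(np.argmax(overlaps))]

a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29])
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30])
print(classifyByOverlap([a, b], [0, 1], a))  # -> 0, as asserted above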
'Tests overlap distance with min sparsity'
def testMinSparsity(self):
  params = {'distanceMethod': 'rawOverlap', 'minSparsity': 0.2}
  classifier = KNNClassifier(**params)
  dimensionality = 30
  a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
  b = np.array([2, 4, 8, 12, 14, 18, 20, 21, 28], dtype=np.int32)
  c = np.array([2, 3, 8, 11, 14, 18], dtype=np.int32)
  d = np.array([2, 3, 8, 11, 18], dtype=np.int32)
  numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
  self.assertEquals(numPatterns, 1)
  numPatterns = classifier.learn(b, 1, isSparse=dimensionality)
  self.assertEquals(numPatterns, 2)
  numPatterns = classifier.learn(c, 1, isSparse=dimensionality)
  self.assertEquals(numPatterns, 3)
  numPatterns = classifier.learn(d, 1, isSparse=dimensionality)
  self.assertEquals(numPatterns, 3)

  e = np.array([2, 4, 5, 6, 8, 12, 14, 18, 20], dtype=np.int32)
  dense = np.zeros(dimensionality)
  dense[e] = 1.0
  (cat, inference, _, _) = classifier.infer(dense)
  self.assertIsNotNone(cat)
  self.assertGreater(inference.sum(), 0.0)

  f = np.array([2, 5, 8, 11, 14, 18], dtype=np.int32)
  dense = np.zeros(dimensionality)
  dense[f] = 1.0
  (cat, inference, _, _) = classifier.infer(dense)
  self.assertIsNotNone(cat)
  self.assertGreater(inference.sum(), 0.0)

  g = np.array([2, 3, 8, 11, 19], dtype=np.int32)
  dense = np.zeros(dimensionality)
  dense[g] = 1.0
  (cat, inference, _, _) = classifier.infer(dense)
  self.assertIsNone(cat)
  self.assertEqual(inference.sum(), 0.0)
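The gating rule this test exercises: a pattern only participates in learning or inference when its sparsity, ON bits divided by dimensionality, is at least minSparsity. With dimensionality 30 and minSparsity 0.2 that means at least 6 ON bits, which is why d (5 bits) is not stored and g (5 bits) yields no inference. A one-line sketch of the check:

# Sketch only: the sparsity gate assumed from the assertions above.
def sparseEnough(patternNZ, dimensionality, minSparsity):
  return len(patternNZ) / float(dimensionality) >= minSparsity

print(sparseEnough([2, 3, 8, 11, 18], 30, 0.2))      # d and g: 5 bits -> False
print(sparseEnough([2, 3, 8, 11, 14, 18], 30, 0.2))  # c: 6 bits -> True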
'Tests that partitionId properly excludes training data points during inference.'
def testPartitionIdExcluded(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  dimensionality = 40
  a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
  b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
  denseA = np.zeros(dimensionality)
  denseA[a] = 1.0
  denseB = np.zeros(dimensionality)
  denseB[b] = 1.0
  classifier.learn(a, 0, isSparse=dimensionality, partitionId=0)
  classifier.learn(b, 1, isSparse=dimensionality, partitionId=1)
  (cat, _, _, _) = classifier.infer(denseA, partitionId=1)
  self.assertEquals(cat, 0)
  (cat, _, _, _) = classifier.infer(denseA, partitionId=0)
  self.assertEquals(cat, 1)
  (cat, _, _, _) = classifier.infer(denseB, partitionId=0)
  self.assertEquals(cat, 1)
  (cat, _, _, _) = classifier.infer(denseB, partitionId=1)
  self.assertEquals(cat, 0)
  classifier.learn(a, 0, isSparse=dimensionality, partitionId=2)
  (cat, _, _, _) = classifier.infer(denseA, partitionId=0)
  self.assertEquals(cat, 0)
'Test a sequence of calls to KNN to ensure we can retrieve partition Id: - We first learn on some patterns (including one pattern with no partitionId in the middle) and test that we can retrieve Ids. - We then invoke inference and then check partitionId again. - We check incorrect indices to ensure we get an exception. - We check the case where the partitionId to be ignored is not in the list. - We learn on one more pattern and check partitionIds again - We remove rows and ensure partitionIds still work'
def testGetPartitionId(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  dimensionality = 40
  a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
  b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
  c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
  d = np.array([2, 4, 8, 12, 14, 19, 22, 24, 33], dtype=np.int32)
  e = np.array([1, 3, 7, 12, 14, 19, 22, 24, 33], dtype=np.int32)
  denseA = np.zeros(dimensionality)
  denseA[a] = 1.0

  classifier.learn(a, 0, isSparse=dimensionality, partitionId=433)
  classifier.learn(b, 1, isSparse=dimensionality, partitionId=213)
  classifier.learn(c, 1, isSparse=dimensionality, partitionId=None)
  classifier.learn(d, 1, isSparse=dimensionality, partitionId=433)
  self.assertEquals(classifier.getPartitionId(0), 433)
  self.assertEquals(classifier.getPartitionId(1), 213)
  self.assertEquals(classifier.getPartitionId(2), None)
  self.assertEquals(classifier.getPartitionId(3), 433)

  (cat, _, _, _) = classifier.infer(denseA, partitionId=213)
  self.assertEquals(cat, 0)
  (cat, _, _, _) = classifier.infer(denseA, partitionId=666)
  self.assertEquals(cat, 0)
  self.assertEquals(classifier.getPartitionId(0), 433)
  self.assertEquals(classifier.getPartitionId(1), 213)
  self.assertEquals(classifier.getPartitionId(2), None)
  self.assertEquals(classifier.getPartitionId(3), 433)

  with self.assertRaises(RuntimeError):
    classifier.getPartitionId(4)
  with self.assertRaises(RuntimeError):
    classifier.getPartitionId(-1)

  classifier.learn(e, 4, isSparse=dimensionality, partitionId=413)
  self.assertEquals(classifier.getPartitionId(4), 413)
  self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(433),
                        [0, 3])
  self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(666), [])
  self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(413), [4])
  self.assertEquals(classifier.getNumPartitionIds(), 3)
  self.assertItemsEqual(classifier.getPartitionIdList(),
                        [433, 213, np.inf, 433, 413])
  self.assertItemsEqual(classifier.getPartitionIdKeys(), [433, 413, 213])

  self.assertEquals(classifier._removeRows([0, 2]), 2)
  self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(433), [1])
  self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(413), [2])
  classifier._removeRows([0])
  self.assertEquals(classifier.getNumPartitionIds(), 2)
  self.assertItemsEqual(classifier.getPartitionIdList(), [433, 413])
  self.assertItemsEqual(classifier.getPartitionIdKeys(), [433, 413])
'Tests that we can correctly retrieve partition Id even if the first few vectors do not have Ids'
def testGetPartitionIdWithNoIdsAtFirst(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  dimensionality = 40
  a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
  b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
  c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
  d = np.array([2, 4, 8, 12, 14, 19, 22, 24, 33], dtype=np.int32)
  denseA = np.zeros(dimensionality)
  denseA[a] = 1.0
  denseD = np.zeros(dimensionality)
  denseD[d] = 1.0
  classifier.learn(a, 0, isSparse=dimensionality, partitionId=None)
  classifier.learn(b, 1, isSparse=dimensionality, partitionId=None)
  classifier.learn(c, 2, isSparse=dimensionality, partitionId=211)
  classifier.learn(d, 1, isSparse=dimensionality, partitionId=405)
  (cat, _, _, _) = classifier.infer(denseA, partitionId=405)
  self.assertEquals(cat, 0)
  (cat, _, _, _) = classifier.infer(denseD, partitionId=405)
  self.assertEquals(cat, 2)
  (cat, _, _, _) = classifier.infer(denseD)
  self.assertEquals(cat, 1)
'Tests that learning raises an assertion when the given sparsity (input dimensionality) is less than the input array requires.'
@unittest.skipUnless(__debug__, 'Only applicable when asserts are enabled')
def testOverlapDistanceMethodBadSparsity(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
  with self.assertRaises(AssertionError):
    classifier.learn(a, 0, isSparse=20)
'Inconsistent sparsity (input dimensionality)'
def testOverlapDistanceMethodInconsistentDimensionality(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  dimensionality = 40
  a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
  numPatterns = classifier.learn(a, 0, isSparse=31)
  self.assertEquals(numPatterns, 1)
  denseA = np.zeros(dimensionality)
  denseA[a] = 1.0
  (cat, _, _, _) = classifier.infer(denseA)
  self.assertEquals(cat, 0)
'If sparse representation indices are unsorted expect error.'
@unittest.skipUnless(__debug__, 'Only applicable when asserts are enabled')
def testOverlapDistanceMethodStandardUnsorted(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  dimensionality = 40
  a = np.array([29, 3, 7, 11, 13, 17, 19, 23, 1], dtype=np.int32)
  b = np.array([2, 4, 20, 12, 14, 18, 8, 28, 30], dtype=np.int32)
  with self.assertRaises(AssertionError):
    classifier.learn(a, 0, isSparse=dimensionality)
  with self.assertRaises(AssertionError):
    classifier.learn(b, 1, isSparse=dimensionality)
'Tests case where pattern has no ON bits'
def testOverlapDistanceMethodEmptyArray(self):
  params = {'distanceMethod': 'rawOverlap'}
  classifier = KNNClassifier(**params)
  dimensionality = 40
  a = np.array([], dtype=np.int32)
  numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
  self.assertEquals(numPatterns, 1)
  denseA = np.zeros(dimensionality)
  denseA[a] = 1.0
  (cat, _, _, _) = classifier.infer(denseA)
  self.assertEquals(cat, 0)
'More complex test of checkpointing in the middle of a sequence.'
@unittest.skipUnless(capnp, 'pycapnp not installed')
def testSerializationMiddleOfSequence2(self):
  tm1 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
                       False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
                       128, 32, 'normal')
  tm2 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
                       False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
                       128, 32, 'normal')
  with open(resource_filename(__name__, 'data/tm_input.csv'), 'r') as fin:
    reader = csv.reader(fin)
    records = []
    for bottomUpInStr in fin:
      bottomUpIn = numpy.array(eval('[' + bottomUpInStr.strip() + ']'),
                               dtype='int32')
      records.append(bottomUpIn)
  i = 1
  for r in records[:250]:
    print i
    i += 1
    output1 = tm1.compute(r, True, True)
    output2 = tm2.compute(r, True, True)
    self.assertTrue(numpy.array_equal(output1, output2))
  print 'Serializing and deserializing models.'
  savePath1 = os.path.join(self._tmpDir, 'tm1.bin')
  tmProto1 = BacktrackingTM.getSchema().new_message()
  tm1.write(tmProto1)
  with open(savePath1, 'wb') as f:
    tmProto1.write(f)
  with open(savePath1, 'rb') as f:
    tmProto3 = BacktrackingTM.getSchema().read(f)
  tm3 = BacktrackingTM.read(tmProto3)
  savePath2 = os.path.join(self._tmpDir, 'tm2.bin')
  tmProto2 = BacktrackingTM.getSchema().new_message()
  tm2.write(tmProto2)
  with open(savePath2, 'wb') as f:
    tmProto2.write(f)
  with open(savePath2, 'rb') as f:
    tmProto4 = BacktrackingTM.getSchema().read(f)
  tm4 = BacktrackingTM.read(tmProto4)
  self.assertTMsEqual(tm1, tm3)
  self.assertTMsEqual(tm2, tm4)
  for r in records[250:]:
    print i
    i += 1
    out1 = tm1.compute(r, True, True)
    out2 = tm2.compute(r, True, True)
    out3 = tm3.compute(r, True, True)
    out4 = tm4.compute(r, True, True)
    self.assertTrue(numpy.array_equal(out1, out2))
    self.assertTrue(numpy.array_equal(out1, out3))
    self.assertTrue(numpy.array_equal(out1, out4))
  self.assertTMsEqual(tm1, tm2)
  self.assertTMsEqual(tm1, tm3)
  self.assertTMsEqual(tm2, tm4)
'More complex test of checkpointing in the middle of a sequence.'
def testCheckpointMiddleOfSequence2(self):
  tm1 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
                       False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
                       128, 32, 'normal')
  tm2 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
                       False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
                       128, 32, 'normal')
  with open(resource_filename(__name__, 'data/tm_input.csv'), 'r') as fin:
    reader = csv.reader(fin)
    records = []
    for bottomUpInStr in fin:
      bottomUpIn = numpy.array(eval('[' + bottomUpInStr.strip() + ']'),
                               dtype='int32')
      records.append(bottomUpIn)
  i = 1
  for r in records[:250]:
    print i
    i += 1
    output1 = tm1.compute(r, True, True)
    output2 = tm2.compute(r, True, True)
    self.assertTrue(numpy.array_equal(output1, output2))
  print 'Serializing and deserializing models.'
  savePath1 = os.path.join(self._tmpDir, 'tm1.bin')
  tm1.saveToFile(savePath1)
  tm3 = pickle.loads(pickle.dumps(tm1))
  tm3.loadFromFile(savePath1)
  savePath2 = os.path.join(self._tmpDir, 'tm2.bin')
  tm2.saveToFile(savePath2)
  tm4 = pickle.loads(pickle.dumps(tm2))
  tm4.loadFromFile(savePath2)
  self.assertTMsEqual(tm1, tm3)
  self.assertTMsEqual(tm2, tm4)
  for r in records[250:]:
    print i
    i += 1
    out1 = tm1.compute(r, True, True)
    out2 = tm2.compute(r, True, True)
    out3 = tm3.compute(r, True, True)
    out4 = tm4.compute(r, True, True)
    self.assertTrue(numpy.array_equal(out1, out2))
    self.assertTrue(numpy.array_equal(out1, out3))
    self.assertTrue(numpy.array_equal(out1, out4))
  self.assertTMsEqual(tm1, tm2)
  self.assertTMsEqual(tm1, tm3)
  self.assertTMsEqual(tm2, tm4)
'Asserts that two TM instances are the same. This is temporarily disabled since it does not work with the C++ implementation of the TM.'
def assertTMsEqual(self, tm1, tm2):
  self.assertEqual(tm1, tm2, tm1.diff(tm2))
  self.assertTrue(fdrutilities.tmDiff2(tm1, tm2, 1, False))
'Generates a sequence of n patterns.'
@staticmethod
def generateSequence(n=10, numCols=100, minOnes=21, maxOnes=25):
  return ([None] +
          [BacktrackingTMTest.generatePattern(numCols, minOnes, maxOnes)
           for _ in xrange(n)])
'Generate a single test pattern with given parameters. Parameters: numCols: Number of columns in each pattern. minOnes: The minimum number of 1\'s in each pattern. maxOnes: The maximum number of 1\'s in each pattern.'
@staticmethod
def generatePattern(numCols=100, minOnes=21, maxOnes=25):
  assert minOnes < maxOnes
  assert maxOnes < numCols
  nOnes = random.randint(minOnes, maxOnes)
  ind = random.sample(xrange(numCols), nOnes)
  x = numpy.zeros(numCols, dtype='float32')
  x[ind] = 1
  return x
'Set various constants. Create the input patterns and the spatial pooler'
def setUp(self):
  self.inputSize = 90
  self.columnDimensions = 600
  self.x = numpy.zeros((5, self.inputSize), dtype=uintType)
  self.x[0, 0:20] = 1
  self.x[1, 10:30] = 1
  self.x[2, 30:50] = 1
  self.x[3, 50:70] = 1
  self.x[4, 70:90] = 1
  self.winningIteration = numpy.zeros(self.columnDimensions)
  self.lastSDR = {}
  self.spImplementation = 'None'
  self.sp = None
  self.params = {
      'inputDimensions': [self.inputSize],
      'columnDimensions': [self.columnDimensions],
      'potentialRadius': self.inputSize,
      'potentialPct': 0.9,
      'globalInhibition': True,
      'numActiveColumnsPerInhArea': 60,
      'synPermActiveInc': 0.0,
      'synPermInactiveDec': 0.0,
      'dutyCyclePeriod': 10,
      'boostStrength': 10.0,
      'seed': SEED,
  }
  print 'SP seed set to:', self.params['seed']
'Helpful print statements for debugging this test.'
def debugPrint(self):
  activeDutyCycle = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
  self.sp.getActiveDutyCycles(activeDutyCycle)
  boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
  self.sp.getBoostFactors(boost)
  print '\n--------- ITERATION', self.sp.getIterationNum(), \
      '-----------------------'
  print 'SP implementation:', self.spImplementation
  print 'Learning iteration:',
  print 'Max/min active duty cycle:', (activeDutyCycle.max(),
                                       activeDutyCycle.min())
  print 'Average non-zero active duty cycle:', \
      activeDutyCycle[activeDutyCycle > 0].mean()
  print 'Active duty cycle', activeDutyCycle
  print
  print 'Boost factor for sp:', boost
  print
  print 'Last winning iteration for each column'
  print self.winningIteration
  print 'Number of columns that have won at some point:', \
      self.columnDimensions - (self.winningIteration == 0).sum()
'Verify that all SDRs have the properties desired for this test. The bounds for checking overlap are set fairly loosely here since there is some variance due to randomness and the artificial parameters used in this test.'
def verifySDRProperties(self):
  self.assertTrue(_areAllSDRsUnique(self.lastSDR), "All SDR's are not unique")
  self.assertGreater(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 9,
                     "First two SDR's don't overlap much")
  for i in [2, 3, 4]:
    for j in range(5):
      if i != j:
        self.assertLess(_computeOverlap(self.lastSDR[i], self.lastSDR[j]), 18,
                        'One of the last three SDRs has high overlap')
'Main test loop.'
def boostTestLoop(self, imp):
  self.sp = CreateSP(imp, self.params)
  self.spImplementation = imp
  self.winningIteration.fill(0)
  self.lastSDR = {}
  self.boostTestPhase1()
  self.boostTestPhase2()
  self.boostTestPhase3()
  self.boostTestPhase4()
'Test that the number of coincidences firing after inhibition equals the spatial pooler\'s numActiveColumnsPerInhArea.'
@unittest.skip("Currently fails due to switch from FDRCSpatial2 to SpatialPooler.The new SP doesn't have explicit methods to get inhibition.") def testInhibition(self):
  n = 100
  w = 15
  inputLen = 300
  columnDimensions = 2048
  numActiveColumnsPerInhArea = 40
  stimulusThreshold = 0
  spSeed = 1956
  stimulusThresholdInh = 1e-05
  kDutyCycleFactor = 0.01
  spVerbosity = 0
  testIter = 100

  spTest = SpatialPooler(columnDimensions=(columnDimensions, 1),
                         inputDimensions=(1, inputLen),
                         potentialRadius=inputLen / 2,
                         numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
                         spVerbosity=spVerbosity,
                         stimulusThreshold=stimulusThreshold,
                         seed=spSeed)
  initialPermanence = spTest._initialPermanence()
  (spTest._masterPotentialM,
   spTest._masterPermanenceM) = spTest._makeMasterCoincidences(
      spTest.numCloneMasters, spTest._coincRFShape, spTest.potentialPct,
      initialPermanence, spTest.random)
  spTest._updateInhibitionObj()
  boostFactors = numpy.ones(columnDimensions)

  for i in range(testIter):
    spTest._iterNum = i
    input_ = numpy.zeros((1, inputLen))
    nonzero = numpy.random.random(inputLen)
    input_[0][numpy.where(nonzero < (float(w) / float(n)))] = 1
    spTest._computeOverlapsFP(input_,
                              stimulusThreshold=spTest.stimulusThreshold)
    spTest._overlaps *= boostFactors
    onCellIndices = numpy.where(spTest._overlaps > 0)
    spTest._onCells.fill(0)
    spTest._onCells[onCellIndices] = 1
    denseOn = spTest._onCells
    spTest.dutyCyclePeriod = min(i + 1, 1000)
    spTest._dutyCycleBeforeInh = (
        ((spTest.dutyCyclePeriod - 1) * spTest._dutyCycleBeforeInh + denseOn) /
        spTest.dutyCyclePeriod)
    dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
    dutyCycleTieBreaker *= kDutyCycleFactor
    numOn = spTest._inhibitionObj.compute(
        spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices,
        stimulusThresholdInh, max(spTest._overlaps) / 1000)
    spTest._onCells.fill(0)
    onCellIndices = spTest._onCellIndices[0:numOn]
    spTest._onCells[onCellIndices] = 1
    denseOn = spTest._onCells
    spTest._dutyCycleAfterInh = (
        ((spTest.dutyCyclePeriod - 1) * spTest._dutyCycleAfterInh + denseOn) /
        spTest.dutyCyclePeriod)
    spTest._adaptSynapses(onCellIndices, [], input_)
    spTest._updateBoostFactors()
    boostFactors = spTest._firingBoostFactors
    if ((spTest._iterNum + 1) % 50) == 0:
      spTest._updateInhibitionObj()
      spTest._updateMinDutyCycles(spTest._dutyCycleBeforeInh,
                                  spTest.minPctDutyCycleBeforeInh,
                                  spTest._minDutyCycleBeforeInh)
      spTest._updateMinDutyCycles(spTest._dutyCycleAfterInh,
                                  spTest.minPctDutyCycleAfterInh,
                                  spTest._minDutyCycleAfterInh)
    self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea,
                     'Error at input %s, actual numOn are: %i, '
                     'numActivePerInhAre is: %s' %
                     (i, numOn, numActiveColumnsPerInhArea))
'Basic test (creation, pickling, basic run of learning and inference)'
def basicTest(self):
  tm = BacktrackingTMCPP(numberOfCols=10, cellsPerColumn=3, initialPerm=0.2,
                         connectedPerm=0.8, minThreshold=2, newSynapseCount=5,
                         permanenceInc=0.1, permanenceDec=0.05,
                         permanenceMax=1, globalDecay=0.05,
                         activationThreshold=4, doPooling=False,
                         segUpdateValidDuration=5, seed=SEED,
                         verbosity=VERBOSITY)
  tm.retrieveLearningStates = True
  tm.makeCells4Ephemeral = False
  pickle.dump(tm, open('test_tm_cpp.pkl', 'wb'))
  tm2 = pickle.load(open('test_tm_cpp.pkl'))
  self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False))
  for i in xrange(5):
    x = numpy.zeros(tm.numberOfCols, dtype='uint32')
    _RGEN.initializeUInt32Array(x, 2)
    tm.learn(x)
  tm.reset()
  tm.makeCells4Ephemeral = False
  pickle.dump(tm, open('test_tm_cpp.pkl', 'wb'))
  tm2 = pickle.load(open('test_tm_cpp.pkl'))
  self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY))
  patterns = numpy.zeros((4, tm.numberOfCols), dtype='uint32')
  for i in xrange(4):
    _RGEN.initializeUInt32Array(patterns[i], 2)
  for i in xrange(10):
    x = numpy.zeros(tm.numberOfCols, dtype='uint32')
    _RGEN.initializeUInt32Array(x, 2)
    tm.infer(x)
    if i > 0:
      tm._checkPrediction(patterns)
'Basic test (basic run of learning and inference)'
def basicTest2(self, tm, numPatterns=100, numRepetitions=3, activity=15, testTrimming=False, testRebuild=False):
  tmPy = BacktrackingTM(numberOfCols=tm.numberOfCols,
                        cellsPerColumn=tm.cellsPerColumn,
                        initialPerm=tm.initialPerm,
                        connectedPerm=tm.connectedPerm,
                        minThreshold=tm.minThreshold,
                        newSynapseCount=tm.newSynapseCount,
                        permanenceInc=tm.permanenceInc,
                        permanenceDec=tm.permanenceDec,
                        permanenceMax=tm.permanenceMax,
                        globalDecay=tm.globalDecay,
                        activationThreshold=tm.activationThreshold,
                        doPooling=tm.doPooling,
                        segUpdateValidDuration=tm.segUpdateValidDuration,
                        pamLength=tm.pamLength,
                        maxAge=tm.maxAge,
                        maxSeqLength=tm.maxSeqLength,
                        maxSegmentsPerCell=tm.maxSegmentsPerCell,
                        maxSynapsesPerSegment=tm.maxSynapsesPerSegment,
                        seed=tm.seed,
                        verbosity=tm.verbosity)
  tm.retrieveLearningStates = True
  verbosity = VERBOSITY
  sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
                                          length=tm.numberOfCols,
                                          activity=activity)
  for r in xrange(numRepetitions):
    for i in xrange(sequence.nRows()):
      if (i % 10) == 0:
        tm.reset()
        tmPy.reset()
      if verbosity >= 2:
        print '\n\n    ===================================\nPattern:',
        print i, 'Round:', r, 'input:', sequence.getRow(i)
      y1 = tm.learn(sequence.getRow(i))
      y2 = tmPy.learn(sequence.getRow(i))
      if testRebuild:
        tm.cells4.rebuildOutSynapses()
      if testTrimming:
        tm.trimSegments()
        tmPy.trimSegments()
      if verbosity > 2:
        print '\n ------ CPP states ------ ',
        tm.printStates()
        print '\n ------ PY states ------ ',
        tmPy.printStates()
      if verbosity > 6:
        print 'C++ cells: '
        tm.printCells()
        print 'PY cells: '
        tmPy.printCells()
      if verbosity >= 3:
        print 'Num segments in PY and C++', tmPy.getNumSegments(), \
            tm.getNumSegments()
      self.assertTrue(fdrutils.tmDiff2(tm, tmPy, verbosity, False))
      self.assertLess(abs((y1 - y2).sum()), 3)
  print 'Learning completed'
  self.assertTrue(fdrutils.tmDiff2(tm, tmPy, verbosity))
  print 'Rebuilding outSynapses'
  tm.cells4.rebuildOutSynapses()
  self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY))
  print 'Trimming segments'
  tm.trimSegments()
  tmPy.trimSegments()
  self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY))
  print 'Pickling and unpickling'
  tm.makeCells4Ephemeral = False
  pickle.dump(tm, open('test_tm_cpp.pkl', 'wb'))
  tm2 = pickle.load(open('test_tm_cpp.pkl'))
  self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False))
  print 'Testing inference'
  tm.reset()
  tmPy.reset()
  setVerbosity(INFERENCE_VERBOSITY, tm, tmPy)
  patterns = numpy.zeros((40, tm.numberOfCols), dtype='uint32')
  for i in xrange(4):
    _RGEN.initializeUInt32Array(patterns[i], 2)
  for (i, x) in enumerate(patterns):
    x = numpy.zeros(tm.numberOfCols, dtype='uint32')
    _RGEN.initializeUInt32Array(x, 2)
    y = tm.infer(x)
    yPy = tmPy.infer(x)
    self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY, checkLearn=False))
    if abs((y - yPy).sum()) > 0:
      print 'C++ output', y
      print 'Py output', yPy
      assert False
    if i > 0:
      tm._checkPrediction(patterns)
      tmPy._checkPrediction(patterns)
  print 'Inference completed'
  print '===================================='
  return (tm, tmPy)
'Call basicTest2 with multiple parameter settings and ensure the C++ and PY versions are identical throughout.'
def testTMs(self, short=True):
if short: print 'Testing short version' else: print 'Testing long version' if short: print '\nTesting with fixed resource CLA - test max segment and synapses' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, initialPerm=0.5, connectedPerm=0.5, permanenceMax=1, minThreshold=8, newSynapseCount=10, permanenceInc=0.1, permanenceDec=0.01, globalDecay=0.0, activationThreshold=8, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY, maxAge=0, maxSegmentsPerCell=2, maxSynapsesPerSegment=10, checkSynapseConsistency=True) tm.cells4.setCellSegmentOrder(True) self.basicTest2(tm, numPatterns=15, numRepetitions=1) if (not short): print '\nTesting with fixed resource CLA - test max segment and synapses' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, initialPerm=0.5, connectedPerm=0.5, permanenceMax=1, minThreshold=8, newSynapseCount=10, permanenceInc=0.1, permanenceDec=0.01, globalDecay=0.0, activationThreshold=8, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY, maxAge=0, maxSegmentsPerCell=2, maxSynapsesPerSegment=10, checkSynapseConsistency=True) tm.cells4.setCellSegmentOrder(1) self.basicTest2(tm, numPatterns=30, numRepetitions=2) print '\nTesting with permanenceInc = 0 and Dec = 0' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, initialPerm=0.5, connectedPerm=0.5, minThreshold=3, newSynapseCount=3, permanenceInc=0.0, permanenceDec=0.0, permanenceMax=1, globalDecay=0.0, activationThreshold=3, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY, checkSynapseConsistency=False) tm.printParameters() self.basicTest2(tm, numPatterns=30, numRepetitions=3) print 'Testing with permanenceInc = 0 and Dec = 0 and 1 cell per column' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=1, initialPerm=0.5, connectedPerm=0.5, minThreshold=3, newSynapseCount=3, permanenceInc=0.0, permanenceDec=0.0, permanenceMax=1, globalDecay=0.0, activationThreshold=3, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY, checkSynapseConsistency=False) self.basicTest2(tm) print 'Testing with permanenceInc = 0.1 and Dec = .0' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, initialPerm=0.5, connectedPerm=0.5, minThreshold=3, newSynapseCount=3, permanenceInc=0.1, permanenceDec=0.0, permanenceMax=1, globalDecay=0.0, activationThreshold=3, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY, checkSynapseConsistency=False) self.basicTest2(tm) print 'Testing with permanenceInc = 0.1, Dec = .01 and higher synapse count' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=2, initialPerm=0.5, connectedPerm=0.5, minThreshold=3, newSynapseCount=5, permanenceInc=0.1, permanenceDec=0.01, permanenceMax=1, globalDecay=0.0, activationThreshold=3, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY, checkSynapseConsistency=True) self.basicTest2(tm, numPatterns=10, numRepetitions=2) print 'Testing age based global decay' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, initialPerm=0.4, connectedPerm=0.5, minThreshold=3, newSynapseCount=3, permanenceInc=0.1, permanenceDec=0.1, permanenceMax=1, globalDecay=0.25, activationThreshold=3, doPooling=False, segUpdateValidDuration=5, pamLength=2, maxAge=20, seed=SEED, verbosity=VERBOSITY, checkSynapseConsistency=True) tm.cells4.setCellSegmentOrder(1) self.basicTest2(tm) print '\nTesting with fixed size CLA, max segments per cell' tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, initialPerm=0.5, connectedPerm=0.5, permanenceMax=1, minThreshold=8, newSynapseCount=10, permanenceInc=0.1, permanenceDec=0.01, globalDecay=0.0, activationThreshold=8, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY, maxAge=0, maxSegmentsPerCell=2, maxSynapsesPerSegment=100, checkSynapseConsistency=True) tm.cells4.setCellSegmentOrder(1) self.basicTest2(tm, numPatterns=30, numRepetitions=2)
'When a segment becomes active, grow synapses to previous winner cells. The number of grown synapses is calculated from the "matching segment" overlap, not the "active segment" overlap.'
def testActiveSegmentGrowSynapsesAccordingToPotentialOverlap(self):
tm = TemporalMemory(columnDimensions=[32], cellsPerColumn=1, activationThreshold=2, initialPermanence=0.21, connectedPermanence=0.5, minThreshold=1, maxNewSynapseCount=4, permanenceIncrement=0.1, permanenceDecrement=0.1, predictedSegmentDecrement=0.0, seed=42) previousActiveColumns = [0, 1, 2, 3, 4] prevWinnerCells = [0, 1, 2, 3, 4] activeColumns = [5] activeSegment = tm.createSegment(5) tm.connections.createSynapse(activeSegment, 0, 0.5) tm.connections.createSynapse(activeSegment, 1, 0.5) tm.connections.createSynapse(activeSegment, 2, 0.2) tm.compute(previousActiveColumns, True) self.assertEqual(prevWinnerCells, tm.getWinnerCells()) tm.compute(activeColumns, True) presynapticCells = set((synapse.presynapticCell for synapse in tm.connections.synapsesForSegment(activeSegment))) self.assertTrue(((presynapticCells == set([0, 1, 2, 3])) or (presynapticCells == set([0, 1, 2, 4]))))
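A worked check of the expected synapse count above: growth on an active segment is budgeted against the potential (matching) overlap, which counts all three existing synapses to previous winner cells, including the unconnected one at permanence 0.2, not just the two connected ones (arithmetic inferred from the test's parameters):

    maxNewSynapseCount = 4
    potentialOverlap = 3  # existing synapses to winner cells 0, 1 and 2
    nGrow = maxNewSynapseCount - potentialOverlap
    assert nGrow == 1  # one new synapse, grown to winner cell 3 or 4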
'Destroy some segments then verify that the maxSegmentsPerCell is still correctly applied.'
def testDestroySegmentsThenReachLimit(self):
tm = TemporalMemory(columnDimensions=[32], cellsPerColumn=1, activationThreshold=3, initialPermanence=0.5, connectedPermanence=0.5, minThreshold=2, maxNewSynapseCount=3, permanenceIncrement=0.02, permanenceDecrement=0.02, predictedSegmentDecrement=0.0, seed=42, maxSegmentsPerCell=2) segment1 = tm.createSegment(11) segment2 = tm.createSegment(11) self.assertEqual(2, tm.connections.numSegments()) tm.connections.destroySegment(segment1) tm.connections.destroySegment(segment2) self.assertEqual(0, tm.connections.numSegments()) tm.createSegment(11) self.assertEqual(1, tm.connections.numSegments()) tm.createSegment(11) self.assertEqual(2, tm.connections.numSegments()) tm.createSegment(11) self.assertEqual(2, tm.connections.numSegments(11)) self.assertEqual(2, tm.connections.numSegments())
'Hit the maxSegmentsPerCell threshold multiple times. Make sure it works more than once.'
def testReachSegmentLimitMultipleTimes(self):
tm = TemporalMemory(columnDimensions=[32], cellsPerColumn=1, activationThreshold=3, initialPermanence=0.5, connectedPermanence=0.5, minThreshold=2, maxNewSynapseCount=3, permanenceIncrement=0.02, permanenceDecrement=0.02, predictedSegmentDecrement=0.0, seed=42, maxSegmentsPerCell=2) tm.createSegment(10) self.assertEqual(1, tm.connections.numSegments()) tm.createSegment(10) self.assertEqual(2, tm.connections.numSegments()) tm.createSegment(10) self.assertEqual(2, tm.connections.numSegments()) tm.createSegment(10) self.assertEqual(2, tm.connections.numSegments())
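The behavior both limit tests rely on can be pictured as a bounded creation policy: once a cell holds maxSegmentsPerCell segments, creating another evicts an existing one instead of growing the count (a sketch; the eviction order used here is an assumption, not the library's documented rule):

    maxSegmentsPerCell = 2
    segments = []
    def createSegment(segments):
        if len(segments) >= maxSegmentsPerCell:
            segments.pop(0)  # evict one existing segment (order assumed)
        segments.append(object())
    for _ in range(4):
        createSegment(segments)
    assert len(segments) == 2  # the cap holds no matter how often it is hit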
'Ensure that constructing the region with historicWindowSize smaller than estimationSamples raises a ValueError.'
def testParameterError(self):
with self.assertRaises(ValueError): AnomalyLikelihoodRegion(estimationSamples=100, historicWindowSize=99)
'Test that the region keeps track of state correctly and produces the same likelihoods as the AnomalyLikelihood module.'
def testLikelihoodValues(self):
anomalyLikelihoodRegion = AnomalyLikelihoodRegion() anomalyLikelihood = AnomalyLikelihood() inputs = AnomalyLikelihoodRegion.getSpec()['inputs'] outputs = AnomalyLikelihoodRegion.getSpec()['outputs'] with open(_INPUT_DATA_FILE) as f: reader = csv.reader(f) reader.next() for record in reader: consumption = float(record[1]) anomalyScore = float(record[2]) likelihood1 = anomalyLikelihood.anomalyProbability(consumption, anomalyScore) inputs['rawAnomalyScore'] = numpy.array([anomalyScore]) inputs['metricValue'] = numpy.array([consumption]) anomalyLikelihoodRegion.compute(inputs, outputs) likelihood2 = outputs['anomalyLikelihood'][0] self.assertEqual(likelihood1, likelihood2)
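The parity check above drives the standalone AnomalyLikelihood object with the same (metricValue, rawAnomalyScore) stream the region sees; stripped of the region plumbing, that streaming interface looks like this (a sketch; the import path and the sample pairs are assumptions):

    from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood  # import path assumed

    al = AnomalyLikelihood()
    # Hypothetical stream of (metric value, raw anomaly score) pairs.
    for value, score in [(10.0, 0.1), (11.0, 0.05), (50.0, 0.9)]:
        likelihood = al.anomalyProbability(value, score)  # updates state, returns a likelihood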
'Test that serialization preserves the state of the region correctly.'
@unittest.skipUnless(capnp, 'pycapnp is not installed, skipping serialization test.') def testSerialization(self):
anomalyLikelihoodRegion1 = AnomalyLikelihoodRegion() inputs = AnomalyLikelihoodRegion.getSpec()['inputs'] outputs = AnomalyLikelihoodRegion.getSpec()['outputs'] for _ in xrange(0, 6): inputs['rawAnomalyScore'] = numpy.array([random.random()]) inputs['metricValue'] = numpy.array([random.random()]) anomalyLikelihoodRegion1.compute(inputs, outputs) score1 = outputs['anomalyLikelihood'][0] proto1 = AnomalyLikelihoodRegionProto.new_message() anomalyLikelihoodRegion1.write(proto1) with tempfile.TemporaryFile() as f: proto1.write(f) f.seek(0) proto2 = AnomalyLikelihoodRegionProto.read(f) anomalyLikelihoodRegion2 = AnomalyLikelihoodRegion.read(proto2) self.assertEqual(anomalyLikelihoodRegion1, anomalyLikelihoodRegion2) for _ in xrange(6, 500): inputs['rawAnomalyScore'] = numpy.array([random.random()]) inputs['metricValue'] = numpy.array([random.random()]) anomalyLikelihoodRegion1.compute(inputs, outputs) score1 = outputs['anomalyLikelihood'][0] anomalyLikelihoodRegion2.compute(inputs, outputs) score2 = outputs['anomalyLikelihood'][0] self.assertEqual(score1, score2)
'Ensure that records in the classifier are removed once they are no longer used after trainRecords is set.'
@patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord') def testSetGetWaitRecordsRecalculate(self, getRecord):
self.helper.cacheSize = 5 self.helper.anomalyThreshold = 0.8 self.helper._anomalyVectorLength = 20 records = [Mock(ROWID=10, anomalyLabel=['Test'], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1, 4])), Mock(ROWID=11, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1, 2])), Mock(ROWID=12, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1, 4])), Mock(ROWID=13, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1, 2, 6, 7])), Mock(ROWID=14, anomalyLabel=['Test'], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1, 10])), Mock(ROWID=15, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1, 3])), Mock(ROWID=16, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1, 4])), Mock(ROWID=17, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([10])), Mock(ROWID=18, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1, 4]))] getRecord.side_effect = records for i in records: self.helper.compute(dict(), dict()) self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 6) self.assertEqual(self.helper._knnclassifier.getParameter('categoryRecencyList'), [10, 12, 14, 16, 17, 18], 'Classifier incorrectly classified test records.') self.helper.setParameter('trainRecords', None, 14) self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 2) self.assertEqual(self.helper._knnclassifier.getParameter('categoryRecencyList'), [14, 17], 'Classifier incorrectly reclassified test records after setting trainRecords')
'Testing ScalarEncoder...'
def testScalarEncoder(self):
mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True) empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA) self.assertEqual(empty.sum(), 0)
'Test that NaN input produces an all-zero encoding.'
def testNaNs(self):
mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True) empty = mv.encode(float('nan')) self.assertEqual(empty.sum(), 0)
'Test bottom-up encoding for a Periodic encoder'
def testBottomUpEncodingPeriodicEncoder(self):
l = ScalarEncoder(n=14, w=3, minval=1, maxval=8, periodic=True, forced=True) self.assertEqual(l.getDescription(), [('[1:8]', 0)]) l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=8, periodic=True, forced=True) self.assertEqual(l.getDescription(), [('scalar', 0)]) self.assertTrue(numpy.array_equal(l.encode(3), numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(3.1), l.encode(3))) self.assertTrue(numpy.array_equal(l.encode(3.5), numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(3.6), l.encode(3.5))) self.assertTrue(numpy.array_equal(l.encode(3.7), l.encode(3.5))) self.assertTrue(numpy.array_equal(l.encode(4), numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(1), numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(1.5), numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(7), numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(7.5), numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], dtype=defaultDtype))) self.assertEqual(l.resolution, 0.5) self.assertEqual(l.radius, 1.5)
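The final two assertions follow from how a periodic ScalarEncoder derives resolution and radius from n, w and the value range (a worked check, assuming resolution = range / n and radius = w * resolution, which the asserted values are consistent with):

    n, w, minval, maxval = 14, 3, 1, 8
    resolution = float(maxval - minval) / n  # 7 / 14 = 0.5
    radius = w * resolution                  # 3 * 0.5 = 1.5
    assert (resolution, radius) == (0.5, 1.5)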
'Test that we get the same encoder when we construct it using resolution or radius instead of n.'
def testCreateResolution(self):
l = self._l d = l.__dict__ l = ScalarEncoder(name='scalar', resolution=0.5, w=3, minval=1, maxval=8, periodic=True, forced=True) self.assertEqual(l.__dict__, d) l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8, periodic=True, forced=True) self.assertEqual(l.__dict__, d)
'Test the input description generation, top-down compute, and bucket support on a periodic encoder'
def testDecodeAndResolution(self):
l = self._l v = l.minval while (v < l.maxval): output = l.encode(v) decoded = l.decode(output) (fieldsDict, fieldNames) = decoded self.assertEqual(len(fieldsDict), 1) self.assertEqual(len(fieldNames), 1) self.assertEqual(fieldNames, fieldsDict.keys()) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) (rangeMin, rangeMax) = ranges[0] self.assertEqual(rangeMin, rangeMax) self.assertLess(abs((rangeMin - v)), l.resolution) topDown = l.topDownCompute(output)[0] self.assertTrue(numpy.array_equal(topDown.encoding, output)) self.assertLessEqual(abs((topDown.value - v)), (l.resolution / 2)) bucketIndices = l.getBucketIndices(v) topDown = l.getBucketInfo(bucketIndices)[0] self.assertLessEqual(abs((topDown.value - v)), (l.resolution / 2)) self.assertEqual(topDown.value, l.getBucketValues()[bucketIndices[0]]) self.assertEqual(topDown.scalar, topDown.value) self.assertTrue(numpy.array_equal(topDown.encoding, output)) v += (l.resolution / 4) l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8, periodic=True, forced=True) decoded = l.decode(numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])) (fieldsDict, fieldNames) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) self.assertTrue(numpy.array_equal(ranges[0], [7.5, 7.5])) decoded = l.decode(numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])) (fieldsDict, fieldNames) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 2) self.assertTrue(numpy.array_equal(ranges[0], [7.5, 8])) self.assertTrue(numpy.array_equal(ranges[1], [1, 1])) decoded = l.decode(numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])) (fieldsDict, fieldNames) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) self.assertTrue(numpy.array_equal(ranges[0], [1.5, 2.5])) decoded = l.decode(numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0])) (fieldsDict, fieldNames) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 2) self.assertTrue(numpy.array_equal(ranges[0], [1.5, 1.5])) self.assertTrue(numpy.array_equal(ranges[1], [5.5, 6.0])) decoded = l.decode(numpy.array([0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0])) (fieldsDict, fieldNames) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 2) self.assertTrue(numpy.array_equal(ranges[0], [1.5, 1.5])) self.assertTrue(numpy.array_equal(ranges[1], [5.5, 6.0]))
'Test closenessScores for a periodic encoder'
def testCloseness(self):
encoder = ScalarEncoder(w=7, minval=0, maxval=7, radius=1, periodic=True, name='day of week', forced=True) scores = encoder.closenessScores((2, 4, 7), (4, 2, 1), fractional=False) for (actual, score) in itertools.izip((2, 2, 1), scores): self.assertEqual(actual, score)
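The expected scores (2, 2, 1) are circular distances on the periodic range: with fractional=False the closeness appears to be min(|a - b|, range - |a - b|) where range = maxval - minval = 7 (a reading inferred from the expected values, not the documented formula):

    def circular_distance(a, b, period=7):
        d = abs(a - b)
        return min(d, period - d)

    assert [circular_distance(a, b) for (a, b) in [(2, 4), (4, 2), (7, 1)]] == [2, 2, 1]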
'Test Non-periodic encoder bottom-up'
def testNonPeriodicBottomUp(self):
l = ScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10, periodic=False, forced=True) self.assertTrue(numpy.array_equal(l.encode(1), numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(2), numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(l.encode(10), numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=defaultDtype))) d = l.__dict__ l = ScalarEncoder(name='scalar', resolution=1, w=5, minval=1, maxval=10, periodic=False, forced=True) self.assertEqual(l.__dict__, d) l = ScalarEncoder(name='scalar', radius=5, w=5, minval=1, maxval=10, periodic=False, forced=True) self.assertEqual(l.__dict__, d) v = l.minval while (v < l.maxval): output = l.encode(v) decoded = l.decode(output) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) (rangeMin, rangeMax) = ranges[0] self.assertEqual(rangeMin, rangeMax) self.assertLess(abs((rangeMin - v)), l.resolution) topDown = l.topDownCompute(output)[0] self.assertTrue(numpy.array_equal(topDown.encoding, output)) self.assertLessEqual(abs((topDown.value - v)), l.resolution) bucketIndices = l.getBucketIndices(v) topDown = l.getBucketInfo(bucketIndices)[0] self.assertLessEqual(abs((topDown.value - v)), (l.resolution / 2)) self.assertEqual(topDown.scalar, topDown.value) self.assertTrue(numpy.array_equal(topDown.encoding, output)) v += (l.resolution / 4) decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1])) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) self.assertTrue(numpy.array_equal(ranges[0], [10, 10])) decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1])) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) self.assertTrue(numpy.array_equal(ranges[0], [10, 10])) l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=10, periodic=False, forced=True) decoded = l.topDownCompute(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]))[0] self.assertEqual(decoded.value, 10) decoded = l.topDownCompute(numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))[0] self.assertEqual(decoded.value, 1) l = ScalarEncoder(name='scalar', n=140, w=3, minval=1, maxval=141, periodic=False, forced=True) for i in range(137): iterlist = [0 for _ in range(140)] for j in range(i, (i + 3)): iterlist[j] = 1 npar = numpy.array(iterlist) decoded = l.topDownCompute(npar)[0] self.assertLessEqual(decoded.value, 141) self.assertGreaterEqual(decoded.value, 1) self.assertTrue(((decoded.value < 141) or (i == 137))) self.assertTrue(((decoded.value > 1) or (i == 0))) l = ScalarEncoder(name='scalar', n=15, w=3, minval=0.001, maxval=0.002, periodic=False, forced=True) v = l.minval while (v < l.maxval): output = l.encode(v) decoded = l.decode(output) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) (rangeMin, rangeMax) = ranges[0] self.assertEqual(rangeMin, rangeMax) self.assertLess(abs((rangeMin - v)), l.resolution) topDown = l.topDownCompute(output)[0].value self.assertLessEqual(abs((topDown - v)), (l.resolution / 2)) v += (l.resolution / 4) l = ScalarEncoder(name='scalar', n=15, w=3, minval=1, maxval=1000000000, periodic=False, forced=True) v = l.minval while (v < l.maxval): output = l.encode(v) decoded = l.decode(output) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) (rangeMin, rangeMax) = ranges[0] self.assertEqual(rangeMin, rangeMax) self.assertLess(abs((rangeMin - v)), l.resolution) topDown = l.topDownCompute(output)[0].value self.assertLessEqual(abs((topDown - v)), (l.resolution / 2)) v += (l.resolution / 4)
'Ensures that passing resolution as an int doesn\'t truncate values.'
def testGetBucketInfoIntResolution(self):
encoder = ScalarEncoder(w=3, resolution=1, minval=1, maxval=8, periodic=True, forced=True) self.assertEqual(4.5, encoder.topDownCompute(encoder.encode(4.5))[0].scalar)
'Test ScalarEncoder Cap\'n Proto serialization implementation.'
@unittest.skipUnless(capnp, 'pycapnp is not installed, skipping serialization test.') def testReadWrite(self):
originalValue = self._l.encode(1) proto1 = ScalarEncoderProto.new_message() self._l.write(proto1) with tempfile.TemporaryFile() as f: proto1.write(f) f.seek(0) proto2 = ScalarEncoderProto.read(f) encoder = ScalarEncoder.read(proto2) self.assertIsInstance(encoder, ScalarEncoder) self.assertEqual(encoder.w, self._l.w) self.assertEqual(encoder.minval, self._l.minval) self.assertEqual(encoder.maxval, self._l.maxval) self.assertEqual(encoder.periodic, self._l.periodic) self.assertEqual(encoder.n, self._l.n) self.assertEqual(encoder.radius, self._l.radius) self.assertEqual(encoder.resolution, self._l.resolution) self.assertEqual(encoder.name, self._l.name) self.assertEqual(encoder.verbosity, self._l.verbosity) self.assertEqual(encoder.clipInput, self._l.clipInput) self.assertTrue(numpy.array_equal(encoder.encode(1), originalValue)) self.assertEqual(self._l.decode(encoder.encode(1)), encoder.decode(self._l.encode(1))) result1 = self._l.encode(7) result2 = encoder.encode(7) self.assertTrue(numpy.array_equal(result1, result2))
'Setting n when maxval/minval = None creates instance.'
def testSettingNWithMaxvalMinvalNone(self):
encoder = ScalarEncoder(3, None, None, name='scalar', n=14, radius=0, resolution=0, forced=True) self.assertIsInstance(encoder, ScalarEncoder)
'Setting both scalar and resolution not allowed.'
def testSettingScalarAndResolution(self):
with self.assertRaises(ValueError): ScalarEncoder(3, None, None, name='scalar', n=0, radius=None, resolution=0.5, forced=True)
'Setting radius when maxval/minval = None creates instance.'
def testSettingRadiusWithMaxvalMinvalNone(self):
encoder = ScalarEncoder(3, None, None, name='scalar', n=0, radius=1.5, resolution=0, forced=True) self.assertIsInstance(encoder, ScalarEncoder)
'Send bitmap as array'
def testEncodeArray(self):
e = self._encoder(self.n, name=self.name) bitmap = [0, 0, 0, 1, 0, 0, 0, 0, 0] out = e.encode(bitmap) self.assertEqual(out.sum(), sum(bitmap)) x = e.decode(out) self.assertIsInstance(x[0], dict) self.assertTrue((self.name in x[0]))
'Send bitmap as numpy bit array'
def testEncodeBitArray(self):
e = self._encoder(self.n, name=self.name) bitmap = numpy.zeros(self.n, dtype=numpy.uint8) bitmap[3] = 1 bitmap[5] = 1 out = e.encode(bitmap) expectedSum = sum(bitmap) realSum = out.sum() self.assertEqual(realSum, expectedSum)
'Compare two bitmaps for closeness'
def testClosenessScores(self):
e = self._encoder(self.n, name=self.name) 'Identical => 1' bitmap1 = [0, 0, 0, 1, 1, 1, 0, 0, 0] bitmap2 = [0, 0, 0, 1, 1, 1, 0, 0, 0] out1 = e.encode(bitmap1) out2 = e.encode(bitmap2) c = e.closenessScores(out1, out2) self.assertEqual(c[0], 1.0) 'No overlap => 0' bitmap1 = [0, 0, 0, 1, 1, 1, 0, 0, 0] bitmap2 = [1, 1, 1, 0, 0, 0, 1, 1, 1] out1 = e.encode(bitmap1) out2 = e.encode(bitmap2) c = e.closenessScores(out1, out2) self.assertEqual(c[0], 0.0) 'Similar => 4 of 5 match' bitmap1 = [1, 0, 1, 0, 1, 0, 1, 0, 1] bitmap2 = [1, 0, 0, 1, 1, 0, 1, 0, 1] out1 = e.encode(bitmap1) out2 = e.encode(bitmap2) c = e.closenessScores(out1, out2) self.assertEqual(c[0], 0.8) 'Little => 1 of 5 match' bitmap1 = [1, 0, 0, 1, 1, 0, 1, 0, 1] bitmap2 = [0, 1, 1, 1, 0, 1, 0, 1, 0] out1 = e.encode(bitmap1) out2 = e.encode(bitmap2) c = e.closenessScores(out1, out2) self.assertEqual(c[0], 0.2) 'Extra active bit => off by 1 of 5' bitmap1 = [1, 0, 1, 0, 1, 0, 1, 0, 1] bitmap2 = [1, 0, 1, 1, 1, 0, 1, 0, 1] out1 = e.encode(bitmap1) out2 = e.encode(bitmap2) c = e.closenessScores(out1, out2) self.assertEqual(c[0], 0.8) 'Missing active bit => off by 1 of 5' bitmap1 = [1, 0, 1, 0, 1, 0, 1, 0, 1] bitmap2 = [1, 0, 0, 0, 1, 0, 1, 0, 1] out1 = e.encode(bitmap1) out2 = e.encode(bitmap2) c = e.closenessScores(out1, out2) self.assertEqual(c[0], 0.8)
'Test auto-grow: new categories get encodings on the fly while learning is enabled.'
def testAutogrow(self):
fieldWidth = 100 bitsOn = 10 s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, name='foo', verbosity=2, forced=True) encoded = numpy.zeros(fieldWidth) self.assertEqual(s.topDownCompute(encoded).value, '<UNKNOWN>') s.encodeIntoArray('catA', encoded) self.assertEqual(encoded.sum(), bitsOn) self.assertEqual(s.getScalars('catA'), 1) catA = encoded.copy() s.encodeIntoArray('catB', encoded) self.assertEqual(encoded.sum(), bitsOn) self.assertEqual(s.getScalars('catB'), 2) catB = encoded.copy() self.assertEqual(s.topDownCompute(catA).value, 'catA') self.assertEqual(s.topDownCompute(catB).value, 'catB') s.encodeIntoArray(SENTINEL_VALUE_FOR_MISSING_DATA, encoded) self.assertEqual(sum(encoded), 0) self.assertEqual(s.topDownCompute(encoded).value, '<UNKNOWN>') s.setLearning(False) s.encodeIntoArray('catC', encoded) self.assertEqual(encoded.sum(), bitsOn) self.assertEqual(s.getScalars('catC'), 0) self.assertEqual(s.topDownCompute(encoded).value, '<UNKNOWN>') s.setLearning(True) s.encodeIntoArray('catC', encoded) self.assertEqual(encoded.sum(), bitsOn) self.assertEqual(s.getScalars('catC'), 3) self.assertEqual(s.topDownCompute(encoded).value, 'catC')
'Test basic encoding functionality. Create encodings without crashing and check they contain the correct number of on and off bits. Check some encodings for expected overlap. Test that encodings for old values don\'t change once we generate new buckets.'
def testEncoding(self):
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, w=23, n=500, offset=0.0) e0 = encoder.encode((-0.1)) self.assertEqual(e0.sum(), 23, 'Number of on bits is incorrect') self.assertEqual(e0.size, 500, 'Width of the vector is incorrect') self.assertEqual(encoder.getBucketIndices(0.0)[0], (encoder._maxBuckets / 2), "Offset doesn't correspond to middle bucket") self.assertEqual(len(encoder.bucketMap), 1, 'Number of buckets is not 1') e1 = encoder.encode(1.0) self.assertEqual(len(encoder.bucketMap), 2, 'Number of buckets is not 2') self.assertEqual(e1.sum(), 23, 'Number of on bits is incorrect') self.assertEqual(e1.size, 500, 'Width of the vector is incorrect') self.assertEqual(computeOverlap(e0, e1), 22, 'Overlap is not equal to w-1') e25 = encoder.encode(25.0) self.assertGreater(len(encoder.bucketMap), 23, 'Expected more than 23 buckets after encoding 25.0') self.assertEqual(e25.sum(), 23, 'Number of on bits is incorrect') self.assertEqual(e25.size, 500, 'Width of the vector is incorrect') self.assertLess(computeOverlap(e0, e25), 4, 'Overlap is too high') self.assertTrue(numpy.array_equal(e0, encoder.encode((-0.1))), 'Encodings are not consistent - they have changed after new buckets have been created') self.assertTrue(numpy.array_equal(e1, encoder.encode(1.0)), 'Encodings are not consistent - they have changed after new buckets have been created')
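computeOverlap, used throughout the RDSE tests above and below, is presumably just the count of bits active in both encodings (a minimal sketch of the helper, not its actual definition in the test utilities):

    import numpy

    def computeOverlap(x, y):
        # Count positions where both binary encodings are on.
        return int(numpy.logical_and(x, y).sum())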
'Test that missing values and NaN return all zeros.'
def testMissingValues(self):
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0) empty = encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA) self.assertEqual(empty.sum(), 0) empty = encoder.encode(float('nan')) self.assertEqual(empty.sum(), 0)
'Test that numbers within the same resolution return the same encoding. Numbers outside the resolution should return different encodings.'
def testResolution(self):
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0) e23 = encoder.encode(23.0) e23p1 = encoder.encode(23.1) e22p9 = encoder.encode(22.9) e24 = encoder.encode(24.0) self.assertEqual(e23.sum(), encoder.w) self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(), "Numbers within resolution don't have the same encoding") self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(), "Numbers within resolution don't have the same encoding") self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(), 'Numbers outside resolution have the same encoding') e22p5 = encoder.encode(22.5) self.assertNotEqual((e23 == e22p5).sum(), encoder.getWidth(), 'Numbers outside resolution have the same encoding')
'Test that mapBucketIndexToNonZeroBits works and that max buckets and clipping are handled properly.'
def testMapBucketIndexToNonZeroBits(self):
encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150) encoder._initializeBucketMap(10, None) encoder.encode(0.0) encoder.encode((-7.0)) encoder.encode(7.0) self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets, '_maxBuckets exceeded') self.assertTrue(numpy.array_equal(encoder.mapBucketIndexToNonZeroBits((-1)), encoder.bucketMap[0]), 'mapBucketIndexToNonZeroBits did not handle negative index') self.assertTrue(numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(1000), encoder.bucketMap[9]), 'mapBucketIndexToNonZeroBits did not handle index beyond _maxBuckets') e23 = encoder.encode(23.0) e6 = encoder.encode(6) self.assertEqual((e23 == e6).sum(), encoder.getWidth(), 'Values not clipped correctly during encoding') ep8 = encoder.encode((-8)) ep7 = encoder.encode((-7)) self.assertEqual((ep8 == ep7).sum(), encoder.getWidth(), 'Values not clipped correctly during encoding') self.assertEqual(encoder.getBucketIndices((-8))[0], 0, 'getBucketIndices returned negative bucket index') self.assertEqual(encoder.getBucketIndices(23)[0], (encoder._maxBuckets - 1), 'getBucketIndices returned bucket index that is too large')
'Test that some bad construction parameters get handled.'
def testParameterChecks(self):
with self.assertRaises(ValueError): RandomDistributedScalarEncoder(name='mv', resolution=1.0, n=int((5.9 * 21))) with self.assertRaises(ValueError): RandomDistributedScalarEncoder(name='mv', resolution=1.0, n=(5.9 * 21)) with self.assertRaises(ValueError): RandomDistributedScalarEncoder(name='mv', resolution=1.0, w=(-1)) with self.assertRaises(ValueError): RandomDistributedScalarEncoder(name='mv', resolution=(-2))
'Check that the overlaps for the encodings are within the expected range. Here we ask the encoder to create a bunch of representations under somewhat stressful conditions, and then verify they are correct. We rely on the fact that the _overlapOK and _countOverlapIndices methods are working correctly.'
def testOverlapStatistics(self):
seed = getSeed() encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150, seed=seed) encoder.encode(0.0) encoder.encode((-300.0)) encoder.encode(300.0) self.assertTrue(validateEncoder(encoder, subsampling=3), 'Illegal overlap encountered in encoder')
'Test that the getWidth, getDescription, and getDecoderOutputFieldTypes methods work.'
def testGetMethods(self):
encoder = RandomDistributedScalarEncoder(name='theName', resolution=1.0, n=500) self.assertEqual(encoder.getWidth(), 500, "getWidth doesn't return the correct result") self.assertEqual(encoder.getDescription(), [('theName', 0)], "getDescription doesn't return the correct result") self.assertEqual(encoder.getDecoderOutputFieldTypes(), (FieldMetaType.float,), "getDecoderOutputFieldTypes doesn't return the correct result")
'Test that offset is working properly'
def testOffset(self):
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0) encoder.encode(23.0) self.assertEqual(encoder._offset, 23.0, 'Offset not specified and not initialized to first input') encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, offset=25.0) encoder.encode(23.0) self.assertEqual(encoder._offset, 25.0, 'Offset not initialized to specified constructor parameter')
'Test that initializing twice with the same seed returns identical encodings, and that different or unspecified seeds return different encodings.'
def testSeed(self):
encoder1 = RandomDistributedScalarEncoder(name='encoder1', resolution=1.0, seed=42) encoder2 = RandomDistributedScalarEncoder(name='encoder2', resolution=1.0, seed=42) encoder3 = RandomDistributedScalarEncoder(name='encoder3', resolution=1.0, seed=(-1)) encoder4 = RandomDistributedScalarEncoder(name='encoder4', resolution=1.0, seed=(-1)) e1 = encoder1.encode(23.0) e2 = encoder2.encode(23.0) e3 = encoder3.encode(23.0) e4 = encoder4.encode(23.0) self.assertEqual((e1 == e2).sum(), encoder1.getWidth(), 'Same seed gives rise to different encodings') self.assertNotEqual((e1 == e3).sum(), encoder1.getWidth(), 'Different seeds gives rise to same encodings') self.assertNotEqual((e3 == e4).sum(), encoder1.getWidth(), 'seeds of -1 give rise to same encodings')
'Test that the internal method _countOverlapIndices works as expected.'
def testCountOverlapIndices(self):
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, w=5, n=(5 * 20)) midIdx = (encoder._maxBuckets / 2) encoder.bucketMap[(midIdx - 2)] = numpy.array(range(3, 8)) encoder.bucketMap[(midIdx - 1)] = numpy.array(range(4, 9)) encoder.bucketMap[midIdx] = numpy.array(range(5, 10)) encoder.bucketMap[(midIdx + 1)] = numpy.array(range(6, 11)) encoder.bucketMap[(midIdx + 2)] = numpy.array(range(7, 12)) encoder.bucketMap[(midIdx + 3)] = numpy.array(range(8, 13)) encoder.minIndex = (midIdx - 2) encoder.maxIndex = (midIdx + 3) with self.assertRaises(ValueError): encoder._countOverlapIndices((midIdx - 3), (midIdx - 2)) with self.assertRaises(ValueError): encoder._countOverlapIndices((midIdx - 2), (midIdx - 3)) self.assertEqual(encoder._countOverlapIndices((midIdx - 2), (midIdx - 2)), 5, "_countOverlapIndices didn't work") self.assertEqual(encoder._countOverlapIndices((midIdx - 1), (midIdx - 2)), 4, "_countOverlapIndices didn't work") self.assertEqual(encoder._countOverlapIndices((midIdx + 1), (midIdx - 2)), 2, "_countOverlapIndices didn't work") self.assertEqual(encoder._countOverlapIndices((midIdx - 2), (midIdx + 3)), 0, "_countOverlapIndices didn't work")
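With the hand-built bucketMap above, where the bucket at offset i from midIdx holds a contiguous index range shifted by i, the overlap between buckets at offsets i and j is max(0, w - |i - j|); the four expected counts follow directly (a worked check of the test's arithmetic):

    w = 5
    def overlap(i, j):  # i, j are bucket offsets relative to midIdx
        return max(0, w - abs(i - j))
    assert overlap(-2, -2) == 5  # same bucket: full overlap
    assert overlap(-1, -2) == 4  # adjacent buckets
    assert overlap(+1, -2) == 2  # three buckets apart
    assert overlap(-2, +3) == 0  # five buckets apart: disjoint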
'Test that the internal method _overlapOK works as expected.'
def testOverlapOK(self):
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, w=5, n=(5 * 20)) midIdx = (encoder._maxBuckets / 2) encoder.bucketMap[(midIdx - 3)] = numpy.array(range(4, 9)) encoder.bucketMap[(midIdx - 2)] = numpy.array(range(3, 8)) encoder.bucketMap[(midIdx - 1)] = numpy.array(range(4, 9)) encoder.bucketMap[midIdx] = numpy.array(range(5, 10)) encoder.bucketMap[(midIdx + 1)] = numpy.array(range(6, 11)) encoder.bucketMap[(midIdx + 2)] = numpy.array(range(7, 12)) encoder.bucketMap[(midIdx + 3)] = numpy.array(range(8, 13)) encoder.minIndex = (midIdx - 3) encoder.maxIndex = (midIdx + 3) self.assertTrue(encoder._overlapOK(midIdx, (midIdx - 1)), "_overlapOK didn't work") self.assertTrue(encoder._overlapOK((midIdx - 2), (midIdx + 3)), "_overlapOK didn't work") self.assertFalse(encoder._overlapOK((midIdx - 3), (midIdx - 1)), "_overlapOK didn't work") self.assertTrue(encoder._overlapOK(100, 50, 0), "_overlapOK didn't work for far values") self.assertTrue(encoder._overlapOK(100, 50, encoder._maxOverlap), "_overlapOK didn't work for far values") self.assertFalse(encoder._overlapOK(100, 50, (encoder._maxOverlap + 1)), "_overlapOK didn't work for far values") self.assertTrue(encoder._overlapOK(50, 50, 5), "_overlapOK didn't work for near values") self.assertTrue(encoder._overlapOK(48, 50, 3), "_overlapOK didn't work for near values") self.assertTrue(encoder._overlapOK(46, 50, 1), "_overlapOK didn't work for near values") self.assertTrue(encoder._overlapOK(45, 50, encoder._maxOverlap), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(48, 50, 4), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(48, 50, 2), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(46, 50, 2), "_overlapOK didn't work for near values") self.assertFalse(encoder._overlapOK(50, 50, 6), "_overlapOK didn't work for near values")
'Test that the internal method _countOverlap works as expected.'
def testCountOverlap(self):
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, n=500) r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([1, 2, 3, 4, 5, 6]) self.assertEqual(encoder._countOverlap(r1, r2), 6, '_countOverlap result is incorrect') r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([1, 2, 3, 4, 5, 7]) self.assertEqual(encoder._countOverlap(r1, r2), 5, '_countOverlap result is incorrect') r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([6, 5, 4, 3, 2, 1]) self.assertEqual(encoder._countOverlap(r1, r2), 6, '_countOverlap result is incorrect') r1 = numpy.array([1, 2, 8, 4, 5, 6]) r2 = numpy.array([1, 2, 3, 4, 9, 6]) self.assertEqual(encoder._countOverlap(r1, r2), 4, '_countOverlap result is incorrect') r1 = numpy.array([1, 2, 3, 4, 5, 6]) r2 = numpy.array([1, 2, 3]) self.assertEqual(encoder._countOverlap(r1, r2), 3, '_countOverlap result is incorrect') r1 = numpy.array([7, 8, 9, 10, 11, 12]) r2 = numpy.array([1, 2, 3, 4, 5, 6]) self.assertEqual(encoder._countOverlap(r1, r2), 0, '_countOverlap result is incorrect')
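_countOverlap evidently counts the elements common to both index arrays; numpy.intersect1d reproduces every expected value above (a sketch, not the encoder's internal implementation):

    import numpy

    def countOverlap(r1, r2):
        # Number of values present in both arrays, regardless of order.
        return len(numpy.intersect1d(r1, r2))

    assert countOverlap(numpy.array([1, 2, 3, 4, 5, 6]), numpy.array([6, 5, 4, 3, 2, 1])) == 6
    assert countOverlap(numpy.array([1, 2, 8, 4, 5, 6]), numpy.array([1, 2, 3, 4, 9, 6])) == 4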
'Test that nothing is printed out when verbosity=0'
def testVerbosity(self):
_stdout = sys.stdout sys.stdout = _stringio = StringIO() encoder = RandomDistributedScalarEncoder(name='mv', resolution=1.0, verbosity=0) output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype) encoder.encodeIntoArray(23.0, output) encoder.getBucketIndices(23.0) sys.stdout = _stdout self.assertEqual(len(_stringio.getvalue()), 0, "zero verbosity doesn't lead to zero output")
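The stdout-capture pattern in this test is worth isolating: swap sys.stdout for a StringIO, run the code under test, then restore and inspect (Python 2, matching the code above; the try/finally guard is an addition the flattened body omits):

    import sys
    from StringIO import StringIO  # Python 2 module, as used above

    _stdout = sys.stdout
    sys.stdout = captured = StringIO()
    try:
        print 'hello'  # anything printed here is captured, not shown
    finally:
        sys.stdout = _stdout  # always restore the real stdout
    assert captured.getvalue() == 'hello\n'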
'Test missing values.'
def testMissingValues(self):
mv = AdaptiveScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True) empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA) self.assertEqual(empty.sum(), 0)
'Non-periodic encoder, min and max specified'
def testNonPeriodicEncoderMinMaxSpec(self):
self.assertTrue(numpy.array_equal(self._l.encode(1), numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(self._l.encode(2), numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype))) self.assertTrue(numpy.array_equal(self._l.encode(10), numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=defaultDtype)))
'Test the input description generation and topDown decoding'
def testTopDownDecode(self):
l = self._l v = l.minval while (v < l.maxval): output = l.encode(v) decoded = l.decode(output) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) (rangeMin, rangeMax) = ranges[0] self.assertEqual(rangeMin, rangeMax) self.assertLess(abs((rangeMin - v)), l.resolution) topDown = l.topDownCompute(output)[0] self.assertLessEqual(abs((topDown.value - v)), l.resolution) bucketIndices = l.getBucketIndices(v) topDown = l.getBucketInfo(bucketIndices)[0] self.assertLessEqual(abs((topDown.value - v)), (l.resolution / 2)) self.assertEqual(topDown.value, l.getBucketValues()[bucketIndices[0]]) self.assertEqual(topDown.scalar, topDown.value) self.assertTrue(numpy.array_equal(topDown.encoding, output)) v += (l.resolution / 4)
'Make sure we can fill in holes'
def testFillHoles(self):
l = self._l decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1])) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) self.assertSequenceEqual(ranges[0], [10, 10]) decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1])) (fieldsDict, _) = decoded self.assertEqual(len(fieldsDict), 1) (ranges, _) = fieldsDict.values()[0] self.assertEqual(len(ranges), 1) self.assertSequenceEqual(ranges[0], [10, 10])
'Non-periodic encoder, min and max not specified'
def testNonPeriodicEncoderMinMaxNotSpec(self):
l = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=None, maxval=None, periodic=False, forced=True) def _verify(v, encoded, expV=None): if (expV is None): expV = v self.assertTrue(numpy.array_equal(l.encode(v), numpy.array(encoded, dtype=defaultDtype))) self.assertLessEqual(abs((l.getBucketInfo(l.getBucketIndices(v))[0].value - expV)), (l.resolution / 2)) def _verifyNot(v, encoded): self.assertFalse(numpy.array_equal(l.encode(v), numpy.array(encoded, dtype=defaultDtype))) _verify(1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) _verify(2, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(3, [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]) _verify((-9), [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) _verify((-8), [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) _verify((-7), [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]) _verify((-6), [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]) _verify((-5), [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]) _verify(0, [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) _verify(8, [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0]) _verify(8, [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0]) _verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(11, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(12, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(13, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(14, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(15, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) l = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10, periodic=False, forced=True) _verify(1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) _verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify(10, [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) l.setLearning(False) _verify(30, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], expV=20) _verify(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify((-10), [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], expV=1) _verify((-1), [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], expV=1) l.setLearning(True) _verify(30, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verifyNot(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) _verify((-10), [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) _verifyNot((-1), [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
'Test setting the min and max using setFieldStats'
def testSetFieldStats(self):
def _dumpParams(enc): return (enc.n, enc.w, enc.minval, enc.maxval, enc.resolution, enc._learningEnabled, enc.recordNum, enc.radius, enc.rangeInternal, enc.padding, enc.nInternal) sfs = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10, periodic=False, forced=True) reg = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=100, periodic=False, forced=True) self.assertNotEqual(_dumpParams(sfs), _dumpParams(reg), 'Params should not be equal, since the two encoders were instantiated with different values.') sfs.setFieldStats('this', {'this': {'min': 1, 'max': 100}}) self.assertEqual(_dumpParams(sfs), _dumpParams(reg), 'Params should now be equal, but they are not. sFS should be equivalent to initialization.')
'Test that radius will round to the nearest integer'
def testRadiusForSpeedInt(self):
scale = 30 timestep = 62 speed = 25 encoder = GeospatialCoordinateEncoder(scale, timestep) radius = encoder.radiusForSpeed(speed) self.assertEqual(radius, 38)
'Testing MultiEncoder...'
def testMultiEncoder(self):
e = MultiEncoder() e.addEncoder('dow', ScalarEncoder(w=3, resolution=1, minval=1, maxval=8, periodic=True, name='day of week', forced=True)) e.addEncoder('myval', ScalarEncoder(w=5, resolution=1, minval=1, maxval=10, periodic=False, name='aux', forced=True)) self.assertEqual(e.getWidth(), 21) self.assertEqual(e.getDescription(), [('day of week', 0), ('aux', 7)]) d = DictObj(dow=3, myval=10) expected = numpy.array(([0, 1, 1, 1, 0, 0, 0] + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]), dtype='uint8') output = e.encode(d) self.assertTrue(numpy.array_equal(expected, output)) decoded = e.decode(output) self.assertEqual(len(decoded), 2) (ranges, _) = decoded[0]['aux'] self.assertEqual(len(ranges), 1) self.assertTrue(numpy.array_equal(ranges[0], [10, 10])) (ranges, _) = decoded[0]['day of week'] self.assertTrue(((len(ranges) == 1) and numpy.array_equal(ranges[0], [3, 3]))) e.addEncoder('myCat', SDRCategoryEncoder(n=7, w=3, categoryList=['run', 'pass', 'kick'], forced=True)) d = DictObj(dow=4, myval=6, myCat='pass') output = e.encode(d) topDownOut = e.topDownCompute(output) self.assertAlmostEqual(topDownOut[0].value, 4.5) self.assertEqual(topDownOut[1].value, 6.0) self.assertEqual(topDownOut[2].value, 'pass') self.assertEqual(topDownOut[2].scalar, 2) self.assertEqual(topDownOut[2].encoding.sum(), 3)
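A worked check of the width and offsets asserted at the top of this test, under the sizing rules the ScalarEncoder tests above imply (periodic: n = range / resolution; non-periodic: n = range / resolution + w; both formulas are assumptions consistent with the asserted values):

    dow_n = (8 - 1) / 1         # periodic 'day of week' field: 7 bits
    myval_n = (10 - 1) / 1 + 5  # non-periodic 'aux' field: 14 bits
    assert dow_n + myval_n == 21  # matches e.getWidth()
    assert dow_n == 7             # so 'aux' starts at bit offset 7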