'Test that we can load in a checkpoint saved by an earlier version of the OPF. Parameters: experiment: Directory of the experiment. checkpointName: which checkpoint to verify'
@staticmethod def _testBackwardsCompatibility(experiment, checkpointName):
expDir = os.path.join(_EXPERIMENT_BASE, experiment) if os.path.exists(os.path.join(expDir, 'savedmodels')): shutil.rmtree(os.path.join(expDir, 'savedmodels')) shutil.copytree(src=os.path.join(expDir, checkpointName), dst=os.path.join(expDir, 'savedmodels')) _aPlusBExp = runExperiment(args=[expDir, '--load=DefaultTask', '--noCheckpoint'])
'Test that we get the same predictions out of a model that was saved and reloaded from a checkpoint as we do from one that runs continuously.'
def test_NonTemporalMultiStep(self):
self._testSamePredictions(experiment='non_temporal_multi_step', predSteps=24, checkpointAt=250, predictionsFilename='DefaultTask.NontemporalMultiStep.predictionLog.csv')
'Test that we get the same predictions out of a model that was saved and reloaded from a checkpoint as we do from one that runs continuously. Uses new capnproto serialization.'
@unittest.skipUnless(capnp, 'pycapnp is not installed, skipping serialization test.') def test_NonTemporalMultiStepNew(self):
self._testSamePredictions(experiment='non_temporal_multi_step', predSteps=24, checkpointAt=250, predictionsFilename='DefaultTask.NontemporalMultiStep.predictionLog.csv', newSerialization=True)
'Test that we get the same predictions out of a model that was saved and reloaded from a checkpoint as we do from one that runs continuously.'
@unittest.skip('Currently Fails: NUP-1864') def test_TemporalMultiStep(self):
self._testSamePredictions(experiment='temporal_multi_step', predSteps=24, checkpointAt=250, predictionsFilename='DefaultTask.TemporalMultiStep.predictionLog.csv')
'Test that we get the same predictions out of a model that was saved and reloaded from a checkpoint as we do from one that runs continuously.'
@unittest.skip('Currently Fails: NUP-1864') def test_TemporalAnomaly(self):
self._testSamePredictions(experiment='temporal_anomaly', predSteps=1, checkpointAt=250, predictionsFilename='DefaultTask.TemporalAnomaly.predictionLog.csv', additionalFields=['anomalyScore'])
'Test that we can load in a checkpoint saved by an earlier version of the OPF.'
@unittest.skip("We aren't currently supporting serialization backward compatibility") def test_BackwardsCompatibility(self):
self._testBackwardsCompatibility(os.path.join('backwards_compatibility', 'a'), 'savedmodels_2012-10-05')
'Method called to prepare the test fixture. This is called immediately before calling the test method; any exception raised by this method will be considered an error rather than a test failure. The default implementation does nothing. NOTE: this is called once for every sub-test and a new AggregationTests instance is constructed for every sub-test.'
def setUp(self):
print return
'Run all the tests in our suite, catching any exceptions that might be thrown.'
def testAll(self):
print 'VectorFileSensorTest parameters:' print ('PYTHONPATH: %s' % os.environ.get('PYTHONPATH', 'NOT SET')) print ('filename: %s' % self.filename) self._testRunWithoutFile() self._testNetLoad() self._testFakeLoadFile() self._testRepeatCount() self._testUnknownCommand() self._testOutputCounts(0) self._testLoadFile(self.dataFile, '0', '0') self._testOutputCounts(5) self._testLoadFile(self.dataFile, '0', '0') self._testRun() self._testLoadFile(self.dataFile2, '', '0') self._testRun() self._testLoadFile(self.dataFile2, '2', '0') self._testRun() self._testLoadFile(self.dataFile3a, '3', '0') self._testRun() self._testLoadFile(self.dataFile4, '4', '0') self._testRun() self._testLoadFile(self.dataFile5, '5', '0') self._testRun() self._testLoadFile(self.dataFile6, '6', '0') self._testRun() self._testPosition() self._testAppendFile(self.dataFile2, '2', '1', 10) self._testAppendFile(self.dataFile, '0', '1', 15) self._testRun() self._testScaling(self.dataFile3b, '3') self.sensor.setParameter('hasCategoryOut', 1) self.sensor.setParameter('hasResetOut', 1) self._testLoadFile(self.dataFile3c, '3', '0') self._testOptionalOutputs() self.sensor.setParameter('hasCategoryOut', 0) self.sensor.setParameter('hasResetOut', 0)
'Test loading a network with this sensor in it.'
def _testNetLoad(self):
n = Network() r = n.addRegion(self.nodeName, self.sensorName, '{ activeOutputCount: 11}') r.dimensions = Dimensions([1]) n.save(self.filename) n = Network(self.filename) n.initialize() self.testsPassed += 1 r = n.regions[self.nodeName] res = r.getParameter('vectorCount') self.assertEqual(res, 0, ("getting vectorCount:\n Expected '0', got back '%d'\n" % res)) self.sensor = r
'Test reading in a fake file.'
def _testFakeLoadFile(self):
with self.assertRaises(RuntimeError): self.sensor.executeCommand(['loadFile', 'ExistenceIsAnIllusion.txt', '0'])
'Test running the network without a file loaded. This should be run before any file has been loaded in!'
def _testRunWithoutFile(self):
with self.assertRaises(AttributeError): self.sensor.compute()
'Test setting and getting repeat count using parameters.'
def _testRepeatCount(self):
n = Network(self.filename) sensor = n.regions[self.nodeName] res = sensor.executeCommand(['dump']) expected = (self.sensorName + ' isLabeled = 0 repeatCount = 1 vectorCount = 0 iterations = 0\n') self.assertEqual(res, expected, ("repeat count test:\n expected '%s'\n got '%s'\n" % (expected, res))) sensor.setParameter('repeatCount', 42) res = sensor.getParameter('repeatCount') self.assertEqual(res, 42, ("set repeatCount to 42:\n got back '%d'\n" % res)) res = sensor.executeCommand(['dump']) expected = (self.sensorName + ' isLabeled = 0 repeatCount = 42 vectorCount = 0 iterations = 0\n') self.assertEqual(res, expected, ("set to 42 test:\n expected '%s'\n got '%s'\n" % (expected, res))) sensor.setParameter('repeatCount', 1)
'Test reading our sample vector file. The sample file has 5 vectors of the correct length, plus one with incorrect length. The sensor should ignore the last line.'
def _testLoadFile(self, dataFile, fileFormat='', iterations=''):
if (fileFormat != ''): res = self.sensor.executeCommand(['loadFile', dataFile, fileFormat]) else: res = self.sensor.executeCommand(['loadFile', dataFile]) self.assertTrue(((res == '') or res.startswith('VectorFileSensor read in file')), ('loading a real file: %s' % str(res))) res = self.sensor.getParameter('recentFile') self.assertEqual(res, dataFile, ('recent file, got: %s' % res)) res = self.sensor.executeCommand(['dump']) expected = (((self.sensorName + ' isLabeled = 0 repeatCount = 1 vectorCount = 5 iterations = ') + iterations) + '\n') self.assertEqual(res, expected, ('file summary:\n expected "%s"\n got "%s"\n' % (expected, res)))
'Test appending our sample vector file. The sample file has 5 vectors of the correct length, plus one with incorrect length. The sensor should ignore the last line.'
def _testAppendFile(self, dataFile, fileFormat='', iterations='', numVecs=''):
if (fileFormat != ''): res = self.sensor.executeCommand(['appendFile', dataFile, fileFormat]) else: res = self.sensor.executeCommand(['appendFile', dataFile]) self.assertTrue(((res == '') or res.startswith('VectorFileSensor read in file')), ('loading a real file: %s' % str(res))) res = self.sensor.getParameter('recentFile') self.assertEqual(res, dataFile, ('recent file, got: %s' % res)) res = self.sensor.executeCommand(['dump']) expected = ((((((self.sensorName + ' isLabeled = 0 repeatCount = 1') + ' vectorCount = ') + str(numVecs)) + ' iterations = ') + iterations) + '\n') self.assertEqual(res, expected, ('file summary:\n expected "%s"\n got "%s"\n' % (expected, res))) res = self.sensor.getParameter('vectorCount') self.assertEqual(res, numVecs, (('getting position:\n Expected ' + str(numVecs)) + (', got back "%s"\n' % res)))
'This is the basic workhorse test routine. It runs the net several times to ensure the sensor is outputting the correct values. The routine tests looping, tests each vector, and tests repeat count.'
def _testRun(self):
self.sensor.setParameter('repeatCount', 3) self.sensor.setParameter('position', 0) for _epoch in [1, 2]: for vec in [0, 1, 2, 3, 4]: for _rc in [1, 2, 3]: self.sensor.compute() outputs = self.sensor.getOutputData('dataOut') self.assertEqual(outputs[vec], (vec + 1), ('output = %s' % str(outputs))) self.assertEqual(sum(outputs), (vec + 1), ('output = %s' % str(outputs))) self.sensor.setParameter('repeatCount', 1)
'Test maxOutputVectorCount with different repeat counts.'
def _testOutputCounts(self, vectorCount):
res = self.sensor.getParameter('maxOutputVectorCount') self.assertEqual(res, vectorCount, (("getting maxOutputVectorCount:\n Expected '" + str(vectorCount)) + ("', got back '%d'\n" % res))) self.sensor.setParameter('repeatCount', 3) res = self.sensor.getParameter('maxOutputVectorCount') self.assertEqual(res, (3 * vectorCount), (('getting maxOutputVectorCount:\n Expected ' + str((3 * vectorCount))) + (', got back "%d"\n' % res))) self.sensor.setParameter('repeatCount', 1) res = self.sensor.getParameter('activeOutputCount') self.assertEqual(res, 11, ('getting activeOutputCount :\n Expected 11, got back "%d"\n' % res))
'Test setting and getting position parameter. Run compute once to verify it went to the right position.'
def _testPosition(self):
self.sensor.setParameter('position', 2) self.sensor.compute() outputs = self.sensor.getOutputData('dataOut') self.assertEqual(outputs[2], 3, ('output = %s' % str(outputs))) self.assertEqual(sum(outputs), 3, ('output = %s' % str(outputs))) res = self.sensor.getParameter('position') self.assertEqual(res, 3, ('getting position:\n Expected "3", got back "%d"\n' % res))
'Specific tests for setScaleVector, setOffsetVector, and scalingMode'
def _testScaling(self, dataFile, fileFormat=''):
res = self.sensor.getParameter('scalingMode') self.assertEqual(res, 'none', ('Getting scalingMode:\n Expected "none", got back "%s"\n' % res)) a = Array('Real32', 11) self.sensor.getParameterArray('scaleVector', a) self.assertEqual(str(a), '[ 1 1 1 1 1 1 1 1 1 1 1 ]', ('Error getting ones scaleVector:\n Got back "%s"\n' % str(res))) self.sensor.getParameterArray('offsetVector', a) self.assertEqual(str(a), '[ 0 0 0 0 0 0 0 0 0 0 0 ]', ('Error getting zero offsetVector:\n Got back "%s"\n' % str(res))) self.sensor.executeCommand(['loadFile', dataFile, fileFormat]) self.sensor.setParameter('scalingMode', 'standardForm') self.sensor.getParameterArray('scaleVector', a) s = '[ 2.23607 1.11803 0.745356 0.559017 0.447214 2.23607 1.11803 0.745356 0.559017 0.447214 2.23607 ]' self.assertEqual(str(a), s, ('Error getting standardForm scaleVector:\n Got back "%s"\n' % res)) o = '[ -0.2 -0.4 -0.6 -0.8 -1 -0.2 -0.4 -0.6 -0.8 -1 -0.2 ]' self.sensor.getParameterArray('offsetVector', a) self.assertEqual(str(a), o, ('Error getting standardForm offsetVector:\n Got back "%s"\n' % res)) scaleVector = Array('Real32', 11) for (i, x) in enumerate((1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1)): scaleVector[i] = x self.sensor.setParameterArray('scaleVector', scaleVector) self.sensor.getParameterArray('scaleVector', a) self.assertEqual(str(a), str(scaleVector), ('Error getting modified scaleVector:\n Got back "%s"\n' % str(res))) offsetVector = Array('Real32', 11) for (i, x) in enumerate((1, 2, 3, 4, 1, 1, 1, 1, 1, 2, 1)): offsetVector[i] = x self.sensor.setParameterArray('offsetVector', offsetVector) self.sensor.getParameterArray('offsetVector', a) self.assertEqual(str(a), str(offsetVector), ('Error getting modified offsetVector:\n Got back "%s"\n' % str(res))) mode = self.sensor.getParameter('scalingMode') self.assertEqual(mode, 'custom', ('Getting scalingMode:\n Expected "custom", got back "%s"\n' % res)) res = self.sensor.executeCommand(['loadFile', dataFile, fileFormat]) self.sensor.getParameterArray('offsetVector', a) self.assertEqual(str(a), str(offsetVector), ('Error getting modified offsetVector after loadFile:\n Got back "%s"\n' % res)) self.sensor.getParameterArray('scaleVector', a) self.assertEqual(str(a), str(scaleVector), ('Error getting modified scaleVector after loadFile:\n Got back "%s"\n' % res)) self.sensor.setParameter('scalingMode', 'none') self.sensor.getParameterArray('scaleVector', a) noScaling = Array('Real32', 11) for i in range(11): noScaling[i] = 1 self.assertEqual(str(a), str(noScaling), ('Error getting ones scaleVector:\n Got back "%s"\n' % res)) noOffset = Array('Real32', 11) for i in range(11): noOffset[i] = 0 self.sensor.getParameterArray('offsetVector', a) self.assertEqual(str(a), str(noOffset), ('Error getting zero offsetVector:\n Got back "%s"\n' % res))
'Test that exception is thrown when unknown execute command sent.'
def _testUnknownCommand(self):
with self.assertRaises(RuntimeError): self.sensor.executeCommand(['nonExistentCommand'])
'Test the optional categoryOut and resetOut outputs. Runs the net the same way as _testRun and verifies that the category and reset output sequences match the expected values.'
def _testOptionalOutputs(self):
self.sensor.setParameter('repeatCount', 3) self.sensor.setParameter('position', 0) categories = [] resetOuts = [] for _epoch in [1, 2]: for vec in [0, 1, 2, 3, 4]: for _rc in [1, 2, 3]: self.sensor.compute() outputs = self.sensor.getOutputData('dataOut') a = self.sensor.getOutputData('categoryOut') categories.append(a[0]) a = self.sensor.getOutputData('resetOut') resetOuts.append(a[0]) self.assertEqual(outputs[vec], (vec + 1), ('output = %s' % str(outputs))) self.assertEqual(sum(outputs), (vec + 1), ('output = %s' % str(outputs))) self.assertEqual(categories, (2 * (([6] * 12) + ([8] * 3)))) self.assertEqual(resetOuts, (2 * [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])) self.sensor.setParameter('repeatCount', 1)
'Test compatibility between C++ and Python TM implementation.'
def testTMPyCpp(self):
results1 = createAndRunNetwork(TMRegion, 'bottomUpOut', checkpointMidway=False, temporalImp='tm_cpp') results2 = createAndRunNetwork(TMRegion, 'bottomUpOut', checkpointMidway=False, temporalImp='tm_py') self.compareArrayResults(results1, results2)
'Function that iterates through the launched processes and counts how many are currently alive. Sets __numRunningProcs to this count and returns it.'
def __updateProcessCounter(self):
newcounter = 0 for job in self.__procs: if job.is_alive(): newcounter += 1 self.__numRunningProcs = newcounter return newcounter
'Function that cancels all the jobs in the process queue.'
def cancelJobs(self):
print 'Terminating all Jobs due to reaching timeout' for proc in self.__procs: if proc.is_alive(): proc.terminate() print 'All jobs have been terminated'
'Function that launches Hypersearch benchmark jobs. Runs the jobs contained in self.testQ, keeping at most maxJobs running in parallel; once that limit is reached it waits for running jobs to finish before launching more.'
def runJobs(self, maxJobs):
jobsrunning = self.__numRunningProcs if (maxJobs > 1): jobsindx = 0 while ((jobsindx < len(self.testQ)) or (jobsrunning > 0)): if ((jobsindx < len(self.testQ)) and (jobsrunning < maxJobs)): curJob = self.testQ[jobsindx] p = Process(target=curJob[0], args=curJob[1]) p.start() self.__procs.append(p) jobsindx += 1 if (jobsrunning >= maxJobs): time.sleep(30) print 'Maximum number of jobs running, waiting before launching new jobs' elif (jobsindx == len(self.testQ)): time.sleep(30) print 'Waiting for all scheduled tests to finish.' jobsrunning = self.__updateProcessCounter() for proc in self.__procs: if (proc.exitcode == 1): self.cancelJobs() assert False, 'Some jobs have not been able to complete in the allotted time.' try: while True: result = self.__resultQ.get(True, 5) self.assertBenchmarks(result) except Empty: pass
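# Self-contained sketch of the launch-and-poll pattern used above (an
# illustration only, not the benchmark runner itself): start at most
# maxJobs multiprocessing workers and throttle by polling is_alive().
import time
from multiprocessing import Process

def _work(n):
    time.sleep(0.1 * n)

def runQueue(testQ, maxJobs):
    procs = []
    idx = 0
    while idx < len(testQ) or any(p.is_alive() for p in procs):
        running = sum(1 for p in procs if p.is_alive())
        if idx < len(testQ) and running < maxJobs:
            # each queue entry is a (callable, argsTuple) pair, mirroring
            # the Process(target=curJob[0], args=curJob[1]) call above
            target, args = testQ[idx]
            p = Process(target=target, args=args)
            p.start()
            procs.append(p)
            idx += 1
        else:
            time.sleep(0.05)

if __name__ == '__main__':
    runQueue([(_work, (i,)) for i in range(5)], maxJobs=2)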
'Set up a list of export dicts covering every combination of maxBranchings and maxParticles.'
def setUpExportDicts(self):
ret = [] if (self.maxBranchings is None): self.maxBranchings = [None] else: self.maxBranchings = self.maxBranchings.split(',') if (self.maxParticles is None): self.maxParticles = [None] else: self.maxParticles = self.maxParticles.split(',') for branch in self.maxBranchings: for part in self.maxParticles: curdict = dict() if (not (branch is None)): curdict[self.BRANCHING_PROP] = branch if (not (part is None)): curdict[self.PARTICLE_PROP] = part ret += [curdict] return ret
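# Illustration of the combinations produced above (values stay strings
# because they come from str.split(',')); the property names below are
# placeholders standing in for the BRANCHING_PROP / PARTICLE_PROP class
# constants, whose real values are not shown in this excerpt.
BRANCHING_PROP = 'maxBranching'   # assumed placeholder name
PARTICLE_PROP = 'maxParticles'    # assumed placeholder name

def combos(maxBranchings, maxParticles):
    branchings = [None] if maxBranchings is None else maxBranchings.split(',')
    particles = [None] if maxParticles is None else maxParticles.split(',')
    out = []
    for branch in branchings:
        for part in particles:
            d = {}
            if branch is not None:
                d[BRANCHING_PROP] = branch
            if part is not None:
                d[PARTICLE_PROP] = part
            out.append(d)
    return out

print(combos('3,5', None))   # [{'maxBranching': '3'}, {'maxBranching': '5'}]
print(combos(None, None))    # [{}]  -> a single run with no overrides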
'Try running a basic experiment and permutations.'
def benchmarkHotGym(self):
dataPath = os.path.join(self.datadir, 'hotgym', 'hotgym.csv') streamDef = dict(version=1, info='hotgym benchmark test', streams=[dict(source=('file://%s' % dataPath), info='hotgym.csv', columns=['gym', 'timestamp', 'consumption'], last_record=self.splits['hotgym'])], aggregation={'hours': 1, 'fields': [('consumption', 'sum'), ('gym', 'first')]}) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'consumption' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 1.1, 'maxValue': 44.72}, {'fieldName': 'gym', 'fieldType': 'string'}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'hotgym') self.generateModules(expDesc, expdir) self.descriptions['hotgym'] = (expdir, expDesc) return expdir
'Try running a basic experiment and permutations'
def benchmarkSine(self):
dataPath = os.path.join(self.datadir, 'sine', 'sine.csv') streamDef = dict(version=1, info='hotgym benchmark test', streams=[dict(source=('file://%s' % dataPath), info='sine.csv', columns=['Sine', 'angle'], last_record=self.splits['sine'])]) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'Sine' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'Sine', 'fieldType': 'float', 'minValue': (-1.0), 'maxValue': 1.0}, {'fieldName': 'angle', 'fieldType': 'float', 'minValue': 0.0, 'maxValue': 25.0}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'sine') self.generateModules(expDesc, expdir) self.descriptions['sine'] = (expdir, expDesc) return expdir
'Try running a basic experiment and permutations'
def benchmarkTwoVars(self):
dataPath = os.path.join(self.datadir, 'generated', 'spatial', 'linear_two_fields', 'sample2.csv') streamDef = dict(version=1, info='two fields test', streams=[dict(source=('file://%s' % dataPath), info='linear_two_fields', columns=['field1', 'field2'], last_record=self.splits['twovars'])]) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'field1' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'field1', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}, {'fieldName': 'field2', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'twovars') self.generateModules(expDesc, expdir) self.descriptions['twovars'] = (expdir, expDesc) return expdir
'Try running a basic experiment and permutations'
def benchmarkThreeVars(self):
dataPath = os.path.join(self.datadir, 'generated', 'spatial', 'linear_two_plus_one_fields', 'sample1.csv') streamDef = dict(version=1, info='three fields test', streams=[dict(source=('file://%s' % dataPath), info='linear_two_plus_one_fields', columns=['field1', 'field2', 'field3'], last_record=self.splits['threevars'])]) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'field1' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'field1', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}, {'fieldName': 'field2', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}, {'fieldName': 'field3', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'threevars') self.generateModules(expDesc, expdir) self.descriptions['threevars'] = (expdir, expDesc) return expdir
'Try running a basic experiment and permutations'
def benchmarkFourVars(self):
dataPath = os.path.join(self.datadir, 'generated', 'spatial', 'sum_two_fields_plus_extra_field', 'sample1.csv') streamDef = dict(version=1, info='four fields test', streams=[dict(source=('file://%s' % dataPath), info='linear_two_plus_one_fields', columns=['field1', 'field2', 'field3', 'field4'], last_record=self.splits['fourvars'])]) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'field1' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'field1', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 210}, {'fieldName': 'field2', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}, {'fieldName': 'field3', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}, {'fieldName': 'field4', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'fourvars') self.generateModules(expDesc, expdir) self.descriptions['fourvars'] = (expdir, expDesc) return expdir
'Try running a basic experiment and permutations'
def benchmarkCategories(self):
dataPath = os.path.join(self.datadir, 'generated', 'temporal', 'categories', 'sample1.csv') streamDef = dict(version=1, info='categories test', streams=[dict(source=('file://%s' % dataPath), info='categories', columns=['field1', 'field2'], last_record=self.splits['categories'])]) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'field2' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'field1', 'fieldType': 'string'}, {'fieldName': 'field2', 'fieldType': 'string'}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'categories') self.generateModules(expDesc, expdir) self.descriptions['categories'] = (expdir, expDesc) return expdir
'Try running a basic experiment and permutations'
def benchmarkTwoVarsSquare(self):
dataPath = os.path.join(self.datadir, 'generated', 'spatial', 'linear_two_fields', 'sample3.csv') streamDef = dict(version=1, info='three fields test', streams=[dict(source=('file://%s' % dataPath), info='linear_two_fields', columns=['field1', 'field2'], last_record=self.splits['twovars2'])]) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'field1' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'field1', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 110}, {'fieldName': 'field2', 'fieldType': 'int', 'minValue': (-10), 'maxValue': 10010}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'twovars2') self.generateModules(expDesc, expdir) self.descriptions['twovars2'] = (expdir, expDesc) return expdir
'Try running a basic experiment and permutations'
def benchmarkSawtooth(self):
dataPath = os.path.join(self.datadir, 'sawtooth', 'sawtooth.csv') streamDef = dict(version=1, info='sawtooth test', streams=[dict(source=('file://%s' % dataPath), info='sawtooth', columns=['value'], last_record=self.splits['sawtooth'])]) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'value' expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'value', 'fieldType': 'int', 'runDelta': True}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'sawtooth') self.generateModules(expDesc, expdir) self.descriptions['sawtooth'] = (expdir, expDesc) return expdir
'The HotGym dataset, only the first gym, solved using spatial classification. This model learns the association between the date/time stamp and the consumption - the model does not get consumption fed in at the bottom.'
def benchmarkHotGymSC(self):
dataPath = os.path.join(self.datadir, 'hotgym', 'hotgym.csv') streamDef = dict(version=1, info='hotgym spatial classification benchmark test', streams=[dict(source=('file://%s' % dataPath), info='hotgym.csv', columns=['gym', 'timestamp', 'consumption'], last_record=self.splits['hotgymsc'])], aggregation={'hours': 1, 'fields': [('consumption', 'sum'), ('gym', 'first')]}) expDesc = OPFBenchmarkRunner.EXP_COMMON.copy() expDesc['inferenceArgs']['predictedField'] = 'consumption' expDesc['inferenceArgs']['predictionSteps'] = [0] expDesc.update({'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 0, 'maxValue': 100}, {'fieldName': 'gym', 'fieldType': 'string'}], 'iterationCount': self.__recordsToProcess}) expdir = os.path.join(self.outdir, 'hotgymsc') self.generateModules(expDesc, expdir) self.descriptions['hotgymsc'] = (expdir, expDesc) return expdir
'This calls ExpGenerator to generate a base description file and permutations file from expDesc. Parameters: expDesc: experiment description dict. outdir: output directory to write the generated modules to.'
def generateModules(self, expDesc, outdir):
jobParams = dict(description=expDesc) shutil.rmtree(outdir, ignore_errors=True) outdirv2term = os.path.join(outdir, 'v2Term', 'base') outdirv2noterm = os.path.join(outdir, 'v2NoTerm', 'base') outdirdef = os.path.join(outdir, 'cluster_default', 'base') args = [('--description=%s' % json.dumps(expDesc)), '--version=v2', ('--outDir=%s' % outdirv2term)] if self.__doV2Term: experiment_generator.expGenerator(args) args = [('--description=%s' % json.dumps(expDesc)), '--version=v2', ('--outDir=%s' % outdirv2noterm)] if self.__doV2noTerm: experiment_generator.expGenerator(args) args = [('--description=%s' % json.dumps(expDesc)), '--version=v2', ('--outDir=%s' % outdirdef)] if self.__doClusterDef: experiment_generator.expGenerator(args)
'Run the entire set of OPF benchmark experiments'
def testOPFBenchmarks(self):
for bm in self.listOfBenchmarks: if (not (bm in self.allBenchmarks)): raise Exception(('Unknown benchmark %s' % bm)) fifodirs = deque() baseoutdir = self.outdir iterations = self.iterations exportDicts = self.setUpExportDicts() for iter in range(iterations): for exports in exportDicts: if (len(exportDicts) > 1): prependDict = exports else: prependDict = dict() if (self.iterations > 1): prependDict['iteration'] = iter prepend = self.generatePrependPath(prependDict) self.outdir = os.path.join(baseoutdir, prepend) if ('sine' in self.listOfBenchmarks): tmpsine = self.benchmarkSine() fifodirs.append(tmpsine) if ('hotgym' in self.listOfBenchmarks): tmphotgym = self.benchmarkHotGym() fifodirs.append(tmphotgym) if ('twovars' in self.listOfBenchmarks): tmptwovars = self.benchmarkTwoVars() fifodirs.append(tmptwovars) if ('twovars2' in self.listOfBenchmarks): tmptwovars2 = self.benchmarkTwoVarsSquare() fifodirs.append(tmptwovars2) if ('threevars' in self.listOfBenchmarks): tmpthreevars = self.benchmarkThreeVars() fifodirs.append(tmpthreevars) if ('fourvars' in self.listOfBenchmarks): tmpfourvars = self.benchmarkFourVars() fifodirs.append(tmpfourvars) if ('categories' in self.listOfBenchmarks): tmpcategories = self.benchmarkCategories() fifodirs.append(tmpcategories) if ('sawtooth' in self.listOfBenchmarks): tmpcategories = self.benchmarkSawtooth() fifodirs.append(tmpcategories) if ('hotgymsc' in self.listOfBenchmarks): tmphotgymsc = self.benchmarkHotGymSC() fifodirs.append(tmphotgymsc) self.outdir = baseoutdir self.syncFiles() if self.filesOnly: return if (self.maxConcurrentJobs == 1): self.runBenchmarks = self.runBenchmarksSerial else: self.runBenchmarks = self.runBenchmarksParallel for iter in range(iterations): for exports in exportDicts: if ('sine' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'sine', exports) if ('hotgym' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'hotgym', exports) if ('twovars' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'twovars', exports) if ('twovars2' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'twovars2', exports) if ('threevars' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'threevars', exports) if ('fourvars' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'fourvars', exports) if ('categories' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'categories', exports) if ('sawtooth' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'sawtooth', exports) if ('hotgymsc' in self.listOfBenchmarks): assert self.runBenchmarks(fifodirs.popleft(), 'hotgymsc', exports) self.runJobs(self.maxConcurrentJobs) if (self.__trainFraction < 1.0): self.runProductionWorkers() self.waitForProductionWorkers() self.printResults() self.assertResults()
'Simple test asserting that ModelFactory.create() with a specific TemporalAnomaly configuration returns a model that can produce inferences.'
def testTemporalAnomalyModelFactory(self):
modelConfig = {u'aggregationInfo': {u'days': 0, u'fields': [], u'hours': 0, u'microseconds': 0, u'milliseconds': 0, u'minutes': 0, u'months': 0, u'seconds': 0, u'weeks': 0, u'years': 0}, u'model': u'HTMPrediction', u'modelParams': {u'anomalyParams': {u'anomalyCacheRecords': None, u'autoDetectThreshold': None, u'autoDetectWaitRecords': 5030}, u'clEnable': False, u'clParams': {u'alpha': 0.035828933612158, u'verbosity': 0, u'regionName': u'SDRClassifierRegion', u'steps': u'1'}, u'inferenceType': u'TemporalAnomaly', u'sensorParams': {u'encoders': {u'c0_dayOfWeek': None, u'c0_timeOfDay': {u'fieldname': u'c0', u'name': u'c0', u'timeOfDay': [21, 9.49122334747737], u'type': u'DateEncoder'}, u'c0_weekend': None, u'c1': {u'fieldname': u'c1', u'name': u'c1', u'resolution': 0.8771929824561403, u'seed': 42, u'type': u'RandomDistributedScalarEncoder'}}, u'sensorAutoReset': None, u'verbosity': 0}, u'spEnable': True, u'spParams': {u'potentialPct': 0.8, u'columnCount': 2048, u'globalInhibition': 1, u'inputWidth': 0, u'boostStrength': 0.0, u'numActiveColumnsPerInhArea': 40, u'seed': 1956, u'spVerbosity': 0, u'spatialImp': u'cpp', u'synPermActiveInc': 0.0015, u'synPermConnected': 0.1, u'synPermInactiveDec': 0.0005}, u'tmEnable': True, u'tmParams': {u'activationThreshold': 13, u'cellsPerColumn': 32, u'columnCount': 2048, u'globalDecay': 0.0, u'initialPerm': 0.21, u'inputWidth': 2048, u'maxAge': 0, u'maxSegmentsPerCell': 128, u'maxSynapsesPerSegment': 32, u'minThreshold': 10, u'newSynapseCount': 20, u'outputType': u'normal', u'pamLength': 3, u'permanenceDec': 0.1, u'permanenceInc': 0.1, u'seed': 1960, u'temporalImp': u'cpp', u'verbosity': 0}, u'trainSPNetOnlyIfRequested': False}, u'predictAheadTime': None, u'version': 1} inferenceArgs = {u'inputPredictedField': u'auto', u'predictedField': u'c1', u'predictionSteps': [1]} data = [{'_category': [None], '_reset': 0, '_sequenceId': 0, '_timestamp': datetime.datetime(2013, 12, 5, 0, 0), '_timestampRecordIdx': None, u'c0': datetime.datetime(2013, 12, 5, 0, 0), u'c1': 5.0}, {'_category': [None], '_reset': 0, '_sequenceId': 0, '_timestamp': datetime.datetime(2013, 12, 6, 0, 0), '_timestampRecordIdx': None, u'c0': datetime.datetime(2013, 12, 6, 0, 0), u'c1': 6.0}, {'_category': [None], '_reset': 0, '_sequenceId': 0, '_timestamp': datetime.datetime(2013, 12, 7, 0, 0), '_timestampRecordIdx': None, u'c0': datetime.datetime(2013, 12, 7, 0, 0), u'c1': 7.0}] model = ModelFactory.create(modelConfig=modelConfig) model.enableLearning() model.enableInference(inferenceArgs) for row in data: result = model.run(row) self.assertIsInstance(result, ModelResult)
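# Hedged follow-up (not part of the original test): with a TemporalAnomaly
# model, each ModelResult typically also carries an anomaly score in its
# inferences dict; the 'anomalyScore' key name is an assumption about the
# installed nupic version, so .get() is used defensively.
for row in data:
    result = model.run(row)
    anomalyScore = result.inferences.get('anomalyScore')
    print(anomalyScore)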
'Set up an interpreter directing output to a BytesIO stream.'
def setUp(self):
self.interpreter = SafeInterpreter(writer=io.BytesIO())
'Verify basic primitives'
def testPrimitives(self):
self.assertTrue(self.interpreter('True')) self.assertFalse(self.interpreter('False')) self.assertTrue((self.interpreter('None') is None))
'Verify basic if statements'
def testConditionals(self):
self.assertTrue(self.interpreter('True if True else False')) self.assertTrue(self.interpreter('\nfoo = False\nif not foo:\n foo = True\nfoo\n'))
'Verify that src with blacklisted nodes fails'
def testBlacklist(self):
self.interpreter('for x in []: pass') self.assertIn('NotImplementedError', (error.get_error()[0] for error in self.interpreter.error)) self.interpreter('while True: pass') self.assertIn('NotImplementedError', (error.get_error()[0] for error in self.interpreter.error))
'Verify that parse() returns an AST instance'
def testParse(self):
tree = self.interpreter.parse('True') self.assertTrue(isinstance(tree, ast.AST))
'Verify that parse() returns a compile()-able AST'
def testCompile(self):
tree = self.interpreter.parse('True') codeObj = compile(tree, '<string>', mode='exec') self.assertTrue(isinstance(codeObj, types.CodeType))
'Verify that sum() works and is correct'
def testSum(self):
result = self.interpreter('sum([x*p for x,p in {1:2}.items()])') self.assertEqual(result, 2)
'Verify that a recursive function raises a runtime error'
def testRecursive(self):
self.interpreter('\ndef foo():\n foo()\n\nfoo()\n') self.assertIn('RuntimeError', (error.get_error()[0] for error in self.interpreter.error))
'Verify that an attempt to open a file raises a runtime error'
def testOpen(self):
self.interpreter("open('foo')") self.assertIn('RuntimeError', (error.get_error()[0] for error in self.interpreter.error))
'Test that the params returned by getScalarMetricWithTimeOfDayAnomalyParams form a valid dict that can be used to instantiate an HTMPredictionModel.'
def testModelParams(self):
params = getScalarMetricWithTimeOfDayAnomalyParams([0], minVal=23.42, maxVal=23.420001) encodersDict = params['modelConfig']['modelParams']['sensorParams']['encoders'] model = ModelFactory.create(modelConfig=params['modelConfig']) self.assertIsInstance(model, HTMPredictionModel, 'JSON returned cannot be used to create a model') self.assertIsNotNone(encodersDict['c0_timeOfDay']) if (encodersDict['c1']['type'] == 'RandomDistributedScalarEncoder'): self.assertGreaterEqual(encodersDict['c1']['resolution'], 0.001, 'Resolution is too low') params = getScalarMetricWithTimeOfDayAnomalyParams([0], tmImplementation='tm_cpp') self.assertEqual(params['modelConfig']['modelParams']['tmParams']['temporalImp'], 'tm_cpp', 'Incorrect json for tm_cpp tmImplementation') with self.assertRaises(ValueError): getScalarMetricWithTimeOfDayAnomalyParams([0], tmImplementation='')
'Trivial Average Error metric test'
def testWindowedTrivialAAE(self):
trivialAveErr = getModule(MetricSpec('trivial', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'errorMetric': 'avg_err'})) gt = [str(((i / 4) + 1)) for i in range(100)] p = [str(i) for i in range(100)] for i in xrange(len(gt)): trivialAveErr.addInstance(gt[i], p[i]) target = 0.25 self.assertTrue((abs((trivialAveErr.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
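# Independent sanity check of the 0.25 target above, assuming the trivial
# metric simply predicts the previous ground-truth value (an assumption
# about its behavior, not its actual implementation): gt advances once
# every four records, so that prediction is wrong on about one in four.
gt = [(i // 4) + 1 for i in range(100)]
wrong = sum(1.0 for i in range(1, len(gt)) if gt[i] != gt[i - 1])
print(wrong / (len(gt) - 1))  # ~0.24, close to the 0.25 target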
'Windowed trivial AAE metric test'
def testWindowedTrivialAccuract(self):
trivialaae = getModule(MetricSpec('trivial', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'aae'})) gt = [((i / 4) + 1) for i in range(1000)] p = [i for i in range(1000)] for i in xrange(len(gt)): trivialaae.addInstance(gt[i], p[i]) target = 0.25 self.assertTrue((abs((trivialaae.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Trivial Accuracy metric test'
def testWindowedTrivialAccuracy(self):
trivialaccuracy = getModule(MetricSpec('trivial', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'acc'})) gt = [str(((i / 4) + 1)) for i in range(1000)] p = [str(i) for i in range(1000)] for i in xrange(len(gt)): trivialaccuracy.addInstance(gt[i], p[i]) target = 0.75 self.assertTrue((abs((trivialaccuracy.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Trivial Average Error metric test'
def testWindowedTrivialAverageError(self):
trivialAveErr = getModule(MetricSpec('trivial', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'avg_err'})) gt = [str(((i / 4) + 1)) for i in range(500, 1000)] p = [str(i) for i in range(1000)] for i in xrange(len(gt)): trivialAveErr.addInstance(gt[i], p[i]) target = 0.25 self.assertTrue((abs((trivialAveErr.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Multistep AAE metric test'
def testMultistepAAE(self):
msp = getModule(MetricSpec('multiStep', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'aae', 'steps': 3})) gt = [(i + 1) for i in range(100)] p = [{3: {i: 0.7, 5: 0.3}} for i in range(100)] for i in xrange(len(gt)): msp.addInstance(gt[i], p[i]) target = 1 self.assertTrue((abs((msp.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
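# Worked check of the target of 1, assuming the multiStep wrapper scores
# the highest-probability prediction for the requested step: the step-3
# prediction for record i is i (probability 0.7) while the ground truth is
# i + 1, so every record contributes an absolute error of exactly 1.
gt = [(i + 1) for i in range(100)]
pred = [max({i: 0.7, 5: 0.3}.items(), key=lambda kv: kv[1])[0] for i in range(100)]
print(sum(abs(g - q) for g, q in zip(gt, pred)) / float(len(gt)))  # 1.0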
'Multistep AAE metric test, predicting 2 different step sizes'
def testMultistepAAEMultipleSteps(self):
msp = getModule(MetricSpec('multiStep', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'aae', 'steps': [3, 6]})) gt = [i for i in range(100)] p = [{3: {(i + 1): 0.7, 5: 0.3}, 6: {(i + 0.5): 0.7, 5: 0.3}} for i in range(100)] for i in xrange(len(gt)): msp.addInstance(gt[i], p[i]) target = 0.75 self.assertTrue((abs((msp.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Multistep with probabilities metric test'
def testMultistepProbability(self):
msp = getModule(MetricSpec('multiStepProbability', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'aae', 'steps': 3})) gt = [5 for i in range(1000)] p = [{3: {i: 0.3, 5: 0.7}} for i in range(1000)] for i in xrange(len(gt)): msp.addInstance(gt[i], p[i]) target = 283.35 self.assertTrue((abs((msp.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Multistep with probabilities metric test, predicting 2 different step sizes'
def testMultistepProbabilityMultipleSteps(self):
msp = getModule(MetricSpec('multiStepProbability', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'aae', 'steps': [1, 3]})) gt = [5 for i in range(1000)] p = [{3: {i: 0.3, 5: 0.7}, 1: {5: 1.0}} for i in range(1000)] for i in xrange(len(gt)): msp.addInstance(gt[i], p[i]) target = (283.35 / 2) self.assertTrue((abs((msp.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Moving mean Average Absolute Error metric test'
def testMovingMeanAbsoluteError(self):
movingMeanAAE = getModule(MetricSpec('moving_mean', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'mean_window': 3, 'errorMetric': 'aae'})) gt = [i for i in range(890)] gt.extend([(2 * i) for i in range(110)]) p = [i for i in range(1000)] res = [] for i in xrange(len(gt)): movingMeanAAE.addInstance(gt[i], p[i]) res.append(movingMeanAAE.getMetric()['value']) self.assertTrue((max(res[1:890]) == 2.0)) self.assertTrue((min(res[891:]) >= 4.0)) target = 4.0 self.assertTrue((abs((movingMeanAAE.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Moving mean RMSE metric test'
def testMovingMeanRMSE(self):
movingMeanRMSE = getModule(MetricSpec('moving_mean', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'mean_window': 3, 'errorMetric': 'rmse'})) gt = [i for i in range(890)] gt.extend([(2 * i) for i in range(110)]) p = [i for i in range(1000)] res = [] for i in xrange(len(gt)): movingMeanRMSE.addInstance(gt[i], p[i]) res.append(movingMeanRMSE.getMetric()['value']) self.assertTrue((max(res[1:890]) == 2.0)) self.assertTrue((min(res[891:]) >= 4.0)) target = 4.0 self.assertTrue((abs((movingMeanRMSE.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Moving mode Average Error metric test'
def testMovingModeAverageError(self):
movingModeAvgErr = getModule(MetricSpec('moving_mode', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'mode_window': 3, 'errorMetric': 'avg_err'})) gt = [(i / 4) for i in range(900)] gt.extend([((2 * i) / 4) for i in range(100)]) p = [i for i in range(1000)] res = [] for i in xrange(len(gt)): movingModeAvgErr.addInstance(gt[i], p[i]) res.append(movingModeAvgErr.getMetric()['value']) self.assertTrue((max(res[1:890]) == 0.5)) self.assertTrue((min(res[891:]) >= 0.5)) self.assertTrue((res[998] < 1.0)) target = 1.0 self.assertTrue((abs((movingModeAvgErr.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Moving mode Accuracy metric test'
def testMovingModeAccuracy(self):
movingModeACC = getModule(MetricSpec('moving_mode', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'mode_window': 3, 'errorMetric': 'acc'})) gt = [(i / 4) for i in range(900)] gt.extend([((2 * i) / 4) for i in range(100)]) p = [i for i in range(1000)] res = [] for i in xrange(len(gt)): movingModeACC.addInstance(gt[i], p[i]) res.append(movingModeACC.getMetric()['value']) self.assertTrue((min(res[1:899]) == 0.5)) self.assertTrue((max(res[900:]) <= 0.5)) self.assertTrue((res[998] > 0.0)) target = 0.0 self.assertTrue((abs((movingModeACC.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Two gram scalars test'
def testTwoGramScalars(self):
oneGram = getModule(MetricSpec('two_gram', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'predictionField': 'test', 'errorMetric': 'acc'})) encodings = [np.zeros(10) for i in range(5)] for i in range(len(encodings)): encoding = encodings[i] encoding[i] = 1 gt = [(i % 5) for i in range(1000)] res = [] for i in xrange(len(gt)): if (i == 20): oneGram.addInstance(np.zeros(10), prediction=None, record={'test': None}) else: oneGram.addInstance(encodings[(i % 5)], prediction=None, record={'test': gt[i]}) res.append(oneGram.getMetric()['value']) target = 1.0 self.assertTrue((abs((oneGram.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Two gram scalars test with step size other than 1'
def testTwoGramScalarsStepsGreaterOne(self):
oneGram = getModule(MetricSpec('two_gram', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'predictionField': 'test', 'errorMetric': 'acc', 'steps': 2})) encodings = [np.zeros(10) for i in range(5)] for i in range(len(encodings)): encoding = encodings[i] encoding[i] = 1 gt = [(i % 5) for i in range(1000)] res = [] for i in xrange(len(gt)): if (i == 20): oneGram.addInstance(np.zeros(10), prediction=None, record={'test': None}) else: oneGram.addInstance(encodings[(i % 5)], prediction=None, record={'test': gt[i]}) res.append(oneGram.getMetric()['value']) target = 1.0 self.assertTrue((abs((oneGram.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Two gram strings test'
def testTwoGramStrings(self):
oneGram = getModule(MetricSpec('two_gram', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100, 'errorMetric': 'acc', 'predictionField': 'test'})) gt = [str((i % 5)) for i in range(1000)] encodings = [np.zeros(10) for i in range(5)] for i in range(len(encodings)): encoding = encodings[i] encoding[i] = 1 newElem = 100 for i in range(5, 1000, 5): gt[i] = str(newElem) newElem += 20 res = [] for i in xrange(len(gt)): if (i == 20): oneGram.addInstance(np.zeros(10), prediction=None, record={'test': None}) else: oneGram.addInstance(encodings[(i % 5)], prediction=None, record={'test': gt[i]}) res.append(oneGram.getMetric()['value']) target = 0.8 self.assertTrue((abs((oneGram.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Windowed AAE'
def testWindowedAAE(self):
waae = getModule(MetricSpec('aae', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 1})) gt = [9, 4, 5, 6] p = [0, 13, 8, 3] for i in xrange(len(gt)): waae.addInstance(gt[i], p[i]) target = 3.0 self.assertTrue((abs((waae.getMetric()['value'] - target)) < OPFMetricsTest.DELTA), ('Got %s' % waae.getMetric()))
'Accuracy'
def testAccuracy(self):
acc = getModule(MetricSpec('acc', None, None, {'verbosity': OPFMetricsTest.VERBOSITY})) gt = [0, 1, 2, 3, 4, 5] p = [0, 1, 2, 4, 5, 6] for i in xrange(len(gt)): acc.addInstance(gt[i], p[i]) target = 0.5 self.assertTrue((abs((acc.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Windowed accuracy'
def testWindowedAccuracy(self):
acc = getModule(MetricSpec('acc', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 2})) gt = [0, 1, 2, 3, 4, 5] p = [0, 1, 2, 4, 5, 6] for i in xrange(len(gt)): acc.addInstance(gt[i], p[i]) target = 0.0 self.assertTrue((abs((acc.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'Ave Error'
def testAverageError(self):
err = getModule(MetricSpec('avg_err', None, None, {'verbosity': OPFMetricsTest.VERBOSITY})) gt = [1, 1, 2, 3, 4, 5] p = [0, 1, 2, 4, 5, 6] for i in xrange(len(gt)): err.addInstance(gt[i], p[i]) target = (2.0 / 3.0) self.assertTrue((abs((err.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
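# Sanity check of the 2/3 target: the prediction differs from the ground
# truth on 4 of the 6 records (indices 0, 3, 4 and 5).
gt = [1, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
print(sum(1.0 for g, q in zip(gt, p) if g != q) / len(gt))  # 0.666...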
'Windowed Ave Error'
def testWindowedAverageError(self):
err = getModule(MetricSpec('avg_err', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 2})) gt = [0, 1, 2, 3, 4, 5] p = [0, 1, 2, 4, 5, 6] for i in xrange(len(gt)): err.addInstance(gt[i], p[i]) target = 1.0 self.assertTrue((abs((err.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
'RMSE'
def testLongWindowRMSE(self):
rmse = getModule(MetricSpec('rmse', None, None, {'verbosity': OPFMetricsTest.VERBOSITY, 'window': 100})) gt = [9, 4, 5, 6] p = [0, 13, 8, 3] for i in xrange(len(gt)): rmse.addInstance(gt[i], p[i]) target = 6.71 self.assertTrue((abs((rmse.getMetric()['value'] - target)) < OPFMetricsTest.DELTA))
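# Worked arithmetic behind the 6.71 target: the errors are 9, 9, 3, 3, so
# RMSE = sqrt((81 + 81 + 9 + 9) / 4) = sqrt(45) ~= 6.708.
import math
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
print(math.sqrt(sum((g - q) ** 2 for g, q in zip(gt, p)) / float(len(gt))))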
'Serialization using pickle'
def testSerialization(self):
l = an.AnomalyLikelihood(claLearningPeriod=2, estimationSamples=2) l.anomalyProbability('hi', 0.1, timestamp=1) l.anomalyProbability('hi', 0.1, timestamp=2) l.anomalyProbability('hello', 0.3, timestamp=3) stored = pickle.dumps(l) restored = pickle.loads(stored) self.assertEqual(l, restored)
'Test that the tailProbability function returns correct normal values'
def testNormalProbability(self):
p = {'name': 'normal', 'mean': 0.0, 'variance': 1.0, 'stdev': 1.0} self.assertWithinEpsilon(an.tailProbability(0.0, p), 0.5) self.assertWithinEpsilon(an.tailProbability(0.3, p), 0.382088578) self.assertWithinEpsilon(an.tailProbability(1.0, p), 0.1587) self.assertWithinEpsilon(an.tailProbability(1.0, p), an.tailProbability((-1.0), p)) self.assertWithinEpsilon(an.tailProbability((-0.3), p), an.tailProbability(0.3, p)) p = {'name': 'normal', 'mean': 1.0, 'variance': 4.0, 'stdev': 2.0} self.assertWithinEpsilon(an.tailProbability(1.0, p), 0.5) self.assertWithinEpsilon(an.tailProbability(2.0, p), 0.3085) self.assertWithinEpsilon(an.tailProbability(3.0, p), 0.1587) self.assertWithinEpsilon(an.tailProbability(3.0, p), an.tailProbability((-1.0), p)) self.assertWithinEpsilon(an.tailProbability(0.0, p), an.tailProbability(2.0, p)) p = {'name': 'normal', 'mean': (-2.0), 'variance': 0.5, 'stdev': math.sqrt(0.5)} self.assertWithinEpsilon(an.tailProbability((-2.0), p), 0.5) self.assertWithinEpsilon(an.tailProbability((-1.5), p), 0.241963652) self.assertWithinEpsilon(an.tailProbability((-2.5), p), an.tailProbability((-1.5), p))
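# Independent check of the standard-normal expectations above, assuming
# tailProbability(x, p) returns the upper-tail probability of
# |x - mean| / stdev under a normal distribution (this uses math.erfc
# directly and is not nupic's implementation).
import math

def upperTail(x, mean, stdev):
    return 0.5 * math.erfc(abs(x - mean) / (stdev * math.sqrt(2.0)))

print(upperTail(1.0, 0.0, 1.0))  # ~0.1587
print(upperTail(0.3, 0.0, 1.0))  # ~0.3821
print(upperTail(2.0, 1.0, 2.0))  # ~0.3085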
'This passes in a known set of data and ensures the estimateNormal function returns the expected results.'
def testEstimateNormal(self):
samples = numpy.array([0.32259025, (-0.44936321), (-0.15784842), 0.72142628, 0.8794327, 0.06323451, (-0.15336159), (-0.02261703), 0.04806841, 0.47219226, 0.31102718, 0.57608799, 0.13621071, 0.92446815, 0.1870912, 0.46366935, (-0.11359237), 0.66582357, 1.20613048, (-0.17735134), 0.20709358, 0.74508479, 0.12450686, (-0.15468728), 0.3982757, 0.87924349, 0.86104855, 0.23688469, (-0.26018254), 0.10909429, 0.65627481, 0.39238532, 0.77150761, 0.47040352, 0.9676175, 0.42148897, 0.0967786, (-0.0087355), 0.84427985, 1.46526018, 1.19214798, 0.16034816, 0.81105554, 0.39150407, 0.93609919, 0.13992161, 0.6494196, 0.83666217, 0.37845278, 0.0368279, (-0.10201944), 0.41144746, 0.28341277, 0.36759426, 0.90439446, 0.05669459, (-0.11220214), 0.34616676, 0.49898439, (-0.23846184), 1.06400524, 0.72202135, (-0.2169164), 1.136582, (-0.69576865), 0.48603271, 0.72781008, (-0.04749299), 0.15469311, 0.52942518, 0.24816816, 0.3483905, 0.7284215, 0.93774676, 0.07286373, 1.6831539, 0.3851082, 0.0637406, (-0.92332861), (-0.02066161), 0.93709862, 0.82114131, 0.98631562, 0.05601529, 0.72214694, 0.09667526, 0.3857222, 0.50313998, 0.40775344, (-0.69624046), (-0.4448494), 0.99403206, 0.51639049, 0.13951548, 0.23458214, 1.00712699, 0.40939048, (-0.06436434), (-0.02753677), (-0.23017904)]) params = an.estimateNormal(samples) self.assertWithinEpsilon(params['mean'], 0.3721) self.assertWithinEpsilon(params['variance'], 0.22294) self.assertWithinEpsilon(params['stdev'], 0.47216) self.assertEqual(params['name'], 'normal')
'Test that sampleDistribution from a generated distribution returns roughly the same parameters.'
def testSampleDistribution(self):
p = {'mean': 0.5, 'name': 'normal', 'stdev': math.sqrt(0.1), 'variance': 0.1} samples = _sampleDistribution(p, 1000) np = an.estimateNormal(samples) self.assertWithinEpsilon(p['mean'], np['mean'], 0.1) self.assertWithinEpsilon(p['variance'], np['variance'], 0.1) self.assertWithinEpsilon(p['stdev'], np['stdev'], 0.1) self.assertTrue(np['name'], 'normal')
'This calls estimateAnomalyLikelihoods to estimate the distribution on fake data and validates the results'
def testEstimateAnomalyLikelihoods(self):
data1 = _generateSampleData(mean=0.2) (likelihoods, avgRecordList, estimatorParams) = an.estimateAnomalyLikelihoods(data1[0:1000]) self.assertEqual(len(likelihoods), 1000) self.assertEqual(len(avgRecordList), 1000) self.assertTrue(an.isValidEstimatorParams(estimatorParams)) avgParams = estimatorParams['movingAverage'] total = 0 for v in avgRecordList: total = (total + v[2]) self.assertTrue(avgParams['total'], total) dParams = estimatorParams['distribution'] self.assertWithinEpsilon(dParams['mean'], (total / float(len(avgRecordList)))) self.assertLessEqual(numpy.sum((likelihoods < 0.02)), 50) self.assertGreaterEqual(numpy.sum((likelihoods < 0.02)), 1)
'This calls estimateAnomalyLikelihoods with malformed records, which should be quietly skipped.'
def testEstimateAnomalyLikelihoodsMalformedRecords(self):
data1 = _generateSampleData(mean=0.2) data1 = ((((data1[0:1000] + [(2, 2)]) + [(2, 2, 2, 2)]) + [()]) + [2]) (likelihoods, avgRecordList, estimatorParams) = an.estimateAnomalyLikelihoods(data1[0:1004]) self.assertEqual(len(likelihoods), 1000) self.assertEqual(len(avgRecordList), 1000) self.assertTrue(an.isValidEstimatorParams(estimatorParams)) avgParams = estimatorParams['movingAverage'] total = 0 for v in avgRecordList: total = (total + v[2]) self.assertTrue(avgParams['total'], total) dParams = estimatorParams['distribution'] self.assertWithinEpsilon(dParams['mean'], (total / float(len(avgRecordList))))
'This calls estimateAnomalyLikelihoods with various values of skipRecords'
def testSkipRecords(self):
data1 = _generateSampleData(mean=0.1)[0:200] data1 = (data1 + _generateSampleData(mean=0.9)[0:200]) (likelihoods, _, estimatorParams) = an.estimateAnomalyLikelihoods(data1, skipRecords=200) dParams = estimatorParams['distribution'] self.assertWithinEpsilon(dParams['mean'], 0.9, epsilon=0.1) (likelihoods, _, estimatorParams) = an.estimateAnomalyLikelihoods(data1, skipRecords=500) self.assertEqual(len(likelihoods), len(data1)) self.assertTrue((likelihoods.sum() >= (0.3 * len(likelihoods)))) (likelihoods, _, estimatorParams) = an.estimateAnomalyLikelihoods(data1, skipRecords=len(data1)) self.assertEqual(len(likelihoods), len(data1)) self.assertTrue((likelihoods.sum() >= (0.3 * len(likelihoods))))
'A slightly more complex test. This calls estimateAnomalyLikelihoods to estimate the distribution on fake data, followed by several calls to updateAnomalyLikelihoods.'
def testUpdateAnomalyLikelihoods(self):
data1 = _generateSampleData(mean=0.2)[0:1000] (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data1, averagingWindow=5) data2 = _generateSampleData(mean=0.6)[0:300] (likelihoods2, avgRecordList2, estimatorParams2) = an.updateAnomalyLikelihoods(data2, estimatorParams) self.assertEqual(len(likelihoods2), len(data2)) self.assertEqual(len(avgRecordList2), len(data2)) self.assertTrue(an.isValidEstimatorParams(estimatorParams)) self.assertNotEqual(estimatorParams2['movingAverage']['total'], estimatorParams['movingAverage']['total']) self.assertGreaterEqual(numpy.sum((likelihoods2 < 0.01)), 25) self.assertLessEqual(numpy.sum((likelihoods2 < 0.01)), 250) data3 = _generateSampleData(mean=0.2)[0:1000] (likelihoods3, avgRecordList3, estimatorParams3) = an.updateAnomalyLikelihoods(data3, estimatorParams2) self.assertEqual(len(likelihoods3), len(data3)) self.assertEqual(len(avgRecordList3), len(data3)) self.assertTrue(an.isValidEstimatorParams(estimatorParams3)) self.assertNotEqual(estimatorParams3['movingAverage']['total'], estimatorParams['movingAverage']['total']) self.assertNotEqual(estimatorParams3['movingAverage']['total'], estimatorParams2['movingAverage']['total']) self.assertGreaterEqual(numpy.sum((likelihoods3 < 0.01)), 1) self.assertLessEqual(numpy.sum((likelihoods3 < 0.01)), 100) allData = data1 allData.extend(data2) allData.extend(data3) (_, historicalValuesAll, totalAll) = an._anomalyScoreMovingAverage(allData, windowSize=5) self.assertEqual(sum(historicalValuesAll), sum(estimatorParams3['movingAverage']['historicalValues'])) self.assertEqual(totalAll, estimatorParams3['movingAverage']['total'])
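# Condensed sketch of the chaining contract exercised above: the params
# returned by one call are passed to the next, so likelihood state carries
# across batches (uses this module's an and _generateSampleData helpers).
batch = _generateSampleData(mean=0.2)[0:1000]
_, _, params = an.estimateAnomalyLikelihoods(batch, averagingWindow=5)
for mean in (0.6, 0.2):
    newBatch = _generateSampleData(mean=mean)[0:300]
    likelihoods, _, params = an.updateAnomalyLikelihoods(newBatch, params)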
'This calls estimateAnomalyLikelihoods with flat distributions and ensures things don\'t crash.'
def testFlatAnomalyScores(self):
data1 = _generateSampleData(mean=42.0, variance=1e-10) (likelihoods, avgRecordList, estimatorParams) = an.estimateAnomalyLikelihoods(data1[0:1000]) self.assertEqual(len(likelihoods), 1000) self.assertEqual(len(avgRecordList), 1000) self.assertTrue(an.isValidEstimatorParams(estimatorParams)) dParams = estimatorParams['distribution'] self.assertWithinEpsilon(dParams['mean'], data1[0][2]) data2 = _generateSampleData(mean=42.5, variance=1e-10) (likelihoods2, _, _) = an.updateAnomalyLikelihoods(data2[0:10], estimatorParams) self.assertLessEqual(likelihoods2.sum(), 0.01) data3 = _generateSampleData(mean=0.01, variance=1e-06) (_, _, estimatorParams3) = an.estimateAnomalyLikelihoods(data3[0:1000]) data4 = _generateSampleData(mean=0.1, variance=1e-06) (likelihoods4, _, estimatorParams4) = an.updateAnomalyLikelihoods(data4[0:20], estimatorParams3) self.assertLessEqual(likelihoods4[10:].mean(), 0.002) data5 = _generateSampleData(mean=0.05, variance=1e-06) (likelihoods5, _, _) = an.updateAnomalyLikelihoods(data5[0:20], estimatorParams4) self.assertLessEqual(likelihoods5[10:].mean(), 0.28) self.assertGreater(likelihoods5[10:].mean(), 0.015)
'This calls estimateAnomalyLikelihoods with flat metric values. In this case we should use the null distribution, which gets reasonably high likelihood for everything.'
def testFlatMetricScores(self):
data1 = _generateSampleData(metricMean=42.0, metricVariance=1e-10)[0:1000] (likelihoods, _, estimatorParams) = an.estimateAnomalyLikelihoods(data1) self.assertEqual(len(likelihoods), len(data1)) self.assertTrue((likelihoods.sum() >= (0.4 * len(likelihoods)))) self.assertDictEqual(estimatorParams['distribution'], an.nullDistribution())
'This calls estimateAnomalyLikelihoods and updateAnomalyLikelihoods with one or no scores.'
def testVeryFewScores(self):
data1 = _generateSampleData(mean=42.0, variance=1e-10) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data1[0:2]) self.assertTrue(an.isValidEstimatorParams(estimatorParams)) dParams = estimatorParams['distribution'] self.assertWithinEpsilon(dParams['mean'], data1[0][2]) data1 = numpy.zeros(0) with self.assertRaises(ValueError): an.estimateAnomalyLikelihoods(data1) with self.assertRaises(ValueError): an.updateAnomalyLikelihoods(data1, estimatorParams)
'Calls updateAnomalyLikelihoods with bad params.'
def testBadParams(self):
data1 = _generateSampleData(mean=42.0, variance=1e-10) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data1[0:1]) self.assertTrue(an.isValidEstimatorParams(estimatorParams)) with self.assertRaises(ValueError): an.updateAnomalyLikelihoods(data1, {'haha': 'heehee'}) with self.assertRaises(ValueError): an.updateAnomalyLikelihoods(data1, 42.0)
'Calls _filterLikelihoods with both input types -- numpy array of floats and list of floats.'
def testFilterLikelihodsInputType(self):
l = [0.0, 0.0, 0.3, 0.3, 0.5] l2 = an._filterLikelihoods(l) n = numpy.array(l) n2 = an._filterLikelihoods(n) filtered = [0.0, 0.001, 0.3, 0.3, 0.5] for i in range(len(l)): self.assertAlmostEqual(l2[i], filtered[i], msg='Input of type list returns incorrect result') for i in range(len(n)): self.assertAlmostEqual(n2[i], filtered[i], msg='Input of type numpy array returns incorrect result')
'Tests _filterLikelihoods function for several cases: i. Likelihood goes straight to redzone, skipping over yellowzone, repeats ii. Case (i) with different values, and numpy array instead of float list iii. A scenario where changing the redzone from four to five 9s should filter differently'
def testFilterLikelihoods(self):
redThreshold = 0.9999 yellowThreshold = 0.999 l = [1.0, 1.0, 0.9, 0.8, 0.5, 0.4, 1.0, 1.0, 0.6, 0.0] l = [(1 - x) for x in l] l2 = copy.copy(l) l2[1] = (1 - yellowThreshold) l2[7] = (1 - yellowThreshold) l3 = an._filterLikelihoods(l, redThreshold=redThreshold) for i in range(len(l2)): self.assertAlmostEqual(l2[i], l3[i], msg='Failure in case (i)') l = numpy.array([0.999978229, 0.999978229, 0.999999897, 1, 1, 1, 1, 0.999999994, 0.999999966, 0.999999966, 0.999994331, 0.999516576, 0.99744487]) l = (1.0 - l) l2 = copy.copy(l) l2[1:11] = (1 - yellowThreshold) l3 = an._filterLikelihoods(l, redThreshold=redThreshold) for i in range(len(l2)): self.assertAlmostEqual(l2[i], l3[i], msg='Failure in case (ii)') l = numpy.array([0.999968329, 0.999999897, 1, 1, 1, 1, 0.999999994, 0.999999966, 0.999999966, 0.999994331, 0.999516576, 0.99744487]) l = (1.0 - l) l2a = copy.copy(l) l2b = copy.copy(l) l2a[1:10] = (1 - yellowThreshold) l2b[2:10] = (1 - yellowThreshold) l3a = an._filterLikelihoods(l, redThreshold=redThreshold) l3b = an._filterLikelihoods(l, redThreshold=0.99999) for i in range(len(l2a)): self.assertAlmostEqual(l2a[i], l3a[i], msg='Failure in case (iii), list a') for i in range(len(l2b)): self.assertAlmostEqual(l2b[i], l3b[i], msg='Failure in case (iii), list b') self.assertFalse(numpy.array_equal(l3a, l3b), msg='Failure in case (iii), list 3')
'Add sample anomaly data to the existing data list and return it. Note: this does not modify the original data list. Note 2: here we just add increasing integers as the metric value.'
@staticmethod def _addSampleData(origData=None, numSamples=1440, spikeValue=1.0, spikePeriod=20):
if (origData is None): origData = [] if (len(origData) > 0): lastDate = origData[(-1)][0] else: lastDate = datetime.datetime(2013, 2, 3) dateList = _getDateList(numSamples, lastDate) data = copy.copy(origData) for (idx, date) in enumerate(dateList): if ((spikePeriod > 0) and (((idx + 1) % spikePeriod) == 0)): data.append([date, idx, spikeValue]) else: data.append([date, idx, 0.0]) return data
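A standalone sketch (not the helper above) of the cadence _addSampleData produces: one spikeValue every spikePeriod records, 0.0 otherwise, with the record index as the metric value. The timestamps here are an assumption; the real helper builds its dates via _getDateList.

import datetime

spikePeriod, spikeValue, numSamples = 4, 1.0, 8
start = datetime.datetime(2013, 2, 3)
data = []
for idx in range(numSamples):
  score = spikeValue if (idx + 1) % spikePeriod == 0 else 0.0
  data.append([start + datetime.timedelta(minutes=5 * idx), idx, score])
# anomaly scores: [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]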
'No anomalies, and then you see a single spike. The likelihood of that spike should be 0'
def testCaseSingleSpike(self):
data = self._addSampleData(spikePeriod=0, numSamples=1000) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data[0:1000]) data = self._addSampleData(numSamples=1, spikePeriod=1) (likelihoods1, _, _) = an.updateAnomalyLikelihoods(data, estimatorParams) self.assertWithinEpsilon(likelihoods1[0], 0.0)
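The single-spike case above follows the batch-then-incremental pattern used throughout these scenario tests. A hedged, self-contained usage sketch, assuming `an` is nupic.algorithms.anomaly_likelihood (the module these tests appear to alias as `an`) and that records are [timestamp, metricValue, anomalyScore] as elsewhere in this suite:

import datetime
from nupic.algorithms import anomaly_likelihood as an

start = datetime.datetime(2013, 2, 3)
history = [[start + datetime.timedelta(minutes=5 * i), i, 0.02 * (i % 5)]
           for i in range(500)]
spikes = [[start + datetime.timedelta(minutes=5 * (500 + i)), 500 + i, 1.0]
          for i in range(10)]

# Build the initial distribution from history, then score new records while
# carrying the estimator params forward between calls.
_, _, params = an.estimateAnomalyLikelihoods(history)
likelihoods, _, params = an.updateAnomalyLikelihoods(spikes, params)
# The sudden run of 1.0 anomaly scores should come back with very small
# likelihood values.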
'Test B: one anomaly spike every 20 records. Then we suddenly get a bunch in a row. The likelihood of those spikes should be low.'
def testCaseUnusuallyHighSpikeFrequency(self):
data = self._addSampleData(spikePeriod=20, numSamples=1019) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data[0:1000]) data = self._addSampleData(numSamples=119, spikePeriod=20) (likelihoods1, _, estimatorParams1) = an.updateAnomalyLikelihoods(data, estimatorParams) self.assertTrue((likelihoods1.min() > 0.1)) data = self._addSampleData(numSamples=20, spikePeriod=2) (likelihoods2, _, _) = an.updateAnomalyLikelihoods(data, estimatorParams1) self.assertTrue(((likelihoods2[5:].sum() / 15.0) < 0.001))
'Test C: one anomaly every 20 records, but then see none. The likelihood at the end should be very low.'
@unittest.skip('Currently fails because the periodicity is greater than the window size. Requires some algorithm enhancements. Filed as https://github.com/numenta/nupic/issues/948.') def testCaseMissingSpike(self):
data = self._addSampleData(spikePeriod=20, numSamples=1019) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data[0:1000]) data = self._addSampleData(numSamples=100, spikePeriod=0) (likelihoods2, _, _) = an.updateAnomalyLikelihoods(data, estimatorParams) self.assertTrue(((likelihoods2[5:].sum() / 15.0) < 0.0001))
'Test D: bunches of anomalies every 20 records that continue. This should not be anomalous.'
def testCaseContinuousBunchesOfSpikes(self):
data = [] for _ in range(30): data = self._addSampleData(data, spikePeriod=0, numSamples=30) data = self._addSampleData(data, spikePeriod=3, numSamples=10) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data[0:1000]) data = self._addSampleData(spikePeriod=0, numSamples=30) data = self._addSampleData(data, spikePeriod=3, numSamples=10) (likelihoods2, _, _) = an.updateAnomalyLikelihoods(data, estimatorParams) self.assertTrue((likelihoods2.min() > 0.01))
'Test E: bunches of anomalies every 20 records that become even more frequent. This should be anomalous.'
def testCaseIncreasedSpikeFrequency(self):
data = [] for _ in range(30): data = self._addSampleData(data, spikePeriod=0, numSamples=30) data = self._addSampleData(data, spikePeriod=3, numSamples=10) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data[0:1000]) data = self._addSampleData(spikePeriod=0, numSamples=30) data = self._addSampleData(data, spikePeriod=1, numSamples=10) (likelihoods2, _, _) = an.updateAnomalyLikelihoods(data, estimatorParams) self.assertTrue((likelihoods2[0:30].min() > 0.01)) self.assertTrue((likelihoods2[(-5):].min() < 0.002))
'Test F: bunches of anomalies every 20 records that disappear. This should be anomalous.'
@unittest.skip('Currently fails because the periodicity is greater than the window size. Requires some algorithm enhancements. Filed as https://github.com/numenta/nupic/issues/948.') def testCaseMissingBunchesOfSpikes(self):
data = [] for _ in range(30): data = self._addSampleData(data, spikePeriod=0, numSamples=30) data = self._addSampleData(data, spikePeriod=3, numSamples=10) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data) data = self._addSampleData(spikePeriod=0, numSamples=40) (likelihoods2, _, _) = an.updateAnomalyLikelihoods(data, estimatorParams) self.assertTrue((likelihoods2[0:30].min() > 0.01)) self.assertTrue((likelihoods2[(-5):].min() < 1e-05))
'Test G: small anomaly score every 20 records, but then a large one when you would expect a small one. This should be anomalous.'
def testCaseIncreasedAnomalyScore(self):
data = [] data = self._addSampleData(data, spikePeriod=20, spikeValue=0.4, numSamples=1000) (_, _, estimatorParams) = an.estimateAnomalyLikelihoods(data) data = self._addSampleData(spikePeriod=20, spikeValue=1.0, numSamples=100) (likelihoods2, _, _) = an.updateAnomalyLikelihoods(data, estimatorParams) self.assertTrue((likelihoods2.min() < 0.0003)) self.assertTrue(((likelihoods2 < 0.0003).sum() > 40))
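Conceptually, what makes the large spike anomalous in this case is its tail probability under the distribution learned from the small recurring scores. A rough standalone sketch of that idea, not the library's code (the real implementation also applies moving averaging and the filtering shown earlier):

import math

def tailProbability(x, mean, stdev):
  # Survival function of a normal distribution via the complementary error
  # function: the probability of seeing a value at least as large as x.
  z = (x - mean) / (stdev * math.sqrt(2.0))
  return 0.5 * math.erfc(z)

print tailProbability(1.0, mean=0.02, stdev=0.05)   # ~0   -> very anomalous
print tailProbability(0.02, mean=0.02, stdev=0.05)  # ~0.5 -> unremarkable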
'Test that the most frequent possible option is chosen for a scalar encoded field'
def testCategory(self):
self.frequency(n=100, w=21, seed=SEED, numColors=90, encoder='scalar')
'Test that the most frequent possible option is chosen for a category encoded field'
def testScalar(self):
self.frequency(n=30, w=21, seed=SEED, numColors=90, encoder='category')
'Test that the most frequent possible option is chosen for a scalar encoded field. Run through many different numbers of patterns and random seeds'
@unittest.skip('Not working...') def testScalarLong(self):
for n in [52, 70, 80, 90, 100, 110]: self.frequency(n=100, w=21, seed=SEED, numColors=n, encoder='scalar')
'Test that the most frequent possible option is chosen for a category encoded field. Run through many different numbers of patterns and random seeds'
@unittest.skip('Not working...') def testCategoryLong(self):
for n in [52, 70, 80, 90, 100, 110]: self.frequency(n=100, w=21, seed=SEED, numColors=n)
'Helper function that tests whether the SP predicts the most frequent record'
def frequency(self, n=15, w=7, columnDimensions=2048, numActiveColumnsPerInhArea=40, stimulusThreshold=0, spSeed=1, spVerbosity=0, numColors=2, seed=42, minVal=0, maxVal=10, encoder='category', forced=True):
print '\nRunning SP overlap test...' print encoder, 'encoder,', 'Random seed:', seed, 'and', numColors, 'colors' spImpl = SpatialPooler(columnDimensions=(columnDimensions, 1), inputDimensions=(1, n), potentialRadius=(n / 2), numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, stimulusThreshold=stimulusThreshold, potentialPct=0.5, seed=spSeed, globalInhibition=True) rnd.seed(seed) numpy.random.seed(seed) colors = [] coincs = [] reUsedCoincs = [] spOutput = [] patterns = set([]) if (encoder == 'scalar'): enc = scalar.ScalarEncoder(name='car', w=w, n=n, minval=minVal, maxval=maxVal, periodic=False, forced=True) for y in xrange(numColors): temp = enc.encode((rnd.random() * maxVal)) colors.append(numpy.array(temp, dtype=realDType)) else: for y in xrange(numColors): sdr = numpy.zeros(n, dtype=realDType) sdr[rnd.sample(xrange(n), w)] = 1 colors.append(sdr) print 'Starting to train the sp on', numColors, 'patterns' startTime = time.time() for i in xrange(numColors): spInput = colors[i] onCells = numpy.zeros(columnDimensions) spImpl.compute(spInput, True, onCells) spOutput.append(onCells.tolist()) activeCoincIndices = set(onCells.nonzero()[0]) reUsed = activeCoincIndices.intersection(patterns) if (len(reUsed) == 0): coincs.append((i, activeCoincIndices, colors[i])) else: reUsedCoincs.append((i, activeCoincIndices, colors[i])) patterns.update(activeCoincIndices) if (((i + 1) % 100) == 0): print 'Record number:', (i + 1) print ('Elapsed time: %.2f seconds' % (time.time() - startTime)) print len(reUsedCoincs), 're-used coinc(s),' summ = [] for z in coincs: summ.append(sum([len(z[1].intersection(y[1])) for y in reUsedCoincs])) zeros = len([x for x in summ if (x == 0)]) factor = ((max(summ) * len(summ)) / sum(summ)) if (len(reUsed) < 10): self.assertLess(factor, 41, ('\nComputed factor: %d\nExpected Less than %d' % (factor, 41))) self.assertLess(zeros, (0.99 * len(summ)), ('\nComputed zeros: %d\nExpected Less than %d' % (zeros, (0.99 * len(summ))))) else: self.assertLess(factor, 8, ('\nComputed factor: %d\nExpected Less than %d' % (factor, 8))) self.assertLess(zeros, 12, ('\nComputed zeros: %d\nExpected Less than %d' % (zeros, 12)))
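For reference, a hedged sketch of the scalar-encoding branch used by this helper, with the same keyword arguments the helper passes to nupic's ScalarEncoder: nearby values produce overlapping SDRs, which is why some re-use of SP output columns across patterns is expected.

from nupic.encoders import scalar

enc = scalar.ScalarEncoder(name='car', w=21, n=100, minval=0, maxval=10,
                           periodic=False, forced=True)
a = enc.encode(3.7)            # n-bit SDR with exactly w active bits
b = enc.encode(3.9)
overlap = int((a * b).sum())   # high overlap for nearby input values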
'Send same value 10 times and expect high likelihood for prediction.'
def testSingleValue(self):
classifier = self._classifier(steps=[1], alpha=1.0) retval = [] for recordNum in xrange(10): retval = self._compute(classifier, recordNum, [1, 5], 0, 10) self.assertEqual(retval['actualValues'][0], 10) self.assertGreater(retval[1][0], 0.9)
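These classifier tests go through _classifier/_compute helpers defined elsewhere in the suite. The stand-ins below are hypothetical, assuming the classifier under test is nupic.algorithms.sdr_classifier.SDRClassifier; the real helpers may differ, but the compute() call mirrors the explicit calls in testMissingRecords further down.

from nupic.algorithms.sdr_classifier import SDRClassifier

def _classifier(steps=(1,), alpha=0.001, actValueAlpha=0.3, verbosity=0):
  # Hypothetical stand-in: positional args match the pattern
  # self._classifier([1], 1.0, 0.1, 0) used in these tests.
  return SDRClassifier(steps=steps, alpha=alpha,
                       actValueAlpha=actValueAlpha, verbosity=verbosity)

def _compute(classifier, recordNum, patternNZ, bucketIdx, actValue):
  # Hypothetical stand-in: one learn-and-infer step for a single record.
  return classifier.compute(recordNum=recordNum, patternNZ=patternNZ,
                            classification={'bucketIdx': bucketIdx,
                                            'actValue': actValue},
                            learn=True, infer=True)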
'Send same value 10 times and expect high likelihood for prediction using 0-step ahead prediction'
def testSingleValue0Steps(self):
classifier = self._classifier(steps=[0], alpha=1.0) retval = [] for recordNum in xrange(10): retval = self._compute(classifier, recordNum, [1, 5], 0, 10) self.assertEqual(retval['actualValues'][0], 10) self.assertGreater(retval[0][0], 0.9)
'Test missing record support. Here, we intend the classifier to learn the associations: [1,3,5] => bucketIdx 1 [2,4,6] => bucketIdx 2 [7,8,9] => don't care. If it doesn't pay attention to the recordNums in this test, it will learn the wrong associations.'
def testMissingRecords(self):
c = self._classifier([1], 1.0, 0.1, 0) recordNum = 0 c.compute(recordNum=recordNum, patternNZ=[1, 3, 5], classification={'bucketIdx': 0, 'actValue': 0}, learn=True, infer=True) recordNum += 1 c.compute(recordNum=recordNum, patternNZ=[2, 4, 6], classification={'bucketIdx': 1, 'actValue': 1}, learn=True, infer=True) recordNum += 1 c.compute(recordNum=recordNum, patternNZ=[1, 3, 5], classification={'bucketIdx': 2, 'actValue': 2}, learn=True, infer=True) recordNum += 1 c.compute(recordNum=recordNum, patternNZ=[2, 4, 6], classification={'bucketIdx': 1, 'actValue': 1}, learn=True, infer=True) recordNum += 1 result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5], classification={'bucketIdx': 2, 'actValue': 2}, learn=True, infer=True) recordNum += 1 self.assertLess(result[1][0], 0.1) self.assertGreater(result[1][1], 0.9) self.assertLess(result[1][2], 0.1) result = c.compute(recordNum=recordNum, patternNZ=[2, 4, 6], classification={'bucketIdx': 1, 'actValue': 1}, learn=True, infer=True) recordNum += 1 self.assertLess(result[1][0], 0.1) self.assertLess(result[1][1], 0.1) self.assertGreater(result[1][2], 0.9) recordNum += 1 result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5], classification={'bucketIdx': 0, 'actValue': 0}, learn=True, infer=True) recordNum += 1 self.assertLess(result[1][0], 0.1) self.assertGreater(result[1][1], 0.9) self.assertLess(result[1][2], 0.1) recordNum += 1 result = c.compute(recordNum=recordNum, patternNZ=[2, 4, 6], classification={'bucketIdx': 0, 'actValue': 0}, learn=True, infer=True) recordNum += 1 self.assertLess(result[1][0], 0.1) self.assertLess(result[1][1], 0.1) self.assertGreater(result[1][2], 0.9) recordNum += 1 result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5], classification={'bucketIdx': 0, 'actValue': 0}, learn=True, infer=True) recordNum += 1 self.assertLess(result[1][0], 0.1) self.assertGreater(result[1][1], 0.9) self.assertLess(result[1][2], 0.1)
'Test the missing-record edge case in classifier initialization: a record is missing within the first n records, where n is the number of prediction steps.'
def testMissingRecordInitialization(self):
c = self._classifier([2], 0.1, 0.1, 0) result = c.compute(recordNum=0, patternNZ=[1, 5, 9], classification={'bucketIdx': 0, 'actValue': 34.7}, learn=True, infer=True) result = c.compute(recordNum=2, patternNZ=[1, 5, 9], classification={'bucketIdx': 0, 'actValue': 34.7}, learn=True, infer=True) self.assertSetEqual(set(result.keys()), set(('actualValues', 2))) self.assertEqual(len(result['actualValues']), 1) self.assertAlmostEqual(result['actualValues'][0], 34.7)
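A minimal self-contained sketch (same SDRClassifier assumption as above) of the result layout these assertions rely on: a dict containing an 'actualValues' list plus one normalized probability array keyed by each requested prediction step.

import numpy
from nupic.algorithms.sdr_classifier import SDRClassifier

c = SDRClassifier(steps=[1], alpha=1.0, actValueAlpha=0.1, verbosity=0)
c.compute(recordNum=0, patternNZ=[1, 3, 5],
          classification={'bucketIdx': 0, 'actValue': 0},
          learn=True, infer=True)
result = c.compute(recordNum=1, patternNZ=[2, 4, 6],
                   classification={'bucketIdx': 1, 'actValue': 1},
                   learn=True, infer=True)
# One key per step plus the shared list of representative actual values.
assert set(result.keys()) == set(['actualValues', 1])
# The per-step entry is a distribution over bucket indices, summing to ~1.
assert abs(numpy.asarray(result[1]).sum() - 1.0) < 1e-6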
'Test the distribution of predictions. Here, we intend the classifier to learn the associations: [1,3,5] => bucketIdx 0 (30%) => bucketIdx 1 (30%) => bucketIdx 2 (40%) [2,4,6] => bucketIdx 1 (50%) => bucketIdx 3 (50%) The classifier should get the distribution almost right given enough repetitions and a small learning rate'
def testPredictionDistribution(self):
c = self._classifier([0], 0.001, 0.1, 0) SDR1 = [1, 3, 5] SDR2 = [2, 4, 6] recordNum = 0 random.seed(42) for _ in xrange(5000): randomNumber = random.random() if (randomNumber < 0.3): bucketIdx = 0 elif (randomNumber < 0.6): bucketIdx = 1 else: bucketIdx = 2 c.compute(recordNum=recordNum, patternNZ=SDR1, classification={'bucketIdx': bucketIdx, 'actValue': bucketIdx}, learn=True, infer=False) recordNum += 1 randomNumber = random.random() if (randomNumber < 0.5): bucketIdx = 1 else: bucketIdx = 3 c.compute(recordNum=recordNum, patternNZ=SDR2, classification={'bucketIdx': bucketIdx, 'actValue': bucketIdx}, learn=True, infer=False) recordNum += 1 result1 = c.compute(recordNum=recordNum, patternNZ=SDR1, classification=None, learn=False, infer=True) recordNum += 1 self.assertAlmostEqual(result1[0][0], 0.3, places=1) self.assertAlmostEqual(result1[0][1], 0.3, places=1) self.assertAlmostEqual(result1[0][2], 0.4, places=1) result2 = c.compute(recordNum=recordNum, patternNZ=SDR2, classification=None, learn=False, infer=True) recordNum += 1 self.assertAlmostEqual(result2[0][1], 0.5, places=1) self.assertAlmostEqual(result2[0][3], 0.5, places=1)