desc (stringlengths 3–26.7k) | decl (stringlengths 11–7.89k) | bodies (stringlengths 8–553k)
---|---|---|
'Try running a simple permutations test'
| def testSmartSpeculation(self, onCluster=True, env=None, **kwargs):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'smart_speculation_temporal')
if (env is None):
env = dict()
env['NTA_TEST_numIterations'] = '99'
env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow)
env['NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm'] = ('%d' % 1)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations': 200}, **kwargs)
cjDAO = ClientJobsDAO.get()
jobInfoStr = cjDAO.jobGetFields(jobID, ['results', 'engWorkerState'])
jobResultsStr = jobInfoStr[0]
engState = jobInfoStr[1]
engState = json.loads(engState)
swarms = engState['swarms']
jobResults = json.loads(jobResultsStr)
bestModel = cjDAO.modelsInfo([jobResults['bestModel']])[0]
params = json.loads(bestModel.params)
prefix = 'modelParams|sensorParams|encoders|'
correctOrder = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'Pred']
correctOrder = [(prefix + x) for x in correctOrder]
for swarm in swarms:
if (swarms[swarm]['status'] == 'killed'):
swarmId = swarm.split('.')
if (len(swarmId) > 1):
wrong = 0
for i in range((len(swarmId) - 2)):
if (correctOrder[i] != swarmId[i]):
wrong = 1
assert (wrong == 1), 'Some of the killed swarms should not have been killed as they are a legal combination.'
if (swarms[swarm]['status'] == 'completed'):
swarmId = swarm.split('.')
if (len(swarmId) > 3):
for i in range((len(swarmId) - 3)):
if (correctOrder[i] != swarmId[i]):
assert False, 'Some of the completed swarms should not have finished as they are illegal combinations'
if (swarms[swarm]['status'] == 'active'):
assert False, 'Some swarms are still active at the end of hypersearch'
pass
|
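The swarm bookkeeping above is easier to follow once you know that a swarm ID is simply the '.'-joined list of encoder parameter paths the swarm searches over (the same convention used to build expectedSwarmId in testFieldBranching further down). A minimal, self-contained sketch of the decomposition performed by the loop above, using a hypothetical three-field swarm:

prefix = 'modelParams|sensorParams|encoders|'
# hypothetical swarm that searches over encoders A, B and the predicted field
swarmId = '.'.join([prefix + 'A', prefix + 'B', prefix + 'Pred'])
fields = swarmId.split('.')
print(fields[0])    # 'modelParams|sensorParams|encoders|A'
print(len(fields))  # 3 -> a three-field swarm; the test compares each entry against correctOrder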
'Test that smart speculation does the right thing with spatial
classification models. This also applies to temporal models where the
predicted field is optional (or excluded) since Hypersearch treats them
the same.'
| def testSmartSpeculationSpatialClassification(self, onCluster=True, env=None, **kwargs):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'smart_speculation_spatial_classification')
if (env is None):
env = dict()
env['NTA_TEST_numIterations'] = '99'
env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow)
env['NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm'] = ('%d' % 1)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, maxNumWorkers=5, dummyModel={'iterations': 200}, **kwargs)
cjDAO = ClientJobsDAO.get()
jobInfoStr = cjDAO.jobGetFields(jobID, ['results', 'engWorkerState'])
jobResultsStr = jobInfoStr[0]
engState = jobInfoStr[1]
engState = json.loads(engState)
swarms = engState['swarms']
jobResults = json.loads(jobResultsStr)
bestModel = cjDAO.modelsInfo([jobResults['bestModel']])[0]
params = json.loads(bestModel.params)
prefix = 'modelParams|sensorParams|encoders|'
correctOrder = ['A', 'B', 'C']
correctOrder = [(prefix + x) for x in correctOrder]
for swarm in swarms:
if (swarms[swarm]['status'] == 'killed'):
swarmId = swarm.split('.')
if (len(swarmId) > 1):
if (correctOrder[0] in swarmId):
raise RuntimeError('Some of the killed swarms should not have been killed as they are a legal combination.')
elif (swarms[swarm]['status'] == 'completed'):
swarmId = swarm.split('.')
if (len(swarmId) >= 2):
for i in range((len(swarmId) - 1)):
if (correctOrder[i] != swarmId[i]):
raise RuntimeError('Some of the completed swarms should not have finished as they are illegal combinations')
elif (swarms[swarm]['status'] == 'active'):
raise RuntimeError('Some swarms are still active at the end of hypersearch')
|
'Try running a simple permutations test'
| def testFieldBranching(self, onCluster=True, env=None, **kwargs):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'max_branching_temporal')
if (env is None):
env = dict()
env['NTA_TEST_numIterations'] = '99'
env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow)
env['NTA_CONF_PROP_nupic_hypersearch_max_field_branching'] = ('%d' % 4)
env['NTA_CONF_PROP_nupic_hypersearch_min_field_contribution'] = ('%f' % (-20.0))
env['NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm'] = ('%d' % 2)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations': 200}, **kwargs)
cjDAO = ClientJobsDAO.get()
jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0]
jobResults = json.loads(jobResultsStr)
bestModel = cjDAO.modelsInfo([jobResults['bestModel']])[0]
params = json.loads(bestModel.params)
prefix = 'modelParams|sensorParams|encoders|'
expectedSwarmId = (prefix + ('.' + prefix).join(['attendance', 'home_winloss', 'timestamp_dayOfWeek', 'timestamp_timeOfDay', 'visitor_winloss']))
assert (params['particleState']['swarmId'] == expectedSwarmId), params['particleState']['swarmId']
assert (bestModel.optimizedMetric == 432), bestModel.optimizedMetric
env['NTA_CONF_PROP_nupic_hypersearch_max_field_branching'] = ('%d' % 3)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations': 200}, **kwargs)
cjDAO = ClientJobsDAO.get()
jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0]
jobResults = json.loads(jobResultsStr)
bestModel = cjDAO.modelsInfo([jobResults['bestModel']])[0]
params = json.loads(bestModel.params)
prefix = 'modelParams|sensorParams|encoders|'
expectedSwarmId = (prefix + ('.' + prefix).join(['attendance', 'home_winloss', 'timestamp_timeOfDay', 'visitor_winloss']))
assert (params['particleState']['swarmId'] == expectedSwarmId), params['particleState']['swarmId']
assert (bestModel.optimizedMetric == 465), bestModel.optimizedMetric
env['NTA_CONF_PROP_nupic_hypersearch_max_field_branching'] = ('%d' % 5)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations': 200}, **kwargs)
cjDAO = ClientJobsDAO.get()
jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0]
jobResults = json.loads(jobResultsStr)
bestModel = cjDAO.modelsInfo([jobResults['bestModel']])[0]
params = json.loads(bestModel.params)
prefix = 'modelParams|sensorParams|encoders|'
expectedSwarmId = (prefix + ('.' + prefix).join(['attendance', 'home_winloss', 'precip', 'timestamp_dayOfWeek', 'timestamp_timeOfDay', 'visitor_winloss']))
assert (params['particleState']['swarmId'] == expectedSwarmId), params['particleState']['swarmId']
assert (bestModel.optimizedMetric == 390), bestModel.optimizedMetric
env['NTA_CONF_PROP_nupic_hypersearch_max_field_branching'] = ('%d' % 0)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=100, dummyModel={'iterations': 200}, **kwargs)
cjDAO = ClientJobsDAO.get()
jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0]
jobResults = json.loads(jobResultsStr)
bestModel = cjDAO.modelsInfo([jobResults['bestModel']])[0]
params = json.loads(bestModel.params)
prefix = 'modelParams|sensorParams|encoders|'
expectedSwarmId = (prefix + ('.' + prefix).join(['attendance', 'daynight', 'visitor_winloss']))
assert (params['particleState']['swarmId'] == expectedSwarmId), params['particleState']['swarmId']
assert (bestModel.optimizedMetric == 406), bestModel.optimizedMetric
return
|
'Test minimum field contribution threshold for a field to be included in further sprints'
| def testFieldThreshold(self, onCluster=True, env=None, **kwargs):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testFieldThreshold(onCluster=True)
|
'Try running a simple permutations test'
| def testFieldContributions(self, onCluster=True, env=None, **kwargs):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'field_contrib_temporal')
if (env is None):
env = dict()
env['NTA_TEST_numIterations'] = '99'
env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, **kwargs)
cjDAO = ClientJobsDAO.get()
jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0]
jobResults = json.loads(jobResultsStr)
actualFieldContributions = jobResults['fieldContributions']
print 'Actual field contributions:', actualFieldContributions
expectedFieldContributions = {'consumption': 0.0, 'address': 0.0, 'timestamp_timeOfDay': 20.0, 'timestamp_dayOfWeek': 50.0, 'gym': 10.0}
for (key, value) in expectedFieldContributions.items():
self.assertEqual(actualFieldContributions[key], value, ("actual field contribution from field '%s' does not match the expected value of %f" % (key, value)))
return
|
'Try running a simple permutations test through a real CLA model'
| def testHTMPredictionModelV2(self):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testHTMPredictionModelV2(onCluster=True, maxModels=4)
|
'Try running a simple permutations test through a real CLA model that
uses multistep'
| def testCLAMultistepModel(self):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testCLAMultistepModel(onCluster=True, maxModels=4)
|
'Try running a simple permutations test through a real CLA model that
uses multistep'
| def testLegacyCLAMultistepModel(self):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testLegacyCLAMultistepModel(onCluster=True, maxModels=4)
|
'Try running a simple permutations test where certain field combinations
take longer to complete; this lets us verify that we successfully kill
models in bad swarms that are still running.'
| def testSimpleV2VariableWaits(self):
| self._printTestHeader()
env = dict()
env['NTA_TEST_variableWaits'] = 'True'
env['NTA_TEST_numIterations'] = '100'
inst = OneNodeTests('testSimpleV2')
return inst.testSimpleV2(onCluster=True, env=env)
|
'Run a worker on a model for a while, then have it exit before the
model finishes. Then, run another worker, which should detect the orphaned
model.'
| def testOrphanedModel(self, modelRange=(0, 2)):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2')
env = dict()
env['NTA_TEST_numIterations'] = '99'
env['NTA_TEST_sysExitModelRange'] = ('%d,%d' % (modelRange[0], modelRange[1]))
env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '1'
env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=500, onCluster=True, env=env, waitForCompletion=True, maxNumWorkers=4)
self.assertEqual(minErrScore, 20)
self.assertLess(len(resultInfos), 500)
return
|
'Test behavior when a worker marks 2 models orphaned at the same time.'
| def testTwoOrphanedModels(self, modelRange=(0, 2)):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'oneField')
env = dict()
env['NTA_TEST_numIterations'] = '99'
env['NTA_TEST_delayModelRange'] = ('%d,%d' % (modelRange[0], modelRange[1]))
env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '1'
env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=100, onCluster=True, env=env, waitForCompletion=True, maxNumWorkers=4)
self.assertEqual(minErrScore, 50)
self.assertLess(len(resultInfos), 100)
return
|
'Run a worker on a model for a while, then have it exit before the
model finishes. Then, run another worker, which should detect the orphaned
model.'
| def testOrphanedModelGen1(self):
| self._printTestHeader()
inst = MultiNodeTests(self._testMethodName)
return inst.testOrphanedModel(modelRange=(10, 11))
|
'Test to make sure that the maxModels parameter doesn\'t include
orphaned models. Run a test with maxModels set to 5, where one model becomes
orphaned. At the end, there should be at least 6 models in the models table,
one of which will be the new model that adopted the orphaned model.'
| def testOrphanedModelMaxModels(self):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummyV2')
numModels = 5
env = dict()
env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '3'
env['NTA_TEST_max_num_models'] = str(numModels)
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=numModels, env=env, onCluster=True, waitForCompletion=True, dummyModel={'metricValue': ['25', '50'], 'sysExitModelRange': '0, 1', 'iterations': 20})
cjDB = ClientJobsDAO.get()
self.assertGreaterEqual(len(resultInfos), (numModels + 1))
completionReasons = [x.completionReason for x in resultInfos]
self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_EOF), numModels)
self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_ORPHAN), 1)
|
'Test for the correct behavior when a model uses a different connection id
than what is stored in the db. The correct behavior is for the worker to log
this as a warning and move on to a new model'
| def testOrphanedModelConnection(self):
| self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummy_multi_v2')
numModels = 2
env = dict()
env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '1'
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=numModels, env=env, onCluster=True, waitForCompletion=True, dummyModel={'metricValue': ['25', '50'], 'sleepModelRange': '0, 1:5', 'iterations': 20})
cjDB = ClientJobsDAO.get()
self.assertGreaterEqual(len(resultInfos), numModels, ('%d were run. Expecting %s' % (len(resultInfos), (numModels + 1))))
completionReasons = [x.completionReason for x in resultInfos]
self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_EOF), numModels)
self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_ORPHAN), 1)
|
'Run the erred-model test (OneNodeTests.testErredModel) on the cluster.'
| def testErredModel(self, modelRange=(6, 7)):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testErredModel(onCluster=True)
|
'Run the job-fail test (OneNodeTests.testJobFailModel) on the cluster.'
| def testJobFailModel(self):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testJobFailModel(onCluster=True)
|
'Run the too-many-erred-models test (OneNodeTests.testTooManyErredModels) on
the cluster.'
| def testTooManyErredModels(self, modelRange=(5, 10)):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testTooManyErredModels(onCluster=True)
|
'Try running a simple permutations test'
| def testSpatialClassification(self):
| self._printTestHeader()
inst = OneNodeTests(self._testMethodName)
return inst.testSpatialClassification(onCluster=True)
|
'Test to make sure that the best model continues running even when it has
matured. The 2nd model (constant) will be marked as mature first and will
continue to run till the end. The 1st model reaches maturity and should
stop before all the records are consumed; it should be the best model
because it has a lower error.'
| def testMatureInterleaved(self):
| self._printTestHeader()
self.expDir = os.path.join(g_myEnv.testSrcExpDir, ('dummy_multi_v%d' % 2))
self.env['NTA_TEST_max_num_models'] = '2'
(jobID, _, _, _, _) = self.runPermutations(self.expDir, hsImp=self.hsImp, maxModels=2, loggingLevel=g_myEnv.options.logLevel, env=self.env, onCluster=True, dummyModel={'metricFunctions': ['lambda x: -10*math.log10(x+1) +100', 'lambda x: 100.0'], 'delay': [2.0, 0.0], 'waitTime': [0.05, 0.01], 'iterations': 500, 'experimentDirectory': self.expDir})
cjDB = ClientJobsDAO.get()
(modelIDs, records, completionReasons, matured) = zip(*self.getModelFields(jobID, ['numRecords', 'completionReason', 'engMatured']))
results = cjDB.jobGetFields(jobID, ['results'])[0]
results = json.loads(results)
self.assertEqual(results['bestModel'], modelIDs[0])
self.assertEqual(records[1], 500)
self.assertTrue(((records[0] > 100) and (records[0] < 500)), ('Model 0 num records: expected 100 < %d < 500 ' % records[0]))
self.assertEqual(completionReasons[1], cjDB.CMPL_REASON_EOF)
self.assertEqual(completionReasons[0], cjDB.CMPL_REASON_STOPPED)
self.assertEqual(matured[0], True)
|
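For reference, a quick evaluation of the two dummy metric functions handed to runPermutations above (plain arithmetic, not part of the test); it shows why model 0 eventually has the lower error and is reported as the best model while the constant model runs to EOF:

import math

def f0(x):
    # model 0's dummy metric: error decreases as more records are seen
    return -10 * math.log10(x + 1) + 100

def f1(x):
    # model 1's dummy metric: constant error
    return 100.0

print('%.1f %.1f %.1f' % (f0(0), f0(99), f0(499)))  # 100.0 80.0 73.0
print('%.1f' % f1(499))                             # 100.0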
'Sanity check to make sure that when only 1 model is running, it continues
to run even when it has reached maturity'
| def testConstant(self):
| self._printTestHeader()
(jobID, _, _, _, _) = self.runPermutations(self.expDir, hsImp=self.hsImp, maxModels=1, loggingLevel=g_myEnv.options.logLevel, env=self.env, dummyModel={'metricFunctions': ['lambda x: 100'], 'iterations': 350, 'experimentDirectory': self.expDir})
cjDB = ClientJobsDAO.get()
modelIDs = cjDB.jobGetModelIDs(jobID)
dbResults = cjDB.modelsGetFields(modelIDs, ['numRecords', 'completionReason', 'engMatured'])
modelIDs = [x[0] for x in dbResults]
records = [x[1][0] for x in dbResults]
completionReasons = [x[1][1] for x in dbResults]
matured = [x[1][2] for x in dbResults]
results = cjDB.jobGetFields(jobID, ['results'])[0]
results = json.loads(results)
self.assertEqual(results['bestModel'], min(modelIDs))
self.assertEqual(records[0], 350)
self.assertEqual(completionReasons[0], cjDB.CMPL_REASON_EOF)
self.assertEqual(matured[0], True)
|
'Run with one really bad swarm to see if terminator picks it up correctly'
| def testSimple(self, useCluster=False):
| if (not g_myEnv.options.runInProc):
self.skipTest('Skipping One Node test since runInProc is not specified')
self._printTestHeader()
expDir = os.path.join(g_myEnv.testSrcExpDir, 'swarm_v2')
(jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=None, onCluster=useCluster, env=self.env, dummyModel={'iterations': 200})
cjDB = ClientJobsDAO.get()
jobResultsStr = cjDB.jobGetFields(jobID, ['results'])[0]
jobResults = json.loads(jobResultsStr)
terminatedSwarms = jobResults['terminatedSwarms']
swarmMaturityWindow = int(configuration.Configuration.get('nupic.hypersearch.swarmMaturityWindow'))
prefix = 'modelParams|sensorParams|encoders|'
for (swarm, (generation, scores)) in terminatedSwarms.iteritems():
if ((prefix + 'gym') in swarm.split('.')):
self.assertEqual(generation, (swarmMaturityWindow - 1))
else:
self.assertEqual(generation, ((swarmMaturityWindow - 1) + 4))
|
'Parse our command-line args/options and strip them from sys.argv.
Returns the tuple (parsedOptions, remainingArgs)'
| @classmethod
def _processArgs(cls):
| helpString = '%prog [options...] [-- unittestoptions...] [suitename.testname | suitename]\n Run the Hypersearch unit tests. To see unit test framework options, enter:\n python %prog -- --help\n\n Example usages:\n python %prog MultiNodeTests\n python %prog MultiNodeTests.testOrphanedModel\n python %prog -- MultiNodeTests.testOrphanedModel\n python %prog -- --failfast\n python %prog -- --failfast OneNodeTests.testOrphanedModel\n\n Available suitename.testnames: '
allTests = _getTestList()
for test in allTests:
helpString += ('\n %s' % test)
parser = OptionParser(helpString, conflict_handler='resolve')
parser.add_option('--verbosity', default=0, type='int', help='Verbosity level, either 0, 1, 2, or 3 [default: %default].')
parser.add_option('--runInProc', action='store_true', default=False, help='Run inProc tests; currently inProc tests are not run by default. [default: %default].')
parser.add_option('--logLevel', action='store', type='int', default=logging.INFO, help='override default log level. Pass in an integer value that represents the desired logging level (10=logging.DEBUG, 20=logging.INFO, etc.) [default: %default].')
parser.add_option('--hs', dest='hsVersion', default=2, type='int', help='Hypersearch version (only 2 supported; 1 was deprecated) [default: %default].')
return parser.parse_args(args=cls.args)
|
'Returns the test arguments after parsing'
| @classmethod
def parseArgs(cls):
| return cls._processArgs()[0]
|
'Consumes the test arguments and returns the remaining arguments meant
for unittest.main'
| @classmethod
def consumeArgs(cls):
| return cls._processArgs()[1]
|
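parseArgs and consumeArgs together implement a common pattern: parse this script's own options first, then hand everything left over to the unittest framework. A minimal, self-contained sketch of that split (the _splitArgs helper and its single --verbosity option are illustrative, not part of the test file; the real code routes through _processArgs and cls.args as shown above):

import sys
import unittest
from optparse import OptionParser

def _splitArgs(argv):
    # Parse our own options. Positional arguments, and anything after a literal
    # '--', are left unparsed and returned in `remaining`.
    parser = OptionParser(conflict_handler='resolve')
    parser.add_option('--verbosity', default=0, type='int')
    options, remaining = parser.parse_args(argv)
    return options, remaining

if __name__ == '__main__':
    # e.g.: python this_script.py --verbosity 1 -- --failfast OneNodeTests.testOrphanedModel
    options, remaining = _splitArgs(sys.argv[1:])
    unittest.main(argv=[sys.argv[0]] + remaining)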
'Test a single set of sequences once and check that individual
predictions reflect the true relative frequencies. Return a success code
as well as the trained TM. Success code is 1 for pass, 0 for fail.
The trainingSet is a set of 3 sequences that share the same first 4
elements but differ in the 5th element. After feeding in the first 4 elements,
we want to correctly compute the confidences for the 5th element based on
the frequency with which each sequence was presented during learning.
For example:
trainingSequences[0]: (10% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat E: (array([20, 21, 22, 23, 24]),)
trainingSequences[1]: (20% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat F: (array([25, 26, 27, 28, 29]),)
trainingSequences[2]: (70% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat G: (array([30, 31, 32, 33, 34]),)
allTrainingPatterns:
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat E: (array([20, 21, 22, 23, 24]),)
pat F: (array([25, 26, 27, 28, 29]),)
pat G: (array([30, 31, 32, 33, 34]),)'
| def _testSequence(self, trainingSet, nSequencePresentations=1, tm=None, testSequences=None, doResets=True, relativeFrequencies=None):
| trainingSequences = trainingSet[0]
trainingFrequencies = trainingSet[1]
allTrainingPatterns = trainingSet[2]
trainingCummulativeFrequencies = numpy.cumsum(trainingFrequencies)
if (testSequences is None):
testSequences = trainingSequences
if (VERBOSITY > 1):
print '============= Learning ================='
for r in xrange(nSequencePresentations):
whichSequence = numpy.searchsorted(trainingCummulativeFrequencies, _RGEN.random_sample())
trainingSequence = trainingSequences[whichSequence]
if (VERBOSITY > 2):
print ('=========Presentation #%d Sequence #%d==============' % (r, whichSequence))
if doResets:
tm.reset()
for (t, x) in enumerate(trainingSequence):
if (VERBOSITY > 3):
print 'Time step', t
print 'Input: ', tm.printInput(x)
tm.learn(x)
if (VERBOSITY > 4):
tm.printStates(printPrevious=(VERBOSITY > 4))
print
if (VERBOSITY > 4):
print 'Sequence finished. Complete state after sequence'
tm.printCells()
print
tm.finishLearning()
if (VERBOSITY > 2):
print 'Training completed. Complete state:'
tm.printCells()
print
print 'TM parameters:'
print tm.printParameters()
if (VERBOSITY > 1):
print '============= Inference ================='
testSequence = testSequences[0]
slen = len(testSequence)
tm.collectStats = True
tm.resetStats()
if doResets:
tm.reset()
for (t, x) in enumerate(testSequence):
if (VERBOSITY > 2):
print 'Time step', t, '\nInput:', tm.printInput(x)
tm.infer(x)
if (VERBOSITY > 3):
tm.printStates(printPrevious=(VERBOSITY > 4), printLearnState=False)
print
if (t == (slen - 2)):
tmNonZeros = [pattern.nonzero()[0] for pattern in allTrainingPatterns]
predictionScore2 = tm._checkPrediction(tmNonZeros)[2]
if (VERBOSITY > 0):
print 'predictionScore:', predictionScore2
patternConfidenceScores = numpy.array([x[1] for x in predictionScore2])
patternConfidenceScores /= patternConfidenceScores.sum()
msg = ('Prediction failed with predictionScore: %s. Expected %s but got %s.' % (str(predictionScore2), str(relativeFrequencies), str(patternConfidenceScores[4:])))
self.assertLess(abs((patternConfidenceScores[4] - relativeFrequencies[0])), 0.1, msg=msg)
self.assertLess(abs((patternConfidenceScores[5] - relativeFrequencies[1])), 0.1, msg=msg)
self.assertLess(abs((patternConfidenceScores[6] - relativeFrequencies[2])), 0.1, msg=msg)
|
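The trainingSet tuple consumed by _testSequence above is described only in prose; here is a minimal sketch of how such a set could be assembled, using the pattern indices and the 10/20/70% frequencies from the docstring (the vector width and the _makePattern helper are illustrative, the real tests build this elsewhere):

import numpy

def _makePattern(start, numCols=35, width=5):
    # dense binary vector whose .nonzero() indices match the docstring's pattern listing
    p = numpy.zeros(numCols, dtype='float32')
    p[start:start + width] = 1
    return p

patA, patB, patC, patD, patE, patF, patG = [_makePattern(s) for s in (0, 5, 10, 15, 20, 25, 30)]
trainingSequences = [
    [patA, patB, patC, patD, patE],   # 10% probable
    [patA, patB, patC, patD, patF],   # 20% probable
    [patA, patB, patC, patD, patG],   # 70% probable
]
trainingFrequencies = [0.1, 0.2, 0.7]
allTrainingPatterns = [patA, patB, patC, patD, patE, patF, patG]
trainingSet = (trainingSequences, trainingFrequencies, allTrainingPatterns)
# the matching relativeFrequencies argument would then be [0.1, 0.2, 0.7]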
'Test with fast learning; make sure PAM allows us to train with fewer
repeats of the training data.'
| def testFastLearning(self):
| numOnBitsPerPattern = 3
baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=numOnBitsPerPattern, minThreshold=numOnBitsPerPattern, newSynapseCount=numOnBitsPerPattern, initialPerm=0.6, permanenceInc=0.1, permanenceDec=0.0, globalDecay=0.0, pamLength=0, nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, 3 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=20, expMissingMax=None, pamLength=1, nTrainRepetitions=3))
print '\nRunning with PAM, 3 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=5, nTrainRepetitions=3))
|
'Test with slow learning; make sure PAM allows us to train with fewer
repeats of the training data.'
| def testSlowLearning(self):
| numOnBitsPerPattern = 3
baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=numOnBitsPerPattern, minThreshold=numOnBitsPerPattern, newSynapseCount=numOnBitsPerPattern, initialPerm=0.11, permanenceInc=0.1, permanenceDec=0.0, globalDecay=0.0, pamLength=0, nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, 10 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=10, expMissingMax=None, pamLength=1, nTrainRepetitions=10))
print '\nRunning with PAM, 10 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=6, nTrainRepetitions=10))
|
'Test with slow learning, some overlap in the patterns, and TM thresholds
of 80% of newSynapseCount.
Make sure PAM allows us to train with fewer repeats of the training data.'
| def testSlowLearningWithOverlap(self):
| if SHORT:
self.skipTest('Test skipped by default. Enable with --long.')
numOnBitsPerPattern = 5
baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, patternOverlap=2, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=int((0.8 * numOnBitsPerPattern)), minThreshold=int((0.8 * numOnBitsPerPattern)), newSynapseCount=numOnBitsPerPattern, initialPerm=0.11, permanenceInc=0.1, permanenceDec=0.0, globalDecay=0.0, pamLength=0, nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, 10 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=10, expMissingMax=None, pamLength=1, nTrainRepetitions=10))
print '\nRunning with PAM, 10 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=6, nTrainRepetitions=10))
|
'Test with "Forbes-like" data. A bunch of sequences of lengths between 2
and 10 elements long.
We will test with both fast and slow learning.
Make sure PAM allows us to train with fewer repeats of the training data.'
| def testForbesLikeData(self):
| if SHORT:
self.skipTest('Test skipped by default. Enable with --long.')
numOnBitsPerPattern = 3
baseParams = dict(seqFunction=buildSequencePool, numSequences=20, seqLen=[3, 10], numPatterns=10, numOnBitsPerPattern=numOnBitsPerPattern, patternOverlap=1, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=int((0.8 * numOnBitsPerPattern)), minThreshold=int((0.8 * numOnBitsPerPattern)), newSynapseCount=numOnBitsPerPattern, initialPerm=0.51, permanenceInc=0.1, permanenceDec=0.0, globalDecay=0.0, pamLength=0, checkSynapseConsistency=False, nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, fast learning, 2 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=50, expMissingMax=None, pamLength=1, nTrainRepetitions=2))
print '\nRunning with PAM, fast learning, 2 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=5, nTrainRepetitions=2))
print '\nRunning without PAM, slow learning, 8 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=1, expMissingMax=None, initialPerm=0.31, pamLength=1, nTrainRepetitions=8))
print '\nRunning with PAM, slow learning, 8 repetitions of the training data...'
self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, initialPerm=0.31, pamLength=5, nTrainRepetitions=8))
|
'Test the KNN classifier in this module. short can be:
0 (short), 1 (medium), or 2 (long)'
| def runTestKNNClassifier(self, short=0):
| failures = ''
if (short != 2):
numpy.random.seed(42)
else:
seed_value = int(time.time())
numpy.random.seed(seed_value)
LOGGER.info('Seed used: %d', seed_value)
f = open('seedval', 'a')
f.write(str(seed_value))
f.write('\n')
f.close()
failures += simulateKMoreThanOne()
LOGGER.info('\nTesting KNN Classifier on dense patterns')
(numPatterns, numClasses) = getNumTestPatterns(short)
patternSize = 100
patterns = numpy.random.rand(numPatterns, patternSize)
patternDict = dict()
testDict = dict()
for i in xrange(numPatterns):
patternDict[i] = dict()
patternDict[i]['pattern'] = patterns[i]
patternDict[i]['category'] = numpy.random.randint(0, (numClasses - 1))
testDict[i] = copy.deepcopy(patternDict[i])
testDict[i]['pattern'][:int((0.02 * patternSize))] = numpy.random.rand()
testDict[i]['category'] = None
LOGGER.info('\nTesting KNN Classifier with L2 norm')
knn = KNNClassifier(k=1)
failures += simulateClassifier(knn, patternDict, 'KNN Classifier with L2 norm test')
LOGGER.info('\nTesting KNN Classifier with L1 norm')
knnL1 = KNNClassifier(k=1, distanceNorm=1.0)
failures += simulateClassifier(knnL1, patternDict, 'KNN Classifier with L1 norm test')
LOGGER.info('\nTesting KNN Classifier with exact matching. For testing we slightly alter the training data and expect None to be returned for the classifications.')
knnExact = KNNClassifier(k=1, exact=True)
failures += simulateClassifier(knnExact, patternDict, 'KNN Classifier with exact matching test', testDict=testDict)
(numPatterns, numClasses) = getNumTestPatterns(short)
patterns = (numpy.random.rand(numPatterns, 25) > 0.7).astype(RealNumpyDType)
patternDict = dict()
for i in patterns:
iString = str(i.tolist())
if (iString not in patternDict):
randCategory = numpy.random.randint(0, (numClasses - 1))
patternDict[iString] = dict()
patternDict[iString]['pattern'] = i
patternDict[iString]['category'] = randCategory
LOGGER.info('\nTesting KNN on sparse patterns')
knnDense = KNNClassifier(k=1)
failures += simulateClassifier(knnDense, patternDict, 'KNN Classifier on sparse pattern test')
self.assertEqual(len(failures), 0, ('Tests failed: \n' + failures))
if (short == 2):
f = open('seedval', 'a')
f.write('Pass\n')
f.close()
|
'Basic first order sequences'
| def testFirstOrder(self):
| self.init()
sequence = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None])
self.feedTM(sequence)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 0)
self.feedTM(sequence, num=2)
self.feedTM(sequence)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.feedTM(sequence, num=4)
self.feedTM(sequence)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
|
'High order sequences (in order)'
| def testHighOrder(self):
| self.init()
sequenceA = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None])
sequenceB = self.sequenceMachine.generateFromNumbers([4, 1, 2, 5, None])
self.feedTM(sequenceA, num=5)
self.feedTM(sequenceA, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.feedTM(sequenceB)
self.feedTM(sequenceB, num=2)
self.feedTM(sequenceB, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[1]), 1)
self.feedTM(sequenceB, num=3)
self.feedTM(sequenceB, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[2]), 1)
self.feedTM(sequenceB, num=3)
self.feedTM(sequenceB, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.feedTM(sequenceA, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.assertEqual(len(self.tm.mmGetTracePredictedInactiveColumns().data[3]), 1)
self.feedTM(sequenceA, num=10)
self.feedTM(sequenceA, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
|
'High order sequences (alternating)'
| def testHighOrderAlternating(self):
| self.init()
sequence = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None])
sequence += self.sequenceMachine.generateFromNumbers([4, 1, 2, 5, None])
self.feedTM(sequence)
self.feedTM(sequence, num=10)
self.feedTM(sequence, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[7]), 1)
|
'Endlessly repeating sequence of 2 elements'
| def testEndlesslyRepeating(self):
| self.init({'columnDimensions': [2]})
sequence = self.sequenceMachine.generateFromNumbers([0, 1])
for _ in xrange(7):
self.feedTM(sequence)
self.feedTM(sequence, num=50)
|
'Endlessly repeating sequence of 2 elements with maxNewSynapseCount=1'
| def testEndlesslyRepeatingWithNoNewSynapses(self):
| self.init({'columnDimensions': [2], 'maxNewSynapseCount': 1, 'cellsPerColumn': 10})
sequence = self.sequenceMachine.generateFromNumbers([0, 1])
for _ in xrange(7):
self.feedTM(sequence)
self.feedTM(sequence, num=100)
|
'Long repeating sequence with novel pattern at the end'
| def testLongRepeatingWithNovelEnding(self):
| self.init({'columnDimensions': [3]})
sequence = self.sequenceMachine.generateFromNumbers([0, 1])
sequence *= 10
sequence += [self.patternMachine.get(2), None]
for _ in xrange(4):
self.feedTM(sequence)
self.feedTM(sequence, num=10)
|
'A single endlessly repeating pattern'
| def testSingleEndlesslyRepeating(self):
| self.init({'columnDimensions': [1]})
sequence = [self.patternMachine.get(0)]
for _ in xrange(4):
self.feedTM(sequence)
for _ in xrange(2):
self.feedTM(sequence, num=10)
|
'Print a single vector succinctly.'
| def _printOneTrainingVector(self, x):
| print ''.join((('1' if (k != 0) else '.') for k in x))
|
'Print all vectors'
| def _printAllTrainingSequences(self, trainingSequences):
| for (i, trainingSequence) in enumerate(trainingSequences):
print '============= Sequence', i, '================='
for pattern in trainingSequence:
self._printOneTrainingVector(pattern)
|
'Set verbosity level on the TM'
| def _setVerbosity(self, verbosity, tm, tmPy):
| tm.cells4.setVerbosity(verbosity)
tm.verbosity = verbosity
tmPy.verbosity = verbosity
|
'Create an instance of the appropriate temporal memory. We isolate
all parameters as constants specified here.'
| def _createTMs(self, numCols, fixedResources=False, checkSynapseConsistency=True):
| minThreshold = 4
activationThreshold = 8
newSynapseCount = 15
initialPerm = 0.3
connectedPerm = 0.5
permanenceInc = 0.1
permanenceDec = 0.05
if fixedResources:
permanenceDec = 0.1
maxSegmentsPerCell = 5
maxSynapsesPerSegment = 15
globalDecay = 0
maxAge = 0
else:
permanenceDec = 0.05
maxSegmentsPerCell = (-1)
maxSynapsesPerSegment = (-1)
globalDecay = 0.0001
maxAge = 1
if g_testCPPTM:
if (g_options.verbosity > 1):
print 'Creating BacktrackingTMCPP instance'
cppTM = BacktrackingTMCPP(numberOfCols=numCols, cellsPerColumn=4, initialPerm=initialPerm, connectedPerm=connectedPerm, minThreshold=minThreshold, newSynapseCount=newSynapseCount, permanenceInc=permanenceInc, permanenceDec=permanenceDec, activationThreshold=activationThreshold, globalDecay=globalDecay, maxAge=maxAge, burnIn=1, seed=g_options.seed, verbosity=g_options.verbosity, checkSynapseConsistency=checkSynapseConsistency, pamLength=1000, maxSegmentsPerCell=maxSegmentsPerCell, maxSynapsesPerSegment=maxSynapsesPerSegment)
cppTM.retrieveLearningStates = True
else:
cppTM = None
if (g_options.verbosity > 1):
print 'Creating PY TM instance'
pyTM = BacktrackingTM(numberOfCols=numCols, cellsPerColumn=4, initialPerm=initialPerm, connectedPerm=connectedPerm, minThreshold=minThreshold, newSynapseCount=newSynapseCount, permanenceInc=permanenceInc, permanenceDec=permanenceDec, activationThreshold=activationThreshold, globalDecay=globalDecay, maxAge=maxAge, burnIn=1, seed=g_options.seed, verbosity=g_options.verbosity, pamLength=1000, maxSegmentsPerCell=maxSegmentsPerCell, maxSynapsesPerSegment=maxSynapsesPerSegment)
return (cppTM, pyTM)
|
'Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector. These patterns
are used as elements of sequences when building up a training set.'
| def _getSimplePatterns(self, numOnes, numPatterns):
| numCols = (numOnes * numPatterns)
p = []
for i in xrange(numPatterns):
x = numpy.zeros(numCols, dtype='float32')
x[(i * numOnes):((i + 1) * numOnes)] = 1
p.append(x)
return p
|
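To make the layout concrete, this is the loop above re-run inline with small numbers (no new behavior, just a worked example):

import numpy

numOnes, numPatterns = 2, 3
p = []
for i in range(numPatterns):
    x = numpy.zeros(numOnes * numPatterns, dtype='float32')
    x[i * numOnes:(i + 1) * numOnes] = 1
    p.append(x)
# p[0] -> [1. 1. 0. 0. 0. 0.]
# p[1] -> [0. 0. 1. 1. 0. 0.]
# p[2] -> [0. 0. 0. 0. 1. 1.]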
'A simple sequence of 5 patterns. The left half of the vector contains
the pattern elements, each with numOnes consecutive bits. The right half
contains numOnes random bits. The function returns a pair:
trainingSequences: A list containing numRepetitions instances of the
above sequence
testSequence: A single clean test sequence containing the 5 patterns
but with no noise on the right half'
| def _buildSegmentLearningTrainingSet(self, numOnes=10, numRepetitions=10):
| numPatterns = 5
numCols = ((2 * numPatterns) * numOnes)
halfCols = (numPatterns * numOnes)
numNoiseBits = numOnes
p = self._getSimplePatterns(numOnes, numPatterns)
trainingSequences = []
for i in xrange(numRepetitions):
sequence = []
for j in xrange(numPatterns):
v = numpy.zeros(numCols)
v[0:halfCols] = p[j]
noiseIndices = (self._rgen.permutation(halfCols) + halfCols)[0:numNoiseBits]
v[noiseIndices] = 1
sequence.append(v)
trainingSequences.append(sequence)
testSequence = []
for j in xrange(numPatterns):
v = numpy.zeros(numCols, dtype='float32')
v[0:halfCols] = p[j]
testSequence.append(v)
if (g_options.verbosity > 1):
print '\nTraining sequences'
self._printAllTrainingSequences(trainingSequences)
print '\nTest sequence'
self._printAllTrainingSequences([testSequence])
return (trainingSequences, [testSequence])
|
'Three simple sequences, composed of the same 5 static patterns. The left
half of the vector contains the pattern elements, each with numOnes
consecutive bits. The right half contains numOnes random bits.
Sequence 1 is: p0, p1, p2, p3, p4
Sequence 2 is: p4, p3, p2, p1, p0
Sequence 3 is: p2, p0, p4, p1, p3
The function returns a pair:
trainingSequences: A list containing numRepetitions instances of the
above sequences
testSequence: Clean test sequences with no noise on the right half'
| def _buildSL2TrainingSet(self, numOnes=10, numRepetitions=10):
| numPatterns = 5
numCols = ((2 * numPatterns) * numOnes)
halfCols = (numPatterns * numOnes)
numNoiseBits = numOnes
p = self._getSimplePatterns(numOnes, numPatterns)
numSequences = 3
indices = [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0], [2, 0, 4, 1, 3]]
trainingSequences = []
for i in xrange((numRepetitions * numSequences)):
sequence = []
for j in xrange(numPatterns):
v = numpy.zeros(numCols, dtype='float32')
v[0:halfCols] = p[indices[(i % numSequences)][j]]
noiseIndices = (self._rgen.permutation(halfCols) + halfCols)[0:numNoiseBits]
v[noiseIndices] = 1
sequence.append(v)
trainingSequences.append(sequence)
testSequences = []
for i in xrange(numSequences):
sequence = []
for j in xrange(numPatterns):
v = numpy.zeros(numCols, dtype='float32')
v[0:halfCols] = p[indices[(i % numSequences)][j]]
sequence.append(v)
testSequences.append(sequence)
if (g_options.verbosity > 1):
print '\nTraining sequences'
self._printAllTrainingSequences(trainingSequences)
print '\nTest sequences'
self._printAllTrainingSequences(testSequences)
return (trainingSequences, testSequences)
|
'Train the given TMs once on the entire training set, then test on the given
test sequences and check that individual predictions reflect the true
relative frequencies. Returns True for pass, False for fail.'
| def _testSegmentLearningSequence(self, tms, trainingSequences, testSequences, doResets=True):
| if (testSequences is None):
testSequences = trainingSequences
(cppTM, pyTM) = (tms[0], tms[1])
if (cppTM is not None):
assert (fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True)
if (g_options.verbosity > 0):
print '============= Training ================='
print 'TM parameters:'
print 'CPP'
if (cppTM is not None):
print cppTM.printParameters()
print '\nPY'
print pyTM.printParameters()
for (sequenceNum, trainingSequence) in enumerate(trainingSequences):
if (g_options.verbosity > 1):
print '============= New sequence ================='
if doResets:
if (cppTM is not None):
cppTM.reset()
pyTM.reset()
for (t, x) in enumerate(trainingSequence):
if (g_options.verbosity > 1):
print 'Time step', t, 'sequence number', sequenceNum
print 'Input: ', pyTM.printInput(x)
print 'NNZ:', x.nonzero()
x = numpy.array(x).astype('float32')
if (cppTM is not None):
cppTM.learn(x)
pyTM.learn(x)
if (cppTM is not None):
assert (fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity, relaxSegmentTests=False) == True)
if (g_options.verbosity > 2):
if (cppTM is not None):
print 'CPP'
cppTM.printStates(printPrevious=(g_options.verbosity > 4))
print '\nPY'
pyTM.printStates(printPrevious=(g_options.verbosity > 4))
print
if (g_options.verbosity > 4):
print 'Sequence finished. Complete state after sequence'
if (cppTM is not None):
print 'CPP'
cppTM.printCells()
print '\nPY'
pyTM.printCells()
print
if (g_options.verbosity > 2):
print 'Calling trim segments'
if (cppTM is not None):
(nSegsRemovedCPP, nSynsRemovedCPP) = cppTM.trimSegments()
(nSegsRemoved, nSynsRemoved) = pyTM.trimSegments()
if (cppTM is not None):
assert (nSegsRemovedCPP == nSegsRemoved)
assert (nSynsRemovedCPP == nSynsRemoved)
if (cppTM is not None):
assert (fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True)
print 'Training completed. Stats:'
info = pyTM.getSegmentInfo()
print ' nSegments:', info[0]
print ' nSynapses:', info[1]
if (g_options.verbosity > 3):
print 'Complete state:'
if (cppTM is not None):
print 'CPP'
cppTM.printCells()
print '\nPY'
pyTM.printCells()
if (g_options.verbosity > 1):
print '============= Inference ================='
if (cppTM is not None):
cppTM.collectStats = True
pyTM.collectStats = True
nPredictions = 0
(cppNumCorrect, pyNumCorrect) = (0, 0)
for (sequenceNum, testSequence) in enumerate(testSequences):
if (g_options.verbosity > 1):
print '============= New sequence ================='
slen = len(testSequence)
if doResets:
if (cppTM is not None):
cppTM.reset()
pyTM.reset()
for (t, x) in enumerate(testSequence):
if (g_options.verbosity >= 2):
print 'Time step', t, '\nInput:'
pyTM.printInput(x)
if (cppTM is not None):
cppTM.infer(x)
pyTM.infer(x)
if (cppTM is not None):
assert (fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True)
if (g_options.verbosity > 2):
if (cppTM is not None):
print 'CPP'
cppTM.printStates(printPrevious=(g_options.verbosity > 4), printLearnState=False)
print '\nPY'
pyTM.printStates(printPrevious=(g_options.verbosity > 4), printLearnState=False)
if (cppTM is not None):
cppScores = cppTM.getStats()
pyScores = pyTM.getStats()
if (g_options.verbosity >= 2):
if (cppTM is not None):
print 'CPP'
print cppScores
print '\nPY'
print pyScores
if ((t < (slen - 1)) and (t > pyTM.burnIn)):
nPredictions += 1
if (cppTM is not None):
if (cppScores['curPredictionScore2'] > 0.3):
cppNumCorrect += 1
if (pyScores['curPredictionScore2'] > 0.3):
pyNumCorrect += 1
if (cppTM is not None):
cppScores = cppTM.getStats()
pyScores = pyTM.getStats()
passTest = False
if (cppTM is not None):
if ((cppNumCorrect == nPredictions) and (pyNumCorrect == nPredictions)):
passTest = True
elif (pyNumCorrect == nPredictions):
passTest = True
if (not passTest):
print 'CPP correct predictions:', cppNumCorrect
print 'PY correct predictions:', pyNumCorrect
print 'Total predictions:', nPredictions
return passTest
|
'Test segment learning'
| def _testSL1(self, numOnes=10, numRepetitions=6, fixedResources=False, checkSynapseConsistency=True):
| if fixedResources:
testName = 'TestSL1_FS'
else:
testName = 'TestSL1'
print ('\nRunning %s...' % testName)
(trainingSet, testSet) = self._buildSegmentLearningTrainingSet(numOnes, numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols=numCols, fixedResources=fixedResources, checkSynapseConsistency=checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
print ('%s PASS' % testName)
return 1
else:
print ('%s FAILED' % testName)
return 0
|
'Test segment learning'
| def _testSL2(self, numOnes=10, numRepetitions=10, fixedResources=False, checkSynapseConsistency=True):
| if fixedResources:
testName = 'TestSL2_FS'
else:
testName = 'TestSL2'
print ('\nRunning %s...' % testName)
(trainingSet, testSet) = self._buildSL2TrainingSet(numOnes, numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols=numCols, fixedResources=fixedResources, checkSynapseConsistency=checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
print ('%s PASS' % testName)
return 1
else:
print ('%s FAILED' % testName)
return 0
|
'Test segment learning without fixed resources'
| def test_SL1NoFixedResources(self):
| self._testSL1(fixedResources=False, checkSynapseConsistency=g_options.long)
|
'Test segment learning with fixed resources'
| def test_SL1WithFixedResources(self):
| if (not g_options.long):
print ('Test %s only enabled with the --long option' % self._testMethodName)
return
self._testSL1(fixedResources=True, checkSynapseConsistency=g_options.long)
|
'Test segment learning without fixed resources'
| def test_SL2NoFixedResources(self):
| if (not g_options.long):
print ('Test %s only enabled with the --long option' % self._testMethodName)
return
self._testSL2(fixedResources=False, checkSynapseConsistency=g_options.long)
|
'Test segment learning with fixed resources'
| def test_SL2WithFixedResources(self):
| if (not g_options.long):
print ('Test %s only enabled with the --long option' % self._testMethodName)
return
self._testSL2(fixedResources=True, checkSynapseConsistency=g_options.long)
|
'Basic sequence learner. M=1, N=100, P=1 (M: number of sequences, N: sequence
length, P: number of training presentations).'
| def testB1(self):
| self.init()
numbers = self.sequenceMachine.generateNumbers(1, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
self.assertAllInactiveWereUnpredicted()
|
'N=300, M=1, P=1. (See how high we can go with N)'
| def testB3(self):
| self.init()
numbers = self.sequenceMachine.generateNumbers(1, 300)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
self.assertAllInactiveWereUnpredicted()
|
'N=100, M=3, P=1. (See how high we can go with N*M)'
| def testB4(self):
| self.init()
numbers = self.sequenceMachine.generateNumbers(3, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
|
'Like B1 but with cellsPerColumn = 4.
First order sequences should still work just fine.'
| def testB5(self):
| self.init({'cellsPerColumn': 4})
numbers = self.sequenceMachine.generateNumbers(1, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
self.assertAllInactiveWereUnpredicted()
|
'Like B4 but with cellsPerColumn = 4.
First order sequences should still work just fine.'
| def testB6(self):
| self.init({'cellsPerColumn': 4})
numbers = self.sequenceMachine.generateNumbers(3, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
self.assertAllInactiveWereUnpredicted()
|
'Like B1 but with slower learning.
Set the following parameters differently:
initialPermanence = 0.2
connectedPermanence = 0.7
permanenceIncrement = 0.2
Now we train the TM with the B1 sequence 4 times (P=4). This increments the
permanences above the connectedPermanence threshold of 0.7 (to roughly 0.8), at
which point the inference will be correct. This test ensures the basic match
function and segment activation rules are working correctly.'
| def testB7(self):
| self.init({'initialPermanence': 0.2, 'connectedPermanence': 0.7, 'permanenceIncrement': 0.2})
numbers = self.sequenceMachine.generateNumbers(1, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
for _ in xrange(4):
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
self.assertAllInactiveWereUnpredicted()
|
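A quick check of the permanence arithmetic behind B7, assuming the segment forms at initialPermanence on the first presentation and is reinforced once per additional presentation (a back-of-the-envelope sketch, not part of the test):

initialPermanence = 0.2
permanenceIncrement = 0.2
connectedPermanence = 0.7
# permanence after each of the 4 presentations
perms = [initialPermanence + i * permanenceIncrement for i in range(4)]
print(perms)                            # roughly [0.2, 0.4, 0.6, 0.8]
print(perms[-1] > connectedPermanence)  # True  -> connected after the 4th pass, inference works
print(perms[-2] > connectedPermanence)  # False -> which is why B9 (3 presentations) fails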
'Like B7 but with 4 cells per column.
Should still work.'
| def testB8(self):
| self.init({'initialPermanence': 0.2, 'connectedPermanence': 0.7, 'permanenceIncrement': 0.2, 'cellsPerColumn': 4})
numbers = self.sequenceMachine.generateNumbers(1, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
for _ in xrange(4):
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
self.assertAllInactiveWereUnpredicted()
|
'Like B7 but present the sequence fewer than 4 times.
The inference should be incorrect.'
| def testB9(self):
| self.init({'initialPermanence': 0.2, 'connectedPermanence': 0.7, 'permanenceIncrement': 0.2})
numbers = self.sequenceMachine.generateNumbers(1, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
for _ in xrange(3):
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWereUnpredicted()
|
'Like B5, but with activationThreshold = 8 and with each pattern
corrupted by a small amount of spatial noise (X = 0.05).'
| def testB11(self):
| self.init({'cellsPerColumn': 4, 'activationThreshold': 8, 'minThreshold': 8})
numbers = self.sequenceMachine.generateNumbers(1, 100)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
sequence = self.sequenceMachine.addSpatialNoise(sequence, 0.05)
self._testTM(sequence)
unpredictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTraceUnpredictedActiveColumns())
self.assertTrue((unpredictedActiveColumnsMetric.mean < 1))
|
'Learn two sequences with a short shared pattern.
Parameters should be the same as B1.
Since cellsPerColumn == 1, it should make more predictions than necessary.'
| def testH1(self):
| self.init()
numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedInactiveColumns())
self.assertTrue((predictedInactiveColumnsMetric.mean > 0))
self.assertTrue((len(self.tm.mmGetTracePredictedInactiveColumns().data[15]) > 0))
self.assertTrue((len(self.tm.mmGetTracePredictedInactiveColumns().data[35]) > 0))
|
'Same as H1, but with cellsPerColumn == 4, and train multiple times.
It should make just the right number of predictions.'
| def testH2(self):
| self.init({'cellsPerColumn': 4})
numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
sequence = self.sequenceMachine.generateFromNumbers(numbers)
for _ in xrange(10):
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedInactiveColumns())
self.assertTrue((predictedInactiveColumnsMetric.sum < 26))
self.assertEqual(len(self.tm.mmGetTracePredictedInactiveColumns().data[36]), 0)
|
'Like H2, except the shared subsequence is in the beginning.
(e.g. "ABCDEF" and "ABCGHIJ") At the point where the shared subsequence
ends, all possible next patterns should be predicted. As soon as you see
the first unique pattern, the predictions should collapse to be a perfect
prediction.'
| def testH3(self):
| self.init({'cellsPerColumn': 4})
numbers = self.sequenceMachine.generateNumbers(2, 20, (0, 5))
sequence = self.sequenceMachine.generateFromNumbers(numbers)
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedInactiveColumns())
self.assertTrue((predictedInactiveColumnsMetric.sum < (26 * 2)))
self.assertTrue((len(self.tm.mmGetTracePredictedInactiveColumns().data[5]) > 0))
self.assertTrue((len(self.tm.mmGetTracePredictedInactiveColumns().data[25]) > 0))
|
'Shared patterns. Similar to H2 except that patterns are shared between
sequences. All sequences are different shufflings of the same set of N
patterns (there is no shared subsequence).'
| def testH4(self):
| self.init({'cellsPerColumn': 4})
numbers = []
for _ in xrange(2):
numbers += self.sequenceMachine.generateNumbers(1, 20)
sequence = self.sequenceMachine.generateFromNumbers(numbers)
for _ in xrange(20):
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedInactiveColumns())
self.assertTrue((predictedInactiveColumnsMetric.mean < 3))
|
'Combination of H4) and H2).
Shared patterns in different sequences, with a shared subsequence.'
| def testH5(self):
| self.init({'cellsPerColumn': 4})
numbers = []
shared = self.sequenceMachine.generateNumbers(1, 5)[:(-1)]
for _ in xrange(2):
sublist = self.sequenceMachine.generateNumbers(1, 20)
sublist = [x for x in sublist if (x not in xrange(5))]
numbers += ((sublist[0:10] + shared) + sublist[10:])
sequence = self.sequenceMachine.generateFromNumbers(numbers)
for _ in xrange(20):
self.feedTM(sequence)
self._testTM(sequence)
self.assertAllActiveWerePredicted()
predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedInactiveColumns())
self.assertTrue((predictedInactiveColumnsMetric.mean < 3))
|
'Sensitivity to small amounts of spatial noise during inference
(X = 0.05). Parameters the same as B11, and sequences like H2.'
| def testH9(self):
| self.init({'cellsPerColumn': 4, 'activationThreshold': 8, 'minThreshold': 8})
numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
sequence = self.sequenceMachine.generateFromNumbers(numbers)
for _ in xrange(10):
self.feedTM(sequence)
sequence = self.sequenceMachine.addSpatialNoise(sequence, 0.05)
self._testTM(sequence)
unpredictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTraceUnpredictedActiveColumns())
self.assertTrue((unpredictedActiveColumnsMetric.mean < 3))
|
'The orphan decay mechanism reduces predicted inactive cells (extra predictions).
The test feeds noisy sequences (X = 0.05) to a TM with and without orphan decay.
The TM with orphan decay should have many fewer predicted inactive columns.
Parameters are the same as B11, and sequences like H9.'
| def testH10(self):
| self.init({'cellsPerColumn': 4, 'activationThreshold': 8, 'minThreshold': 8})
numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
sequence = self.sequenceMachine.generateFromNumbers(numbers)
sequenceNoisy = dict()
for i in xrange(10):
sequenceNoisy[i] = self.sequenceMachine.addSpatialNoise(sequence, 0.05)
self.feedTM(sequenceNoisy[i])
self.tm.mmClearHistory()
self._testTM(sequence)
predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedInactiveColumns())
predictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedActiveColumns())
predictedInactiveColumnsMeanNoOrphanDecay = predictedInactiveColumnsMetric.mean
predictedActiveColumnsMeanNoOrphanDecay = predictedActiveColumnsMetric.mean
self.init({'cellsPerColumn': 4, 'activationThreshold': 8, 'minThreshold': 8, 'predictedSegmentDecrement': 0.04})
for i in xrange(10):
self.feedTM(sequenceNoisy[i])
self.tm.mmClearHistory()
self._testTM(sequence)
predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedInactiveColumns())
predictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGetTracePredictedActiveColumns())
predictedInactiveColumnsMeanOrphanDecay = predictedInactiveColumnsMetric.mean
predictedActiveColumnsMeanOrphanDecay = predictedActiveColumnsMetric.mean
self.assertGreater(predictedInactiveColumnsMeanNoOrphanDecay, 0)
self.assertGreater(predictedInactiveColumnsMeanNoOrphanDecay, predictedInactiveColumnsMeanOrphanDecay)
self.assertAlmostEqual(predictedActiveColumnsMeanNoOrphanDecay, predictedActiveColumnsMeanOrphanDecay)
|
'experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
Returns: absolute path to the experiment directory'
| def getOpfExperimentPath(self, experimentName):
| path = os.path.join(self.__opfExperimentsParentDir, experimentName)
assert os.path.isdir(path), ("Experiment path %s doesn't exist or is not a directory" % (path,))
return path
|
'Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method will be
considered an error rather than a test failure. The default implementation
does nothing.'
| def setUp(self):
| global g_myEnv
if (not g_myEnv):
g_myEnv = MyTestEnvironment()
|
'Method called immediately after the test method has been called and the
result recorded. This is called even if the test method raised an exception,
so the implementation in subclasses may need to be particularly careful
about checking internal state. Any exception raised by this method will be
considered an error rather than a test failure. This method will only be
called if the setUp() succeeds, regardless of the outcome of the test
method. The default implementation does nothing.'
| def tearDown(self):
| self.resetExtraLogItems()
|
'Override to force unittest framework to use test method names instead
of docstrings in the report.'
| def shortDescription(self):
| return None
|
'Executes a positive OPF RunExperiment test as a subprocess and validates
its exit status.
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
Returns: result from _executeExternalCmdAndReapOutputs'
| def executePositiveOpfExperiment(self, experimentName, short=False):
| opfRunner = g_myEnv.getOpfRunExperimentPyPath()
opfExpDir = g_myEnv.getOpfExperimentPath(experimentName)
r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner, experimentDirPath=opfExpDir, short=short)
return r
|
'Executes a positive RunExperiment.py test and performs
basic validation
runnerPath: experiment runner (LPF or OPF RunExperiment.py path)
experimentDirPath: directory containing the description.py file of interest
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
NOTE: if the (possibly aggregated) dataset has fewer
rows than the count overrides, then an LPF experiment
will fail.
Returns: result from _executeExternalCmdAndReapOutputs'
| def __executePositiveRunExperimentTest(self, runnerPath, experimentDirPath, customOptions=[], short=False):
| command = ['python', runnerPath, experimentDirPath]
command.extend(customOptions)
if short:
command.append('--testMode')
self.addExtraLogItem({'command': command})
r = _executeExternalCmdAndReapOutputs(command)
self.addExtraLogItem({'result': r})
_debugOut(('_executeExternalCmdAndReapOutputs(%s)=%s' % (command, r)))
self.assertEqual(r['exitStatus'], 0, ('Expected status = 0 from %s; got: %s' % (runnerPath, r['exitStatus'])))
self.resetExtraLogItems()
return r
|
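The helper above launches RunExperiment.py in a subprocess and asserts on its exit status. A stripped-down sketch of the same pattern using only the standard library (the real _executeExternalCmdAndReapOutputs also captures and archives the child's output; paths in the usage comment are placeholders):

    import subprocess

    def run_experiment_subprocess(runnerPath, experimentDirPath, testMode=True):
        # Build the command line the same way the test helper does.
        command = ['python', runnerPath, experimentDirPath]
        if testMode:
            command.append('--testMode')  # shrink iteration counts for a quick run
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        return {'exitStatus': proc.returncode, 'stdout': stdout, 'stderr': stderr}

    # r = run_experiment_subprocess('scripts/run_opf_experiment.py', 'path/to/experiment')
    # assert r['exitStatus'] == 0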
'Method called to prepare the test fixture. This is called by the
unittest framework immediately before calling the test method; any exception
raised by this method will be considered an error rather than a test
failure. The default implementation does nothing.'
| def setUp(self):
| global g_myEnv
if (not g_myEnv):
params = type('obj', (object,), {'installDir': resource_filename('nupic', '')})
g_myEnv = MyTestEnvironment(params)
|
'Method called immediately after the test method has been called and the
result recorded. This is called even if the test method raised an exception,
so the implementation in subclasses may need to be particularly careful
about checking internal state. Any exception raised by this method will be
considered an error rather than a test failure. This method will only be
called if the setUp() succeeds, regardless of the outcome of the test
method. The default implementation does nothing.'
| def tearDown(self):
| self.resetExtraLogItems()
g_myEnv.cleanUp()
|
'Override to force unittest framework to use test method names instead
of docstrings in the report.'
| def shortDescription(self):
| return None
|
'This does the following:
1.) Calls ExpGenerator to generate a base description file and permutations
file from expDescription.
2.) Verifies that description.py and permutations.py are valid python
modules that can be loaded
3.) Returns the loaded base description module and permutations module
Parameters:
expDesc: JSON format experiment description
hsVersion: which version of hypersearch to use (\'v2\'; \'v1\' was dropped)
retval: (baseModule, permutationsModule)'
| def getModules(self, expDesc, hsVersion='v2'):
| shutil.rmtree(g_myEnv.testOutDir, ignore_errors=True)
args = [('--description=%s' % json.dumps(expDesc)), ('--outDir=%s' % g_myEnv.testOutDir), ('--version=%s' % hsVersion)]
self.addExtraLogItem({'args': args})
experiment_generator.expGenerator(args)
descriptionPyPath = os.path.join(g_myEnv.testOutDir, 'description.py')
permutationsPyPath = os.path.join(g_myEnv.testOutDir, 'permutations.py')
return (self.checkPythonScript(descriptionPyPath), self.checkPythonScript(permutationsPyPath))
|
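checkPythonScript above is expected to import the generated description.py and permutations.py and hand back the module objects. A minimal sketch of that load step using the Python 2 imp module (the real helper presumably performs extra validation on the loaded module; outDir in the usage comment stands for the generator's output directory):

    import imp
    import os

    def load_generated_script(path):
        # Fail fast if ExpGenerator did not produce the file.
        assert os.path.isfile(path), '%s does not exist' % path
        assert os.path.getsize(path) > 0, '%s is empty' % path
        moduleName = os.path.splitext(os.path.basename(path))[0]
        return imp.load_source(moduleName, path)

    # description = load_generated_script(os.path.join(outDir, 'description.py'))
    # permutations = load_generated_script(os.path.join(outDir, 'permutations.py'))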
'This does the following:
1.) Calls ExpGenerator to generate a base description file and permutations
file from expDescription.
2.) Verifies that description.py and permutations.py are valid python
modules that can be loaded
3.) Runs the base description.py as an experiment using OPF RunExperiment.
4.) Runs a Hypersearch using the generated permutations.py by passing it
to HypersearchWorker.
Parameters:
expDesc: JSON format experiment description
hsVersion: which version of hypersearch to use (\'v2\'; \'v1\' was dropped)
retval: list of model results'
| def runBaseDescriptionAndPermutations(self, expDesc, hsVersion, maxModels=2):
| self.getModules(expDesc, hsVersion=hsVersion)
permutationsPyPath = os.path.join(g_myEnv.testOutDir, 'permutations.py')
args = [g_myEnv.testOutDir]
from nupic.frameworks.opf.experiment_runner import runExperiment
LOGGER.info('')
LOGGER.info('============================================================')
LOGGER.info('RUNNING EXPERIMENT')
LOGGER.info('============================================================')
runExperiment(args)
jobParams = {'persistentJobGUID': generatePersistentJobGUID(), 'permutationsPyFilename': permutationsPyPath, 'hsVersion': hsVersion}
if (maxModels is not None):
jobParams['maxModels'] = maxModels
args = ['ignoreThis', ('--params=%s' % json.dumps(jobParams))]
self.resetExtraLogItems()
self.addExtraLogItem({'params': jobParams})
LOGGER.info('')
LOGGER.info('============================================================')
LOGGER.info('RUNNING PERMUTATIONS')
LOGGER.info('============================================================')
jobID = hypersearch_worker.main(args)
cjDAO = ClientJobsDAO.get()
models = cjDAO.modelsGetUpdateCounters(jobID)
modelIDs = [model.modelId for model in models]
results = cjDAO.modelsGetResultAndStatus(modelIDs)
if (maxModels is not None):
self.assertEqual(len(results), maxModels, ('Expected to get %d model results but only got %d' % (maxModels, len(results))))
for result in results:
self.assertEqual(result.completionReason, cjDAO.CMPL_REASON_EOF, ('Model did not complete successfully:\n%s' % result.completionMsg))
return results
|
'Test that the set of aggregations produced for a swarm is correct
Parameters:
expDesc: JSON experiment description
expectedAttempts: list of (minAggregationMultiple, predictionSteps) pairs
that we expect to find in the aggregation choices.'
| def assertValidSwarmingAggregations(self, expDesc, expectedAttempts):
| minAggregation = dict(expDesc['streamDef']['aggregation'])
minAggregation.pop('fields')
(base, perms) = self.getModules(expDesc)
predictionSteps = expDesc['inferenceArgs']['predictionSteps'][0]
self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc['inferenceArgs']['predictionSteps'])
tmpAggregationInfo = rCopy(base.config['aggregationInfo'], (lambda value, _: value))
tmpAggregationInfo.pop('fields')
self.assertDictEqual(tmpAggregationInfo, minAggregation)
predictAheadTime = dict(minAggregation)
for key in predictAheadTime.iterkeys():
predictAheadTime[key] *= predictionSteps
self.assertEqual(base.config['predictAheadTime'], predictAheadTime)
self.assertEqual(perms.minimize, "multiStepBestPredictions:multiStep:errorMetric='altMAPE':steps=\\[.*\\]:window=1000:field=consumption")
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) for metric in metrics]
self.assertIn(('multiStep', 'multiStepBestPredictions', {'window': 1000, 'steps': [predictionSteps], 'errorMetric': 'altMAPE'}), metricTuples)
aggPeriods = perms.permutations['aggregationInfo']
aggAttempts = []
for agg in aggPeriods.choices:
multipleOfMinAgg = aggregationDivide(agg, minAggregation)
self.assertIsInt(multipleOfMinAgg, ('invalid aggregation period %s is not an integer multiple of minAggregation (%s)' % (agg, minAggregation)))
self.assertGreaterEqual(int(round(multipleOfMinAgg)), 1, ('invalid aggregation period %s is not >= minAggregation (%s)' % (agg, minAggregation)))
requiredSteps = aggregationDivide(predictAheadTime, agg)
self.assertIsInt(requiredSteps, ('invalid aggregation period %s is not an integer factor of predictAheadTime (%s)' % (agg, predictAheadTime)))
self.assertGreaterEqual(int(round(requiredSteps)), 1, ('invalid aggregation period %s greater than predictAheadTime (%s)' % (agg, predictAheadTime)))
quotient = aggregationDivide(expDesc['computeInterval'], agg)
self.assertIsInt(quotient, ('invalid aggregation period %s is not an integer factor of computeInterval (%s)' % (agg, expDesc['computeInterval'])))
self.assertGreaterEqual(int(round(quotient)), 1, ('Invalid aggregation period %s is greater than the computeInterval %s' % (agg, expDesc['computeInterval'])))
aggAttempts.append((int(round(multipleOfMinAgg)), int(requiredSteps)))
LOGGER.info('This swarm will try the following (minAggregationMultiple, predictionSteps) combinations: %s', aggAttempts)
aggAttempts.sort()
expectedAttempts.sort()
self.assertEqual(aggAttempts, expectedAttempts, ('Expected this swarm to try the following (minAggMultiple, predictionSteps) attempts: %s, but instead it is going to try: %s' % (expectedAttempts, aggAttempts)))
|
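The constraints asserted above reduce to divisibility checks: a candidate aggregation must be an integer multiple of the stream's minimum aggregation, must divide predictAheadTime (minimum aggregation times predictionSteps) evenly, and must divide the computeInterval evenly. The toy sketch below, working in plain minutes, reproduces the (1, 24), (2, 12), (4, 6), (8, 3) attempts expected by test_AggregationSwarming further down; the real ExpGenerator operates on full aggregation dicts via aggregationDivide and may apply additional limits on how many choices it emits.

    def candidate_aggregations(minAggMinutes, predictionSteps, computeIntervalMinutes):
        # predictAheadTime = minimum aggregation * number of prediction steps
        predictAhead = minAggMinutes * predictionSteps
        attempts = []
        multiple = 1
        while minAggMinutes * multiple <= predictAhead:
            agg = minAggMinutes * multiple
            # agg must evenly divide both predictAheadTime and computeInterval
            if predictAhead % agg == 0 and computeIntervalMinutes % agg == 0:
                attempts.append((multiple, predictAhead // agg))
            multiple += 1
        return attempts

    # candidate_aggregations(15, 24, 120) -> [(1, 24), (2, 12), (4, 6), (8, 3)]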
'Test showing the schema'
| def test_ShowSchema(self):
| args = ['--showSchema']
self.addExtraLogItem({'args': args})
experiment_generator.expGenerator(args)
return
|
'Test correct behavior in response to different settings in the
prediction element'
| def test_PredictionElement(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
expDesc = {'inferenceType': 'MultiStep', 'inferenceArgs': {'predictedField': 'consumption', 'predictionSteps': [1]}, 'environment': OpfEnvironment.Experiment, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 0, 'maxValue': 200}], 'resetPeriod': {'days': 1, 'hours': 12}, 'iterationCount': 10}
(_base, perms) = self.getModules(expDesc)
self.assertEqual(perms.minimize, ("multiStepBestPredictions:multiStep:errorMetric='altMAPE':steps=\\[1\\]:window=%d:field=consumption" % experiment_generator.METRIC_WINDOW), msg=('got: %s' % perms.minimize))
self.assertNotIn('clAlpha', perms.permutations)
return
|
'Test to make sure that the correct metrics are generated'
| def test_Metrics(self):
| streamDef = dict(version=1, info='test_category_predicted_field', streams=[dict(source='file://dummy', info='dummy.csv', columns=['*'])])
expDesc = {'inferenceType': 'MultiStep', 'inferenceArgs': {'predictedField': 'playType', 'predictionSteps': [1]}, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'address', 'fieldType': 'string'}, {'fieldName': 'ydsToGo', 'fieldType': 'float'}, {'fieldName': 'playType', 'fieldType': 'string'}]}
(base, perms) = self.getModules(expDesc)
self.assertMetric(base, perms, expDesc['inferenceArgs']['predictedField'], 'avg_err', 'moving_mode', 'one_gram', InferenceElement.prediction, 'trivial')
self.assertEqual(base.control['loggedMetrics'][0], '.*')
expDesc['inferenceArgs']['predictedField'] = 'ydsToGo'
(base, perms) = self.getModules(expDesc)
self.assertMetric(base, perms, expDesc['inferenceArgs']['predictedField'], 'altMAPE', 'moving_mean', 'one_gram', InferenceElement.encodings, 'trivial')
self.assertEqual(base.control['loggedMetrics'][0], '.*')
|
'Test correct behavior in response to different settings in the
includedFields element'
| def test_IncludedFields(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
expDesc = {'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Experiment, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'gym', 'fieldType': 'string'}, {'fieldName': 'address', 'fieldType': 'string'}, {'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float'}], 'resetPeriod': {'days': 1, 'hours': 12}, 'iterationCount': 10}
(base, _perms) = self.getModules(expDesc)
actEncoderFields = set()
actEncoderNames = set()
for (_, encoder) in base.config['modelParams']['sensorParams']['encoders'].iteritems():
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
self.assertEqual(actEncoderFields, set(['gym', 'address', 'timestamp', 'consumption']))
self.assertEqual(actEncoderNames, set(['gym', 'address', 'timestamp_timeOfDay', 'timestamp_dayOfWeek', 'timestamp_weekend', 'consumption']))
expDesc['includedFields'] = [{'fieldName': 'gym', 'fieldType': 'string'}, {'fieldName': 'consumption', 'fieldType': 'float'}]
(base, _perms) = self.getModules(expDesc)
actEncoderFields = set()
actEncoderNames = set()
for (_, encoder) in base.config['modelParams']['sensorParams']['encoders'].iteritems():
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
self.assertEqual(actEncoderFields, set(['gym', 'consumption']))
self.assertEqual(actEncoderNames, set(['gym', 'consumption']))
expDesc['includedFields'] = [{'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 42, 'maxValue': 42.42}]
(base, _perms) = self.getModules(expDesc)
actEncoderFields = set()
actEncoderNames = set()
actEncoderTypes = set()
minValues = set()
maxValues = set()
for (_, encoder) in base.config['modelParams']['sensorParams']['encoders'].iteritems():
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
actEncoderTypes.add(encoder['type'])
minValues.add(encoder['minval'])
maxValues.add(encoder['maxval'])
self.assertEqual(actEncoderFields, set(['consumption']))
self.assertEqual(actEncoderNames, set(['consumption']))
self.assertEqual(actEncoderTypes, set(['ScalarEncoder']))
self.assertEqual(minValues, set([42]))
self.assertEqual(maxValues, set([42.42]))
expDesc['includedFields'] = [{'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 42, 'maxValue': 42.42, 'encoderType': 'AdaptiveScalarEncoder'}]
(base, _perms) = self.getModules(expDesc)
actEncoderFields = set()
actEncoderNames = set()
actEncoderTypes = set()
minValues = set()
maxValues = set()
for (_, encoder) in base.config['modelParams']['sensorParams']['encoders'].iteritems():
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
actEncoderTypes.add(encoder['type'])
minValues.add(encoder['minval'])
maxValues.add(encoder['maxval'])
self.assertEqual(actEncoderFields, set(['consumption']))
self.assertEqual(actEncoderNames, set(['consumption']))
self.assertEqual(actEncoderTypes, set(['AdaptiveScalarEncoder']))
self.assertEqual(minValues, set([42]))
self.assertEqual(maxValues, set([42.42]))
characters = string.punctuation
expDesc['includedFields'] = ([{'fieldName': ((char + 'helloField') + char), 'fieldType': 'float'} for char in characters] + [{'fieldName': 'consumption', 'fieldType': 'float'}])
try:
(base, _perms) = self.getModules(expDesc)
except:
LOGGER.info('Passed: Threw exception for bad fieldname.')
characters = characters.replace('\\', '')
return
|
'Test that aggregation gets pulled out of the streamDef as it should'
| def test_Aggregation(self):
| streamDef = dict(version=1, info='TestAggregation', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])], aggregation={'years': 1, 'months': 2, 'weeks': 3, 'days': 4, 'hours': 5, 'minutes': 6, 'seconds': 7, 'milliseconds': 8, 'microseconds': 9, 'fields': [('consumption', 'sum'), ('gym', 'first')]}, sequenceIdField='gym', providers={'order': ['weather'], 'weather': {'locationField': 'address', 'providerType': 'NamedProvider', 'timestampField': 'timestamp', 'weatherTypes': ['TEMP']}})
expDesc = {'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Experiment, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'gym', 'fieldType': 'string'}, {'fieldName': 'consumption', 'fieldType': 'float'}, {'fieldName': 'TEMP', 'fieldType': 'float', 'minValue': (-30.0), 'maxValue': 120.0}], 'iterationCount': 10, 'resetPeriod': {'days': 1, 'hours': 12}}
(base, _perms) = self.getModules(expDesc)
aggInfo = base.config['aggregationInfo']
aggInfo['fields'].sort()
streamDef['aggregation']['fields'].sort()
self.assertEqual(aggInfo, streamDef['aggregation'])
expDesc['streamDef'].pop('aggregation')
(base, _perms) = self.getModules(expDesc)
aggInfo = base.config['aggregationInfo']
expAggInfo = {'years': 0, 'months': 0, 'weeks': 0, 'days': 0, 'hours': 0, 'minutes': 0, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0, 'fields': []}
aggInfo['fields'].sort()
expAggInfo['fields'].sort()
self.assertEqual(aggInfo, expAggInfo)
return
|
'Test that reset period gets handled correctly'
| def test_ResetPeriod(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
expDesc = {'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Experiment, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'gym', 'fieldType': 'string'}, {'fieldName': 'consumption', 'fieldType': 'float'}], 'iterationCount': 10, 'resetPeriod': {'weeks': 3, 'days': 4, 'hours': 5, 'minutes': 6, 'seconds': 7, 'milliseconds': 8, 'microseconds': 9}}
(base, _perms) = self.getModules(expDesc)
resetInfo = base.config['modelParams']['sensorParams']['sensorAutoReset']
self.assertEqual(resetInfo, expDesc['resetPeriod'])
expDesc.pop('resetPeriod')
(base, _perms) = self.getModules(expDesc)
resetInfo = base.config['modelParams']['sensorParams']['sensorAutoReset']
self.assertEqual(resetInfo, None)
return
|
'Try running a basic Hypersearch V2 experiment and permutations'
| def test_RunningExperimentHSv2(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
expDesc = {'inferenceType': 'TemporalMultiStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Nupic, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 0, 'maxValue': 200}], 'resetPeriod': {'days': 1, 'hours': 12}, 'iterationCount': 10}
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
return
|
'Test that we correctly generate a multi-step prediction experiment'
| def test_MultiStep(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'], last_record=20)], aggregation={'years': 0, 'months': 0, 'weeks': 0, 'days': 0, 'hours': 1, 'minutes': 0, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0, 'fields': [('consumption', 'sum'), ('gym', 'first'), ('timestamp', 'first')]})
expDesc = {'environment': OpfEnvironment.Nupic, 'inferenceArgs': {'predictedField': 'consumption', 'predictionSteps': [1, 5]}, 'inferenceType': 'MultiStep', 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float'}], 'iterationCount': (-1), 'runBaselines': True}
(base, perms) = self.getModules(expDesc)
print "base.config['modelParams']:"
pprint.pprint(base.config['modelParams'])
print 'perms.permutations'
pprint.pprint(perms.permutations)
print 'perms.minimize'
pprint.pprint(perms.minimize)
print 'expDesc'
pprint.pprint(expDesc)
self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'], expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'], 'TemporalMultiStep')
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']['_classifierInput']['classifierOnly'], True)
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']['_classifierInput']['fieldname'], expDesc['inferenceArgs']['predictedField'])
self.assertIn('inferenceType', perms.permutations['modelParams'])
self.assertEqual(perms.minimize, ("multiStepBestPredictions:multiStep:errorMetric='altMAPE':" + 'steps=\\[1, 5\\]:window=1000:field=consumption'))
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertIn('_classifierInput', perms.permutations['modelParams']['sensorParams']['encoders'])
self.assertEqual(perms.inputPredictedField, 'auto')
self.assertIn('activationThreshold', perms.permutations['modelParams']['tmParams'])
self.assertIn('minThreshold', perms.permutations['modelParams']['tmParams'])
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) for metric in metrics]
self.assertIn(('multiStep', 'multiStepBestPredictions', {'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}), metricTuples)
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['predictionSteps'] = [5, 1]
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.minimize, ("multiStepBestPredictions:multiStep:errorMetric='altMAPE':" + 'steps=\\[5, 1\\]:window=1000:field=consumption'))
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'NontemporalMultiStep'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(base.config['modelParams']['inferenceType'], expDesc2['inferenceType'])
self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc2['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'], expDesc2['inferenceArgs']['predictedField'])
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertNotIn('inferenceType', perms.permutations['modelParams'])
self.assertNotIn('activationThreshold', perms.permutations['modelParams']['tmParams'])
self.assertNotIn('minThreshold', perms.permutations['modelParams']['tmParams'])
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) for metric in metrics]
self.assertIn(('multiStep', 'multiStepBestPredictions', {'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}), metricTuples)
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'MultiStep'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(base.config['modelParams']['inferenceType'], 'TemporalMultiStep')
self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc2['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'], expDesc2['inferenceArgs']['predictedField'])
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertIn('inferenceType', perms.permutations['modelParams'])
self.assertIn('activationThreshold', perms.permutations['modelParams']['tmParams'])
self.assertIn('minThreshold', perms.permutations['modelParams']['tmParams'])
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) for metric in metrics]
self.assertIn(('multiStep', 'multiStepBestPredictions', {'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}), metricTuples)
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['inputPredictedField'] = 'yes'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, 'yes')
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['inputPredictedField'] = 'no'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, 'no')
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['inputPredictedField'] = 'auto'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, 'auto')
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['inputPredictedField'] = 'no'
(base, perms) = self.getModules(expDesc2)
self.assertNotIn('consumption', base.config['modelParams']['sensorParams']['encoders'].keys())
|
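The minimize strings asserted above follow a fixed label format in which only the brackets of the steps list are regex-escaped. A hypothetical helper (the name build_minimize_label is made up for illustration) showing how such a label can be assembled:

    def build_minimize_label(errorMetric, steps, window, field):
        # Escape just the brackets, matching the labels asserted in this test.
        stepsStr = str(steps).replace('[', '\\[').replace(']', '\\]')
        return ("multiStepBestPredictions:multiStep:errorMetric='%s':steps=%s:window=%d:field=%s"
                % (errorMetric, stepsStr, window, field))

    # build_minimize_label('altMAPE', [1, 5], 1000, 'consumption')
    # -> "multiStepBestPredictions:multiStep:errorMetric='altMAPE':steps=\[1, 5\]:window=1000:field=consumption"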
'Test that we correctly generate a multi-step prediction experiment that
uses aggregation swarming'
| def test_AggregationSwarming(self):
| minAggregation = {'years': 0, 'months': 0, 'weeks': 0, 'days': 0, 'hours': 0, 'minutes': 15, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0}
streamAggregation = dict(minAggregation)
streamAggregation.update({'fields': [('consumption', 'sum'), ('gym', 'first'), ('timestamp', 'first')]})
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'], last_record=10)], aggregation=streamAggregation)
expDesc = {'environment': OpfEnvironment.Nupic, 'inferenceArgs': {'predictedField': 'consumption', 'predictionSteps': [24]}, 'inferenceType': 'TemporalMultiStep', 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float'}], 'iterationCount': (-1), 'runBaselines': False, 'computeInterval': {'hours': 2}}
self.assertValidSwarmingAggregations(expDesc=expDesc, expectedAttempts=[(1, 24), (2, 12), (4, 6), (8, 3)])
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['streamDef']['aggregation']['minutes'] = 1
expDescTmp['inferenceArgs']['predictionSteps'] = [((4 * 60) / 1)]
self.assertValidSwarmingAggregations(expDesc=expDescTmp, expectedAttempts=[(24, 10), (30, 8), (40, 6), (60, 4), (120, 2)])
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 3
expDescTmp['inferenceArgs']['predictionSteps'] = [16]
self.assertValidSwarmingAggregations(expDesc=expDescTmp, expectedAttempts=[(1, 16), (2, 8), (4, 4)])
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 2
expDescTmp['inferenceArgs']['predictionSteps'] = [16]
self.assertValidSwarmingAggregations(expDesc=expDescTmp, expectedAttempts=[(1, 16), (2, 8), (4, 4), (8, 2)])
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 0
expDescTmp['computeInterval']['minutes'] = 1
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc=expDescTmp, expectedAttempts=[(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info('Got expected exception: %s', cm.exception)
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 0
expDescTmp['computeInterval']['minutes'] = 25
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc=expDescTmp, expectedAttempts=[(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info('Got expected exception: %s', cm.exception)
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['inferenceArgs']['predictionSteps'] = [1, 16]
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc=expDescTmp, expectedAttempts=[(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info('Got expected exception: %s', cm.exception)
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['streamDef']['aggregation']['minutes'] = 0
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc=expDescTmp, expectedAttempts=[(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info('Got expected exception: %s', cm.exception)
|
'Test correct behavior in response to different settings in the
swarmSize element'
| def test_SwarmSize(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
expDesc = {'swarmSize': 'large', 'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Nupic, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 0, 'maxValue': 200}], 'resetPeriod': {'days': 1, 'hours': 12}}
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], (-1), msg=('got: %s' % base.control['iterationCount']))
self.assertEqual(perms.minParticlesPerSwarm, 15, msg=('got: %s' % perms.minParticlesPerSwarm))
self.assertEqual(perms.tryAll3FieldCombinationsWTimestamps, True, msg=('got: %s' % perms.tryAll3FieldCombinationsWTimestamps))
self.assertFalse(hasattr(perms, 'maxModels'))
self.assertEqual(perms.inputPredictedField, 'auto')
expDesc['swarmSize'] = 'medium'
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], 4000, msg=('got: %s' % base.control['iterationCount']))
self.assertEqual(perms.minParticlesPerSwarm, 5, msg=('got: %s' % perms.minParticlesPerSwarm))
self.assertEqual(perms.maxModels, 200, msg=('got: %s' % perms.maxModels))
self.assertFalse(hasattr(perms, 'killUselessSwarms'))
self.assertFalse(hasattr(perms, 'minFieldContribution'))
self.assertFalse(hasattr(perms, 'maxFieldBranching'))
self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
self.assertEqual(perms.inputPredictedField, 'auto')
expDesc['swarmSize'] = 'small'
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], 100, msg=('got: %s' % base.control['iterationCount']))
self.assertEqual(perms.minParticlesPerSwarm, 3, msg=('got: %s' % perms.minParticlesPerSwarm))
self.assertEqual(perms.maxModels, 1, msg=('got: %s' % perms.maxModels))
self.assertFalse(hasattr(perms, 'killUselessSwarms'))
self.assertFalse(hasattr(perms, 'minFieldContribution'))
self.assertFalse(hasattr(perms, 'maxFieldBranching'))
self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
self.assertEqual(perms.inputPredictedField, 'yes')
expDesc['swarmSize'] = 'small'
expDesc['minParticlesPerSwarm'] = 2
expDesc['iterationCount'] = 42
expDesc['inferenceArgs']['inputPredictedField'] = 'auto'
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], 42, msg=('got: %s' % base.control['iterationCount']))
self.assertEqual(perms.minParticlesPerSwarm, 2, msg=('got: %s' % perms.minParticlesPerSwarm))
self.assertEqual(perms.maxModels, 1, msg=('got: %s' % perms.maxModels))
self.assertFalse(hasattr(perms, 'killUselessSwarms'))
self.assertFalse(hasattr(perms, 'minFieldContribution'))
self.assertFalse(hasattr(perms, 'maxFieldBranching'))
self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
self.assertEqual(perms.inputPredictedField, 'auto')
modelResults = self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2', maxModels=None)
self.assertEqual(len(modelResults), 1, ('Expected to get %d model results but only got %d' % (1, len(modelResults))))
|
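For quick reference, the presets exercised above amount to the following defaults; this is just a restatement of the assertions in test_SwarmSize, not an independent description of the swarming API:

    # swarmSize preset -> defaults asserted in test_SwarmSize
    # (maxModels None means the permutations file does not set the attribute)
    SWARM_SIZE_DEFAULTS = {
        'large':  {'iterationCount': -1,   'minParticlesPerSwarm': 15,
                   'maxModels': None, 'inputPredictedField': 'auto'},
        'medium': {'iterationCount': 4000, 'minParticlesPerSwarm': 5,
                   'maxModels': 200,  'inputPredictedField': 'auto'},
        'small':  {'iterationCount': 100,  'minParticlesPerSwarm': 3,
                   'maxModels': 1,    'inputPredictedField': 'yes'},
    }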
'Test correct behavior in response to setting the fixedFields swarming
option.'
| def test_FixedFields(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
expDesc = {'swarmSize': 'large', 'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Nupic, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 0, 'maxValue': 200}], 'resetPeriod': {'days': 1, 'hours': 12}, 'fixedFields': ['consumption', 'timestamp']}
(_base, perms) = self.getModules(expDesc)
self.assertEqual(perms.fixedFields, ['consumption', 'timestamp'], msg=('got: %s' % perms.fixedFields))
expDesc.pop('fixedFields')
(_base, perms) = self.getModules(expDesc)
self.assertFalse(hasattr(perms, 'fixedFields'))
|
'Test correct behavior in response to setting the fastSwarmModelParams
swarming option.'
| def test_FastSwarmModelParams(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
fastSwarmModelParams = {'this is': 'a test'}
expDesc = {'swarmSize': 'large', 'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Nupic, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float', 'minValue': 0, 'maxValue': 200}], 'resetPeriod': {'days': 1, 'hours': 12}, 'fastSwarmModelParams': fastSwarmModelParams}
(_base, perms) = self.getModules(expDesc)
self.assertEqual(perms.fastSwarmModelParams, fastSwarmModelParams, msg=('got: %s' % perms.fastSwarmModelParams))
expDesc.pop('fastSwarmModelParams')
(base, perms) = self.getModules(expDesc)
self.assertFalse(hasattr(perms, 'fastSwarmModelParams'))
|
'Test correct behavior in response to setting the anomalyParams
experiment description options'
| def test_AnomalyParams(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])])
expDesc = {'environment': OpfEnvironment.Nupic, 'inferenceArgs': {'predictedField': 'consumption', 'predictionSteps': [1]}, 'inferenceType': 'TemporalAnomaly', 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float'}], 'iterationCount': (-1), 'anomalyParams': {'autoDetectThreshold': 1.1, 'autoDetectWaitRecords': 0, 'anomalyCacheRecords': 10}}
(base, _perms) = self.getModules(expDesc)
self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'], expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'], expDesc['inferenceType'])
self.assertEqual(base.config['modelParams']['anomalyParams'], expDesc['anomalyParams'])
expDesc['inferenceType'] = 'TemporalNextStep'
(base, _perms) = self.getModules(expDesc)
self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'], expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'], expDesc['inferenceType'])
self.assertEqual(base.config['modelParams']['anomalyParams'], expDesc['anomalyParams'])
|
'Test that we correctly generate a Nontemporal classification experiment'
| def test_NontemporalClassification(self):
| streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'], last_record=10)], aggregation={'years': 0, 'months': 0, 'weeks': 0, 'days': 0, 'hours': 1, 'minutes': 0, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0, 'fields': [('consumption', 'sum'), ('gym', 'first'), ('timestamp', 'first')]})
expDesc = {'environment': OpfEnvironment.Nupic, 'inferenceArgs': {'predictedField': 'consumption', 'predictionSteps': [0]}, 'inferenceType': 'TemporalMultiStep', 'streamDef': streamDef, 'includedFields': [{'fieldName': 'timestamp', 'fieldType': 'datetime'}, {'fieldName': 'consumption', 'fieldType': 'float'}], 'iterationCount': (-1), 'runBaselines': True}
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'], expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'], InferenceType.NontemporalClassification)
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']['_classifierInput']['classifierOnly'], True)
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']['_classifierInput']['fieldname'], expDesc['inferenceArgs']['predictedField'])
self.assertNotIn('consumption', base.config['modelParams']['sensorParams']['encoders'].keys())
self.assertFalse(base.config['modelParams']['spEnable'])
self.assertFalse(base.config['modelParams']['tmEnable'])
self.assertNotIn('inferenceType', perms.permutations['modelParams'])
self.assertEqual(perms.minimize, ("multiStepBestPredictions:multiStep:errorMetric='altMAPE':" + 'steps=\\[0\\]:window=1000:field=consumption'))
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertEqual(perms.permutations['modelParams']['tmParams'], {})
self.assertEqual(perms.permutations['modelParams']['spParams'], {})
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) for metric in metrics]
self.assertIn(('multiStep', 'multiStepBestPredictions', {'window': 1000, 'steps': [0], 'errorMetric': 'aae'}), metricTuples)
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'NontemporalClassification'
(newBase, _newPerms) = self.getModules(expDesc2)
self.assertEqual(base.config, newBase.config)
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'NontemporalClassification'
expDesc2['inferenceArgs']['predictionSteps'] = [1]
gotException = False
try:
(newBase, _newPerms) = self.getModules(expDesc2)
except:
gotException = True
self.assertTrue(gotException)
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['inputPredictedField'] = 'yes'
gotException = False
try:
(newBase, _newPerms) = self.getModules(expDesc2)
except:
gotException = True
self.assertTrue(gotException)
return
|
'This function tests saving and loading. It will train a network for 500
iterations, then save it and reload it as a second network instance. It will
then run both networks for 100 iterations and ensure they return identical
results.'
| def testSaveAndReload(self):
| print 'Creating network...'
netOPF = _createOPFNetwork()
level1OPF = netOPF.regions['level1SP']
print 'Training network for 500 iterations'
level1OPF.setParameter('learningMode', 1)
level1OPF.setParameter('inferenceMode', 0)
netOPF.run(500)
level1OPF.setParameter('learningMode', 0)
level1OPF.setParameter('inferenceMode', 1)
print 'Saving and reload network'
(_, tmpNetworkFilename) = _setupTempDirectory('trained.nta')
netOPF.save(tmpNetworkFilename)
netOPF2 = Network(tmpNetworkFilename)
level1OPF2 = netOPF2.regions['level1SP']
sensor = netOPF.regions['sensor'].getSelf()
trainFile = resource_filename('nupic.datafiles', 'extra/gym/gym.csv')
sensor.dataSource = FileRecordStream(streamID=trainFile)
sensor.dataSource.setAutoRewind(True)
print 'Running inference on the two networks for 100 iterations'
for _ in xrange(100):
netOPF2.run(1)
netOPF.run(1)
l1outputOPF2 = level1OPF2.getOutputData('bottomUpOut')
l1outputOPF = level1OPF.getOutputData('bottomUpOut')
opfHash2 = l1outputOPF2.nonzero()[0].sum()
opfHash = l1outputOPF.nonzero()[0].sum()
self.assertEqual(opfHash2, opfHash)
|
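The equality check above compresses each region output into a cheap fingerprint: the sum of the indices of its nonzero elements. A small numpy sketch of the same idea (good enough for a smoke test, although two different outputs can in principle collide on this sum):

    import numpy

    def sdrFingerprint(outputVector):
        # Sum of active-bit indices; identical outputs give identical fingerprints.
        return numpy.asarray(outputVector).nonzero()[0].sum()

    a = numpy.zeros(2048)
    a[[3, 17, 400]] = 1
    b = a.copy()
    assert sdrFingerprint(a) == sdrFingerprint(b)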
'Test maxEnabledPhase'
| def testMaxEnabledPhase(self):
| print 'Creating network...'
netOPF = _createOPFNetwork(addSP=True, addTP=True)
netOPF.initialize()
level1SP = netOPF.regions['level1SP']
level1SP.setParameter('learningMode', 1)
level1SP.setParameter('inferenceMode', 0)
tm = netOPF.regions['level1TP']
tm.setParameter('learningMode', 0)
tm.setParameter('inferenceMode', 0)
print 'maxPhase,maxEnabledPhase = ', netOPF.maxPhase, netOPF.getMaxEnabledPhase()
self.assertEqual(netOPF.maxPhase, 2)
self.assertEqual(netOPF.getMaxEnabledPhase(), 2)
print 'Setting setMaxEnabledPhase to 1'
netOPF.setMaxEnabledPhase(1)
print 'maxPhase,maxEnabledPhase = ', netOPF.maxPhase, netOPF.getMaxEnabledPhase()
self.assertEqual(netOPF.maxPhase, 2)
self.assertEqual(netOPF.getMaxEnabledPhase(), 1)
netOPF.run(1)
print 'RUN SUCCEEDED'
'\n print "\nSetting setMaxEnabledPhase to 2"\n netOPF.setMaxEnabledPhase(2)\n print "maxPhase,maxEnabledPhase = ", netOPF.maxPhase, netOPF.getMaxEnabledPhase()\n netOPF.run(1)\n\n print "RUN SUCCEEDED"\n\n print "\nSetting setMaxEnabledPhase to 1"\n netOPF.setMaxEnabledPhase(1)\n print "maxPhase,maxEnabledPhase = ", netOPF.maxPhase, netOPF.getMaxEnabledPhase()\n netOPF.run(1)\n print "RUN SUCCEEDED"\n '
|
'Run specific experiments and verify that they are producing the correct
results.
opfDir is the examples/opf directory in the install path
and is used to find run_opf_experiment.py
The testdir is the directory that contains the experiments we will be
running. When running in the auto-build setup, this will be a temporary
directory that has had this script, as well as the specific experiments
we will be running, copied into it by the qa/autotest/prediction_results.py
script.
When running stand-alone from the command line, this will point to the
examples/prediction directory in the install tree (same as predictionDir)'
| def testExperimentResults(self):
| nupic_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', '..')
opfDir = os.path.join(nupic_dir, 'examples', 'opf')
testDir = opfDir
if (not os.path.exists(os.path.join(testDir, 'experiments/classification'))):
testDir = opfDir
command = ['python', os.path.join(testDir, 'experiments', 'classification', 'makeDatasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
command = ['python', os.path.join(testDir, 'experiments', 'multistep', 'make_datasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
command = ['python', os.path.join(testDir, 'experiments', 'spatial_classification', 'make_datasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
os.chdir(testDir)
runExperiment = os.path.join(nupic_dir, 'scripts', 'run_opf_experiment.py')
multistepTests = [{'experimentDir': 'experiments/multistep/simple_0', 'results': {('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"): (0.0, 0.2)}}, {'experimentDir': 'experiments/multistep/simple_0_f2', 'results': {('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"): (0.0, 0.66)}}, {'experimentDir': 'experiments/multistep/simple_1', 'results': {('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"): (0.0, 0.2)}}, {'experimentDir': 'experiments/multistep/simple_1_f2', 'results': {('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"): (0.0, 3.76)}}, {'experimentDir': 'experiments/multistep/simple_2', 'results': {('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"): (0.0, 0.31)}}, {'experimentDir': 'experiments/multistep/simple_3', 'results': {('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"): (0.0, 0.06), ('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=3:window=200:field=field1"): (0.0, 0.2)}}, {'experimentDir': 'experiments/multistep/simple_3_f2', 'results': {('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"): (0.0, 0.6), ('DefaultTask.TemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2"): (0.0, 1.8)}}, {'experimentDir': 'experiments/missing_record/simple_0', 'results': {('DefaultTask.NontemporalMultiStep.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=25:field=field1"): (1.0, 1.0)}}]
classificationTests = [{'experimentDir': 'experiments/classification/category_hub_TP_0', 'results': {('OnlineLearning.TemporalClassification.predictionLog.csv', 'classification:avg_err:window=200'): (0.0, 0.02)}}, {'experimentDir': 'experiments/classification/category_TM_0', 'results': {('OnlineLearning.TemporalClassification.predictionLog.csv', 'classification:avg_err:window=200'): (0.0, 0.045), ('OnlineLearning.TemporalClassification.predictionLog.csv', 'classConfidences:neg_auc:computeEvery=10:window=200'): ((-1.0), (-0.98))}}, {'experimentDir': 'experiments/classification/category_TM_1', 'results': {('OnlineLearning.TemporalClassification.predictionLog.csv', 'classification:avg_err:window=200'): (0.0, 0.005)}}, {'experimentDir': 'experiments/classification/scalar_TP_0', 'results': {('OnlineLearning.TemporalClassification.predictionLog.csv', 'classification:avg_err:window=200'): (0.0, 0.155), ('OnlineLearning.TemporalClassification.predictionLog.csv', 'classConfidences:neg_auc:computeEvery=10:window=200'): ((-1.0), (-0.9))}}, {'experimentDir': 'experiments/classification/scalar_TP_1', 'results': {('OnlineLearning.TemporalClassification.predictionLog.csv', 'classification:avg_err:window=200'): (0.0, 0.03)}}]
spatialClassificationTests = [{'experimentDir': 'experiments/spatial_classification/category_0', 'results': {('DefaultTask.NontemporalClassification.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"): (0.0, 0.05)}}, {'experimentDir': 'experiments/spatial_classification/category_1', 'results': {('DefaultTask.NontemporalClassification.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"): (0.0, 0.0)}}, {'experimentDir': 'experiments/spatial_classification/scalar_0', 'results': {('DefaultTask.NontemporalClassification.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"): (0.0, 0.025)}}, {'experimentDir': 'experiments/spatial_classification/scalar_1', 'results': {('DefaultTask.NontemporalClassification.predictionLog.csv', "multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"): ((-1e-10), 0.01)}}]
anomalyTests = [{'experimentDir': 'experiments/anomaly/temporal/simple', 'results': {('DefaultTask.TemporalAnomaly.predictionLog.csv', 'anomalyScore:passThruPrediction:window=1000:field=f'): (0.02, 0.04)}}]
tests = []
tests += multistepTests
tests += classificationTests
tests += spatialClassificationTests
tests += anomalyTests
summaryOfResults = []
startTime = time.time()
testIdx = (-1)
for test in tests:
testIdx += 1
expDirectory = test['experimentDir']
toDelete = []
path = os.path.join(expDirectory, 'inference')
toDelete.append(path)
path = os.path.join(expDirectory, 'savedmodels')
toDelete.append(path)
for path in toDelete:
if (not os.path.exists(path)):
continue
print ('Removing %s ...' % path)
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
args = test.get('args', [])
print ('Running experiment %s ...' % expDirectory)
command = (['python', runExperiment, expDirectory] + args)
retVal = call(command)
if retVal:
print ('Details of failed test: %s' % test)
print ("TestIdx %d, OPF experiment '%s' failed with return code %i." % (testIdx, expDirectory, retVal))
self.assertFalse(retVal)
for (key, expValues) in test['results'].items():
(logFilename, colName) = key
logFile = FileRecordStream(os.path.join(expDirectory, 'inference', logFilename))
colNames = [x[0] for x in logFile.getFields()]
if (not (colName in colNames)):
print ('TestIdx %d: %s not one of the columns in prediction log file. Available column names are: %s' % (testIdx, colName, colNames))
self.assertTrue((colName in colNames))
colIndex = colNames.index(colName)
while True:
try:
row = logFile.next()
except StopIteration:
break
result = row[colIndex]
summaryOfResults.append((expDirectory, colName, result))
print ('Actual result for %s, %s:' % (expDirectory, colName)), result
print 'Expected range:', expValues
failed = (((expValues[0] is not None) and (result < expValues[0])) or ((expValues[1] is not None) and (result > expValues[1])))
if failed:
print ('TestIdx %d: Experiment %s failed. \nThe actual result for %s (%s) was outside the allowed range of %s' % (testIdx, expDirectory, colName, result, expValues))
else:
print ' Within expected range.'
self.assertFalse(failed)
print
print 'Summary of results in all experiments run:'
print '========================================='
prevExpDir = None
for (expDir, key, results) in summaryOfResults:
if (expDir != prevExpDir):
print
print expDir
prevExpDir = expDir
print (' %s: %s' % (key, results))
print ('\nElapsed time: %.1f seconds' % (time.time() - startTime))
|
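The pass/fail decision above takes the last value of a named metric column from each prediction log and checks it against an inclusive (min, max) window, where None leaves that side unbounded. A standalone sketch of that check with the csv module (the real test reads the log through FileRecordStream, which also understands the OPF type and special header rows):

    import csv

    def lastMetricValue(csvPath, columnName):
        # Return the value in the final data row of the given column.
        with open(csvPath, 'rb') as f:  # Python 2 csv prefers binary mode
            reader = csv.reader(f)
            header = next(reader)
            colIndex = header.index(columnName)
            value = None
            for row in reader:
                value = row[colIndex]
        return float(value)

    def inExpectedRange(result, expected):
        low, high = expected
        return (low is None or result >= low) and (high is None or result <= high)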
'Override to force unittest framework to use test method names instead
of docstrings in the report.'
| def shortDescription(self):
| return None
|
'Compare temporal or non-temporal predictions for the given experiment
that just finished executing
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiments.
maxMismatches: Maximum number of row mismatches to report before
terminating the comparison; None means: report all
mismatches
Returns: True if equal; False if different'
| def compareOPFPredictionFiles(self, path1, path2, temporal, maxMismatches=None):
| experimentLabel = ('%s prediction comparison' % ('Temporal' if temporal else 'Non-Temporal'))
print ('%s: Performing comparison of OPF prediction CSV files %r and %r' % (experimentLabel, path1, path2))
self.assertTrue(os.path.isfile(path1), msg=("OPF prediction file path1 %s doesn't exist or is not a file" % path1))
(opf1CsvReader, opf1FieldNames) = self._openOpfPredictionCsvFile(path1)
self.assertTrue(os.path.isfile(path2), msg=("OPF prediction file path2 %s doesn't exist or is not a file" % path2))
(opf2CsvReader, opf2FieldNames) = self._openOpfPredictionCsvFile(path2)
self.assertEqual(len(opf1FieldNames), len(opf2FieldNames), ('%s: Mismatch in number of prediction columns: opf1: %s, opf2: %s' % (experimentLabel, len(opf1FieldNames), len(opf2FieldNames))))
self.assertEqual(opf1FieldNames, opf2FieldNames)
opf1EOF = False
opf2EOF = False
opf1CurrentDataRowIndex = (-1)
opf2CurrentDataRowIndex = (-1)
if temporal:
_skipOpf1Row = opf1CsvReader.next()
opf1CurrentDataRowIndex += 1
_skipOpf2Row = opf2CsvReader.next()
opf2CurrentDataRowIndex += 1
fieldsIndexesToCompare = tuple(xrange(2, len(opf1FieldNames), 2))
self.assertGreater(len(fieldsIndexesToCompare), 0)
print ('%s: Comparing fields at indexes: %s; opf1Labels: %s; opf2Labels: %s' % (experimentLabel, fieldsIndexesToCompare, [opf1FieldNames[i] for i in fieldsIndexesToCompare], [opf2FieldNames[i] for i in fieldsIndexesToCompare]))
for i in fieldsIndexesToCompare:
self.assertTrue(opf1FieldNames[i].endswith('predicted'), msg=("%r doesn't end with 'predicted'" % opf1FieldNames[i]))
self.assertTrue(opf2FieldNames[i].endswith('predicted'), msg=("%r doesn't end with 'predicted'" % opf2FieldNames[i]))
mismatchCount = 0
while True:
try:
opf1Row = opf1CsvReader.next()
except StopIteration:
opf1EOF = True
else:
opf1CurrentDataRowIndex += 1
try:
opf2Row = opf2CsvReader.next()
except StopIteration:
opf2EOF = True
else:
opf2CurrentDataRowIndex += 1
if (opf1EOF != opf2EOF):
print ('%s: ERROR: Data row counts mismatch: opf1EOF: %s, opf1CurrentDataRowIndex: %s; opf2EOF: %s, opf2CurrentDataRowIndex: %s' % (experimentLabel, opf1EOF, opf1CurrentDataRowIndex, opf2EOF, opf2CurrentDataRowIndex))
return False
if (opf1EOF and opf2EOF):
break
self.assertEqual(len(opf1Row), len(opf2Row))
for i in fieldsIndexesToCompare:
opf1FloatValue = float(opf1Row[i])
opf2FloatValue = float(opf2Row[i])
if (opf1FloatValue != opf2FloatValue):
mismatchCount += 1
print ('%s: ERROR: mismatch in prediction values: dataRowIndex: %s, fieldIndex: %s (%r); opf1FieldValue: <%s>, opf2FieldValue: <%s>; opf1FieldValueAsFloat: %s, opf2FieldValueAsFloat: %s; opf1Row: %s, opf2Row: %s' % (experimentLabel, opf1CurrentDataRowIndex, i, opf1FieldNames[i], opf1Row[i], opf2Row[i], opf1FloatValue, opf2FloatValue, opf1Row, opf2Row))
if ((maxMismatches is not None) and (mismatchCount >= maxMismatches)):
break
if (mismatchCount != 0):
print ('%s: ERROR: there were %s mismatches between %r and %r' % (experimentLabel, mismatchCount, path1, path2))
return False
self.assertEqual(opf1CurrentDataRowIndex, opf2CurrentDataRowIndex)
print ('%s: Comparison of predictions completed: OK; number of prediction rows examined: %s; path1: %r; path2: %r' % (experimentLabel, (opf1CurrentDataRowIndex + 1), path1, path2))
return True
|
'Open an OPF prediction CSV file and advance it to the first data row
Returns: the tuple (csvReader, fieldNames), where \'csvReader\' is the
csv reader object, and \'fieldNames\' is a sequence of field
names.'
| def _openOpfPredictionCsvFile(self, filepath):
| csvReader = self._openCsvFile(filepath)
names = csvReader.next()
_types = csvReader.next()
_specials = csvReader.next()
return (csvReader, names)
|
'Test that we get the same predictions out from the following two
scenarios:
a_plus_b: Run the network for \'a\' iterations followed by \'b\' iterations
a, followed by b: Run the network for \'a\' iterations, save it, load it
back in, then run for \'b\' iterations.
Parameters:
experiment: base directory of the experiment. This directory should
contain the following:
base.py
a_plus_b/description.py
a/description.py
b/description.py
The sub-directory description files should import the
base.py and only change the first and last record used
from the data file.
predSteps: Number of steps ahead predictions are for
checkpointAt: Number of iterations that \'a\' runs for.
IMPORTANT: This must match the number of records that
a/description.py runs for - it is NOT dynamically stuffed into
the a/description.py.
predictionsFilename: The name of the predictions file that the OPF
generates for this experiment (for example
\'DefaultTask.NontemporalMultiStep.predictionLog.csv\')
newSerialization: Whether to use new capnproto serialization.'
| def _testSamePredictions(self, experiment, predSteps, checkpointAt, predictionsFilename, additionalFields=None, newSerialization=False):
| aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, 'a_plus_b')
aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, 'a')
bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, 'b')
args = self._createExperimentArgs(aPlusBExpDir, newSerialization=newSerialization)
_aPlusBExp = runExperiment(args)
args = self._createExperimentArgs(aExpDir, newSerialization=newSerialization)
_aExp = runExperiment(args)
if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'), dst=os.path.join(bExpDir, 'savedmodels'))
args = self._createExperimentArgs(bExpDir, newSerialization=newSerialization, additionalArgs=['--load=DefaultTask'])
_bExp = runExperiment(args)
aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference', predictionsFilename))
bPred = FileRecordStream(os.path.join(bExpDir, 'inference', predictionsFilename))
colNames = [x[0] for x in aPlusBPred.getFields()]
actValueColIdx = colNames.index('multiStepPredictions.actual')
predValueColIdx = colNames.index(('multiStepPredictions.%d' % predSteps))
for i in range(checkpointAt):
aPlusBPred.next()
for i in range(predSteps):
aPlusBPred.next()
bPred.next()
rowIdx = (((checkpointAt + predSteps) + 4) - 1)
epsilon = 0.0001
while True:
rowIdx += 1
try:
rowAPB = aPlusBPred.next()
rowB = bPred.next()
self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx], ('Mismatch in actual values: row %d of a+b has %s and row %d of b has %s' % (rowIdx, rowAPB[actValueColIdx], (rowIdx - checkpointAt), rowB[actValueColIdx])))
predAPB = eval(rowAPB[predValueColIdx])
predB = eval(rowB[predValueColIdx])
predAPB = [(a, b) for (b, a) in predAPB.items()]
predB = [(a, b) for (b, a) in predB.items()]
predAPB.sort(reverse=True)
predB.sort(reverse=True)
if (additionalFields is not None):
for additionalField in additionalFields:
fieldIdx = colNames.index(additionalField)
self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx], ("Mismatch in field '%s' values: row %d of a+b has value: (%s)\n and row %d of b has value: %s" % (additionalField, rowIdx, rowAPB[fieldIdx], (rowIdx - checkpointAt), rowB[fieldIdx])))
self.assertEqual(len(predAPB), len(predB), ('Mismatch in predicted values: row %d of a+b has %d predictions: \n (%s) and row %d of b has %d predictions:\n (%s)' % (rowIdx, len(predAPB), predAPB, (rowIdx - checkpointAt), len(predB), predB)))
for i in range(len(predAPB)):
(aProb, aValue) = predAPB[i]
(bProb, bValue) = predB[i]
self.assertLess(abs((aValue - bValue)), epsilon, ('Mismatch in predicted values: row %d of a+b predicts value %s and row %d of b predicts %s' % (rowIdx, aValue, (rowIdx - checkpointAt), bValue)))
self.assertLess(abs((aProb - bProb)), epsilon, ('Mismatch in probabilities: row %d of a+b predicts %s with probability %s and row %d of b predicts %s with probability %s' % (rowIdx, aValue, aProb, (rowIdx - checkpointAt), bValue, bProb)))
except StopIteration:
break
shutil.rmtree(getCheckpointParentDir(aExpDir))
shutil.rmtree(getCheckpointParentDir(bExpDir))
shutil.rmtree(getCheckpointParentDir(aPlusBExpDir))
print 'Predictions match!'
|
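The comparison above evaluates each multiStepPredictions cell into a dict mapping predicted value to probability, flips it into (probability, value) pairs, and sorts best-first before comparing with a small epsilon. A compact sketch of that parsing step; ast.literal_eval is used here as a safer stand-in for the eval call in the test:

    import ast

    def parsePredictionCell(cellText):
        # A cell looks like "{5.5: 0.7, 6.0: 0.3}": predicted value -> probability.
        valueToProb = ast.literal_eval(cellText)
        # Flip to (probability, value) and sort so the most likely prediction is first.
        return sorted(((prob, value) for value, prob in valueToProb.items()), reverse=True)

    # parsePredictionCell('{5.5: 0.7, 6.0: 0.3}') -> [(0.7, 5.5), (0.3, 6.0)]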