'Suppresses the output of this ParserElement; useful to keep punctuation from cluttering up returned output.'
def suppress(self):
return Suppress(self)
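A minimal pyparsing usage sketch (the grammar here is illustrative, not from the source above): Suppress matches text but keeps it out of the returned results.

from pyparsing import Word, alphas, Suppress

# the parentheses must be present in the input, but Suppress drops them
term = Suppress('(') + Word(alphas) + Suppress(')')
print(term.parseString('(hello)'))  # -> ['hello']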
'Disables the skipping of whitespace before matching the characters in the ParserElement\'s defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars.'
def leaveWhitespace(self):
self.skipWhitespace = False
return self
'Overrides the default whitespace chars'
def setWhitespaceChars(self, chars):
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
'Overrides default behavior to expand <TAB>s to spaces before parsing the input string. Must be called before parseString when the input grammar contains elements that match <TAB> characters.'
def parseWithTabs(self):
self.keepTabs = True
return self
'Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns.'
def ignore(self, other):
if isinstance(other, Suppress):
    if other not in self.ignoreExprs:
        self.ignoreExprs.append(other)
else:
    self.ignoreExprs.append(Suppress(other))
return self
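A short example of ignore() using one of pyparsing's built-in comment expressions (the grammar itself is illustrative):

from pyparsing import OneOrMore, Word, alphas, cStyleComment

words = OneOrMore(Word(alphas))
words.ignore(cStyleComment)  # comments may appear anywhere between tokens
print(words.parseString('foo /* a comment */ bar'))  # -> ['foo', 'bar']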
'Enable display of debugging messages while doing pattern matching.'
def setDebugActions(self, startAction, successAction, exceptionAction):
self.debugActions = (startAction or _defaultStartDebugAction,
                     successAction or _defaultSuccessDebugAction,
                     exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
'Enable display of debugging messages while doing pattern matching. Set flag to True to enable, False to disable.'
def setDebug(self, flag=True):
if flag:
    self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction,
                         _defaultExceptionDebugAction)
else:
    self.debug = False
return self
'Check defined expressions for valid structure, check for infinite recursive definitions.'
def validate(self, validateTrace=[]):
self.checkRecursion([])
'Execute the parse expression on the given file or filename. If a filename is specified (instead of a file object), the entire file is opened, read, and closed before parsing.'
def parseFile(self, file_or_filename):
try:
    file_contents = file_or_filename.read()
except AttributeError:
    f = open(file_or_filename, 'rb')
    file_contents = f.read()
    f.close()
return self.parseString(file_contents)
'Overrides the default Keyword chars'
def setDefaultKeywordChars(chars):
Keyword.DEFAULT_KEYWORD_CHARS = chars
'The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.'
def __init__(self, pattern, flags=0):
super(Regex, self).__init__()
if len(pattern) == 0:
    warnings.warn('null string passed to Regex; use Empty() instead',
                  SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
    self.re = re.compile(self.pattern, self.flags)
    self.reString = self.pattern
except sre_constants.error:
    warnings.warn('invalid pattern (%s) passed to Regex' % pattern,
                  SyntaxWarning, stacklevel=2)
    raise
self.name = _ustr(self)
self.errmsg = 'Expected ' + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
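Because pattern and flags are handed to re.compile() unchanged, any valid re pattern works; a small sketch:

import re
from pyparsing import Regex

iso_date = Regex(r'\d{4}-\d{2}-\d{2}', flags=re.UNICODE)
print(iso_date.parseString('2017-06-25'))  # -> ['2017-06-25']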
'Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=None) - escQuote - special quote sequence to escape an embedded quote string (such as SQL\'s "" to escape an embedded ") (default=None) - multiline - boolean indicating whether quotes can span multiple lines (default=False) - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)'
def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
super(QuotedString, self).__init__()
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
    warnings.warn('quoteChar cannot be the empty string', SyntaxWarning, stacklevel=2)
    raise SyntaxError()
if endQuoteChar is None:
    endQuoteChar = quoteChar
else:
    endQuoteChar = endQuoteChar.strip()
    if len(endQuoteChar) == 0:
        warnings.warn('endQuoteChar cannot be the empty string', SyntaxWarning, stacklevel=2)
        raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
    self.flags = re.MULTILINE | re.DOTALL
    self.pattern = '%s(?:[^%s%s]' % (
        re.escape(self.quoteChar),
        _escapeRegexRangeChars(self.endQuoteChar[0]),
        (escChar is not None and _escapeRegexRangeChars(escChar)) or '')
else:
    self.flags = 0
    self.pattern = '%s(?:[^%s\\n\\r%s]' % (
        re.escape(self.quoteChar),
        _escapeRegexRangeChars(self.endQuoteChar[0]),
        (escChar is not None and _escapeRegexRangeChars(escChar)) or '')
if len(self.endQuoteChar) > 1:
    self.pattern += ('|(?:' + ')|(?:'.join(
        ['%s[^%s]' % (re.escape(self.endQuoteChar[:i]),
                      _escapeRegexRangeChars(self.endQuoteChar[i]))
         for i in range(len(self.endQuoteChar) - 1, 0, -1)]) + ')')
if escQuote:
    self.pattern += '|(?:%s)' % re.escape(escQuote)
if escChar:
    self.pattern += '|(?:%s.)' % re.escape(escChar)
    self.escCharReplacePattern = re.escape(self.escChar) + '(.)'
self.pattern += ')*%s' % re.escape(self.endQuoteChar)
try:
    self.re = re.compile(self.pattern, self.flags)
    self.reString = self.pattern
except sre_constants.error:
    warnings.warn('invalid pattern (%s) passed to Regex' % self.pattern,
                  SyntaxWarning, stacklevel=2)
    raise
self.name = _ustr(self)
self.errmsg = 'Expected ' + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
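A hedged example of the escQuote parameter with SQL-style strings (input text is illustrative):

from pyparsing import QuotedString

# '' inside the quotes stands for a literal single quote; unquoteResults
# (the default) strips the delimiters and unescapes the result
sql_string = QuotedString("'", escQuote="''")
print(sql_string.parseString("'it''s'"))  # -> ["it's"]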
'Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on all contained expressions.'
def leaveWhitespace(self):
self.skipWhitespace = False
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
    e.leaveWhitespace()
return self
'Give plot a pause, so data is drawn and GUI\'s event loop can run.'
def refreshGUI(self):
plt.pause(0.0001)
'Parameters: _inputDimensions: The size of the input. (m,n) will give a size m x n _columnDimensions: The size of the 2 dimensional array of columns'
def __init__(self, inputDimensions, columnDimensions):
self.inputDimensions = inputDimensions
self.columnDimensions = columnDimensions
self.inputSize = np.array(inputDimensions).prod()
self.columnNumber = np.array(columnDimensions).prod()
self.inputArray = np.zeros(self.inputSize, dtype=uintType)
self.activeArray = np.zeros(self.columnNumber, dtype=uintType)
random.seed(1)
self.sp = SP(self.inputDimensions,
             self.columnDimensions,
             potentialRadius=self.inputSize,
             numActiveColumnsPerInhArea=int(0.02 * self.columnNumber),
             globalInhibition=True,
             seed=1,
             synPermActiveInc=0.01,
             synPermInactiveDec=0.008)
'create a random input vector'
def createInput(self):
print (('-' * 70) + 'Creating a random input vector' + ('-' * 70))
self.inputArray[0:] = 0
for i in range(self.inputSize):
    self.inputArray[i] = random.randrange(2)
'Run the spatial pooler with the input vector'
def run(self):
print (('-' * 80) + 'Computing the SDR' + ('-' * 80))
self.sp.compute(self.inputArray, True, self.activeArray)
print self.activeArray.nonzero()
'Flip the value of 10% of input bits (add noise) :param noiseLevel: The percentage of total input bits that should be flipped'
def addNoise(self, noiseLevel):
for _ in range(int(noiseLevel * self.inputSize)):
    randomPosition = int(random.random() * self.inputSize)
    if self.inputArray[randomPosition] == 1:
        self.inputArray[randomPosition] = 0
    else:
        self.inputArray[randomPosition] = 1
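A standalone sketch of the same flip-noise idea (function and variable names are illustrative). Note that positions are drawn with replacement, so slightly fewer than noiseLevel * size distinct bits may end up flipped:

import random
import numpy as np

def addNoiseTo(bits, noiseLevel):
    # flip noiseLevel * bits.size randomly chosen positions, as above
    for _ in range(int(noiseLevel * bits.size)):
        pos = random.randrange(bits.size)
        bits[pos] = 1 - bits[pos]  # flip 0 <-> 1

v = np.zeros(100, dtype='uint32')
v[:10] = 1
addNoiseTo(v, 0.1)        # flips roughly 10 positions
print(v.nonzero()[0])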
'Run one iteration of IdentityRegion\'s compute'
def compute(self, inputs, outputs):
outputs['out'][:] = inputs['in']
'Return the Spec for IdentityRegion.'
@classmethod
def getSpec(cls):
spec = {
    'description': IdentityRegion.__doc__,
    'singleNodeOnly': True,
    'inputs': {
        'in': {
            'description': 'The input vector.',
            'dataType': 'Real32',
            'count': 0,
            'required': True,
            'regionLevel': False,
            'isDefaultInput': True,
            'requireSplitterMap': False,
        },
    },
    'outputs': {
        'out': {
            'description': 'A copy of the input vector.',
            'dataType': 'Real32',
            'count': 0,
            'regionLevel': True,
            'isDefaultOutput': True,
        },
    },
    'parameters': {
        'dataWidth': {
            'description': 'Size of inputs',
            'accessMode': 'Read',
            'dataType': 'UInt32',
            'count': 1,
            'constraints': '',
        },
    },
}
return spec
'Test with fast learning, make sure PAM allows us to train with fewer repeats of the training data.'
def testFastLearning(self):
numOnBitsPerPattern = 3
baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10,
                  sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern,
                  includeCPP=INCLUDE_CPP_TM, numCols=None,
                  activationThreshold=numOnBitsPerPattern,
                  minThreshold=numOnBitsPerPattern,
                  newSynapseCount=numOnBitsPerPattern,
                  initialPerm=0.6, permanenceInc=0.1, permanenceDec=0.0,
                  globalDecay=0.0, pamLength=0, nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, 3 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=20, expMissingMax=None,
                           pamLength=1, nTrainRepetitions=3))
print '\nRunning with PAM, 3 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0,
                           pamLength=5, nTrainRepetitions=3))
'Test with slow learning, make sure PAM allows us to train with fewer repeats of the training data.'
def testSlowLearning(self):
numOnBitsPerPattern = 3
baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10,
                  sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern,
                  includeCPP=INCLUDE_CPP_TM, numCols=None,
                  activationThreshold=numOnBitsPerPattern,
                  minThreshold=numOnBitsPerPattern,
                  newSynapseCount=numOnBitsPerPattern,
                  initialPerm=0.11, permanenceInc=0.1, permanenceDec=0.0,
                  globalDecay=0.0, pamLength=0, nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, 10 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=10, expMissingMax=None,
                           pamLength=1, nTrainRepetitions=10))
print '\nRunning with PAM, 10 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0,
                           pamLength=6, nTrainRepetitions=10))
'Test with slow learning, some overlap in the patterns, and TM thresholds of 80% of newSynapseCount Make sure PAM allows us to train with fewer repeats of the training data.'
def testSlowLearningWithOverlap(self):
if SHORT:
    self.skipTest('Test skipped by default. Enable with --long.')
numOnBitsPerPattern = 5
baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10,
                  sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern,
                  patternOverlap=2, includeCPP=INCLUDE_CPP_TM, numCols=None,
                  activationThreshold=int(0.8 * numOnBitsPerPattern),
                  minThreshold=int(0.8 * numOnBitsPerPattern),
                  newSynapseCount=numOnBitsPerPattern,
                  initialPerm=0.11, permanenceInc=0.1, permanenceDec=0.0,
                  globalDecay=0.0, pamLength=0, nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, 10 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=10, expMissingMax=None,
                           pamLength=1, nTrainRepetitions=10))
print '\nRunning with PAM, 10 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0,
                           pamLength=6, nTrainRepetitions=10))
'Test with "Forbes-like" data. A bunch of sequences of lengths between 2 and 10 elements long. We will test with both fast and slow learning. Make sure PAM allows us to train with fewer repeats of the training data.'
def testForbesLikeData(self):
if SHORT:
    self.skipTest('Test skipped by default. Enable with --long.')
numOnBitsPerPattern = 3
baseParams = dict(seqFunction=buildSequencePool, numSequences=20, seqLen=[3, 10],
                  numPatterns=10, numOnBitsPerPattern=numOnBitsPerPattern,
                  patternOverlap=1, includeCPP=INCLUDE_CPP_TM, numCols=None,
                  activationThreshold=int(0.8 * numOnBitsPerPattern),
                  minThreshold=int(0.8 * numOnBitsPerPattern),
                  newSynapseCount=numOnBitsPerPattern,
                  initialPerm=0.51, permanenceInc=0.1, permanenceDec=0.0,
                  globalDecay=0.0, pamLength=0, checkSynapseConsistency=False,
                  nTrainRepetitions=8, doResets=True)
print '\nRunning without PAM, fast learning, 2 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=50, expMissingMax=None,
                           pamLength=1, nTrainRepetitions=2))
print '\nRunning with PAM, fast learning, 2 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0,
                           pamLength=5, nTrainRepetitions=2))
print '\nRunning without PAM, slow learning, 8 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=1, expMissingMax=None,
                           initialPerm=0.31, pamLength=1, nTrainRepetitions=8))
print '\nRunning with PAM, slow learning, 8 repetitions of the training data...'
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0,
                           initialPerm=0.31, pamLength=5, nTrainRepetitions=8))
'Test creation, pickling, and basic run of learning and inference.'
def _basicTest(self, tm=None):
trainingSet = _getSimplePatterns(10, 10)

# Learn
for _ in range(2):
    for seq in trainingSet[0:5]:
        for _ in range(10):
            tm.learn(seq)
        tm.reset()
print 'Learning completed'

# Infer
print 'Running inference'
tm.collectStats = True
for seq in trainingSet[0:5]:
    tm.reset()
    tm.resetStats()
    for _ in range(10):
        tm.infer(seq)
        if VERBOSITY > 1:
            print _printOneTrainingVector(seq)
            tm.printStates(False, False)
            print
            print
    if VERBOSITY > 1:
        print tm.getStats()
    self.assertGreater(tm.getStats()['predictionScoreAvg2'], 0.8)
    print ("tm.getStats()['predictionScoreAvg2'] = ",
           tm.getStats()['predictionScoreAvg2'])
print 'TMConstantTest ok'
'Print a single vector succinctly.'
def _printOneTrainingVector(self, x):
print ''.join((('1' if (k != 0) else '.') for k in x))
'Print all vectors'
def _printAllTrainingSequences(self, trainingSequences):
for (i, trainingSequence) in enumerate(trainingSequences):
    print '============= Sequence', i, '================='
    for pattern in trainingSequence:
        self._printOneTrainingVector(pattern)
'Set verbosity level on the TM'
def _setVerbosity(self, verbosity, tm, tmPy):
tm.cells4.setVerbosity(verbosity)
tm.verbosity = verbosity
tmPy.verbosity = verbosity
'Create an instance of the appropriate temporal memory. We isolate all parameters as constants specified here.'
def _createTMs(self, numCols, fixedResources=False, checkSynapseConsistency=True):
minThreshold = 4
activationThreshold = 8
newSynapseCount = 15
initialPerm = 0.3
connectedPerm = 0.5
permanenceInc = 0.1
permanenceDec = 0.05
if fixedResources:
    permanenceDec = 0.1
    maxSegmentsPerCell = 5
    maxSynapsesPerSegment = 15
    globalDecay = 0
    maxAge = 0
else:
    permanenceDec = 0.05
    maxSegmentsPerCell = -1
    maxSynapsesPerSegment = -1
    globalDecay = 0.0001
    maxAge = 1
if g_testCPPTM:
    if g_options.verbosity > 1:
        print 'Creating BacktrackingTMCPP instance'
    cppTM = BacktrackingTMCPP(numberOfCols=numCols, cellsPerColumn=4,
                              initialPerm=initialPerm, connectedPerm=connectedPerm,
                              minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                              permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                              activationThreshold=activationThreshold,
                              globalDecay=globalDecay, maxAge=maxAge, burnIn=1,
                              seed=g_options.seed, verbosity=g_options.verbosity,
                              checkSynapseConsistency=checkSynapseConsistency,
                              pamLength=1000, maxSegmentsPerCell=maxSegmentsPerCell,
                              maxSynapsesPerSegment=maxSynapsesPerSegment)
    cppTM.retrieveLearningStates = True
else:
    cppTM = None
if g_options.verbosity > 1:
    print 'Creating PY TM instance'
pyTM = BacktrackingTM(numberOfCols=numCols, cellsPerColumn=4,
                      initialPerm=initialPerm, connectedPerm=connectedPerm,
                      minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                      permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                      activationThreshold=activationThreshold,
                      globalDecay=globalDecay, maxAge=maxAge, burnIn=1,
                      seed=g_options.seed, verbosity=g_options.verbosity,
                      pamLength=1000, maxSegmentsPerCell=maxSegmentsPerCell,
                      maxSynapsesPerSegment=maxSynapsesPerSegment)
return (cppTM, pyTM)
'Very simple patterns. Each pattern has numOnes consecutive bits on. There are numPatterns*numOnes bits in the vector. These patterns are used as elements of sequences when building up a training set.'
def _getSimplePatterns(self, numOnes, numPatterns):
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
    x = numpy.zeros(numCols, dtype='float32')
    x[(i * numOnes):((i + 1) * numOnes)] = 1
    p.append(x)
return p
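A worked illustration of the pattern layout (standalone, with hypothetical arguments): each pattern claims its own block of numOnes consecutive bits, so the patterns never overlap:

import numpy

numOnes, numPatterns = 2, 3
for i in range(numPatterns):
    x = numpy.zeros(numOnes * numPatterns, dtype='float32')
    x[i * numOnes:(i + 1) * numOnes] = 1
    print(x)
# [ 1.  1.  0.  0.  0.  0.]
# [ 0.  0.  1.  1.  0.  0.]
# [ 0.  0.  0.  0.  1.  1.]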
'A simple sequence of 5 patterns. The left half of the vector contains the pattern elements, each with numOnes consecutive bits. The right half contains numOnes random bits. The function returns a pair: trainingSequences: A list containing numRepetitions instances of the above sequence testSequence: A single clean test sequence containing the 5 patterns but with no noise on the right half'
def _buildSegmentLearningTrainingSet(self, numOnes=10, numRepetitions=10):
numPatterns = 5
numCols = 2 * numPatterns * numOnes
halfCols = numPatterns * numOnes
numNoiseBits = numOnes
p = self._getSimplePatterns(numOnes, numPatterns)
trainingSequences = []
for _ in xrange(numRepetitions):
    sequence = []
    for j in xrange(numPatterns):
        v = numpy.zeros(numCols)
        v[0:halfCols] = p[j]
        noiseIndices = (self._rgen.permutation(halfCols) + halfCols)[0:numNoiseBits]
        v[noiseIndices] = 1
        sequence.append(v)
    trainingSequences.append(sequence)
testSequence = []
for j in xrange(numPatterns):
    v = numpy.zeros(numCols, dtype='float32')
    v[0:halfCols] = p[j]
    testSequence.append(v)
if g_options.verbosity > 1:
    print '\nTraining sequences'
    self._printAllTrainingSequences(trainingSequences)
    print '\nTest sequence'
    self._printAllTrainingSequences([testSequence])
return (trainingSequences, [testSequence])
'Three simple sequences, composed of the same 5 static patterns. The left half of the vector contains the pattern elements, each with numOnes consecutive bits. The right half contains numOnes random bits. Sequence 1 is: p0, p1, p2, p3, p4 Sequence 2 is: p4, p3, p2, p1, p0 Sequence 3 is: p2, p0, p4, p1, p3 The function returns a pair: trainingSequences: A list containing numRepetitions instances of the above sequences testSequence: Clean test sequences with no noise on the right half'
def _buildSL2TrainingSet(self, numOnes=10, numRepetitions=10):
numPatterns = 5
numCols = 2 * numPatterns * numOnes
halfCols = numPatterns * numOnes
numNoiseBits = numOnes
p = self._getSimplePatterns(numOnes, numPatterns)
numSequences = 3
indices = [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0], [2, 0, 4, 1, 3]]
trainingSequences = []
for i in xrange(numRepetitions * numSequences):
    sequence = []
    for j in xrange(numPatterns):
        v = numpy.zeros(numCols, dtype='float32')
        v[0:halfCols] = p[indices[i % numSequences][j]]
        noiseIndices = (self._rgen.permutation(halfCols) + halfCols)[0:numNoiseBits]
        v[noiseIndices] = 1
        sequence.append(v)
    trainingSequences.append(sequence)
testSequences = []
for i in xrange(numSequences):
    sequence = []
    for j in xrange(numPatterns):
        v = numpy.zeros(numCols, dtype='float32')
        v[0:halfCols] = p[indices[i % numSequences][j]]
        sequence.append(v)
    testSequences.append(sequence)
if g_options.verbosity > 1:
    print '\nTraining sequences'
    self._printAllTrainingSequences(trainingSequences)
    print '\nTest sequences'
    self._printAllTrainingSequences(testSequences)
return (trainingSequences, testSequences)
'Train the given TM once on the entire training set. Then test a single set of sequences once and check that individual predictions reflect the true relative frequencies. Return a success code: 1 for pass, 0 for fail.'
def _testSegmentLearningSequence(self, tms, trainingSequences, testSequences, doResets=True):
if testSequences == None:
    testSequences = trainingSequences
(cppTM, pyTM) = (tms[0], tms[1])
if cppTM is not None:
    assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True

# Training
if g_options.verbosity > 0:
    print '============= Training ================='
    print 'TM parameters:'
    print 'CPP'
    if cppTM is not None:
        print cppTM.printParameters()
    print '\nPY'
    print pyTM.printParameters()
for (sequenceNum, trainingSequence) in enumerate(trainingSequences):
    if g_options.verbosity > 1:
        print '============= New sequence ================='
    if doResets:
        if cppTM is not None:
            cppTM.reset()
        pyTM.reset()
    for (t, x) in enumerate(trainingSequence):
        if g_options.verbosity > 1:
            print 'Time step', t, 'sequence number', sequenceNum
            print 'Input: ', pyTM.printInput(x)
            print 'NNZ:', x.nonzero()
        x = numpy.array(x).astype('float32')
        if cppTM is not None:
            cppTM.learn(x)
        pyTM.learn(x)
        if cppTM is not None:
            assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity,
                                    relaxSegmentTests=False) == True
        if g_options.verbosity > 2:
            if cppTM is not None:
                print 'CPP'
                cppTM.printStates(printPrevious=(g_options.verbosity > 4))
            print '\nPY'
            pyTM.printStates(printPrevious=(g_options.verbosity > 4))
            print
    if g_options.verbosity > 4:
        print 'Sequence finished. Complete state after sequence'
        if cppTM is not None:
            print 'CPP'
            cppTM.printCells()
        print '\nPY'
        pyTM.printCells()
        print
if g_options.verbosity > 2:
    print 'Calling trim segments'
if cppTM is not None:
    (nSegsRemovedCPP, nSynsRemovedCPP) = cppTM.trimSegments()
(nSegsRemoved, nSynsRemoved) = pyTM.trimSegments()
if cppTM is not None:
    assert nSegsRemovedCPP == nSegsRemoved
    assert nSynsRemovedCPP == nSynsRemoved
if cppTM is not None:
    assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True
print 'Training completed. Stats:'
info = pyTM.getSegmentInfo()
print '  nSegments:', info[0]
print '  nSynapses:', info[1]
if g_options.verbosity > 3:
    print 'Complete state:'
    if cppTM is not None:
        print 'CPP'
        cppTM.printCells()
    print '\nPY'
    pyTM.printCells()

# Inference
if g_options.verbosity > 1:
    print '============= Inference ================='
if cppTM is not None:
    cppTM.collectStats = True
pyTM.collectStats = True
nPredictions = 0
(cppNumCorrect, pyNumCorrect) = (0, 0)
for (sequenceNum, testSequence) in enumerate(testSequences):
    if g_options.verbosity > 1:
        print '============= New sequence ================='
    slen = len(testSequence)
    if doResets:
        if cppTM is not None:
            cppTM.reset()
        pyTM.reset()
    for (t, x) in enumerate(testSequence):
        if g_options.verbosity >= 2:
            print 'Time step', t, '\nInput:'
            pyTM.printInput(x)
        if cppTM is not None:
            cppTM.infer(x)
        pyTM.infer(x)
        if cppTM is not None:
            assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True
        if g_options.verbosity > 2:
            if cppTM is not None:
                print 'CPP'
                cppTM.printStates(printPrevious=(g_options.verbosity > 4),
                                  printLearnState=False)
            print '\nPY'
            pyTM.printStates(printPrevious=(g_options.verbosity > 4),
                             printLearnState=False)
        if cppTM is not None:
            cppScores = cppTM.getStats()
        pyScores = pyTM.getStats()
        if g_options.verbosity >= 2:
            if cppTM is not None:
                print 'CPP'
                print cppScores
            print '\nPY'
            print pyScores
        if (t < (slen - 1)) and (t > pyTM.burnIn):
            nPredictions += 1
            if cppTM is not None:
                if cppScores['curPredictionScore2'] > 0.3:
                    cppNumCorrect += 1
            if pyScores['curPredictionScore2'] > 0.3:
                pyNumCorrect += 1
if cppTM is not None:
    cppScores = cppTM.getStats()
pyScores = pyTM.getStats()
passTest = False
if cppTM is not None:
    if (cppNumCorrect == nPredictions) and (pyNumCorrect == nPredictions):
        passTest = True
elif pyNumCorrect == nPredictions:
    passTest = True
if not passTest:
    print 'CPP correct predictions:', cppNumCorrect
    print 'PY correct predictions:', pyNumCorrect
    print 'Total predictions:', nPredictions
return passTest
'Test segment learning'
def _testSL1(self, numOnes=10, numRepetitions=6, fixedResources=False, checkSynapseConsistency=True):
if fixedResources:
    testName = 'TestSL1_FS'
else:
    testName = 'TestSL1'
print '\nRunning %s...' % testName
(trainingSet, testSet) = self._buildSegmentLearningTrainingSet(numOnes, numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols=numCols, fixedResources=fixedResources,
                      checkSynapseConsistency=checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
    print '%s PASS' % testName
    return 1
else:
    print '%s FAILED' % testName
    return 0
'Test segment learning'
def _testSL2(self, numOnes=10, numRepetitions=10, fixedResources=False, checkSynapseConsistency=True):
if fixedResources:
    testName = 'TestSL2_FS'
else:
    testName = 'TestSL2'
print '\nRunning %s...' % testName
(trainingSet, testSet) = self._buildSL2TrainingSet(numOnes, numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols=numCols, fixedResources=fixedResources,
                      checkSynapseConsistency=checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
    print '%s PASS' % testName
    return 1
else:
    print '%s FAILED' % testName
    return 0
'Test segment learning without fixed resources'
def test_SL1NoFixedResources(self):
self._testSL1(fixedResources=False, checkSynapseConsistency=g_options.long)
'Test segment learning with fixed resources'
def test_SL1WithFixedResources(self):
if not g_options.long:
    print 'Test %s only enabled with the --long option' % self._testMethodName
    return
self._testSL1(fixedResources=True, checkSynapseConsistency=g_options.long)
'Test segment learning without fixed resources'
def test_SL2NoFixedResources(self):
if not g_options.long:
    print 'Test %s only enabled with the --long option' % self._testMethodName
    return
self._testSL2(fixedResources=False, checkSynapseConsistency=g_options.long)
'Test segment learning with fixed resources'
def test_SL2WithFixedResources(self):
if not g_options.long:
    print 'Test %s only enabled with the --long option' % self._testMethodName
    return
self._testSL2(fixedResources=True, checkSynapseConsistency=g_options.long)
'requestedActivities: a sequence of PeriodicActivityRequest elements'
def __init__(self, requestedActivities):
self.__activities = []
for req in requestedActivities:
    act = self.Activity(repeating=req.repeating,
                        period=req.period,
                        cb=req.cb,
                        iteratorHolder=[iter(xrange(req.period))])
    self.__activities.append(act)
return
'Activity tick handler; services all activities Returns: True if controlling iterator says it\'s okay to keep going; False to stop'
def tick(self):
for act in self.__activities:
    if not act.iteratorHolder[0]:
        continue
    try:
        next(act.iteratorHolder[0])
    except StopIteration:
        act.cb()
        if act.repeating:
            act.iteratorHolder[0] = iter(xrange(act.period))
        else:
            act.iteratorHolder[0] = None
return True
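The countdown trick above, isolated into a few lines (the step counts are illustrative): a period-N activity exhausts an N-item iterator one tick at a time, and StopIteration is the signal to fire the callback and, if repeating, re-arm:

countdown = iter(range(3))  # period = 3
for step in range(8):
    try:
        next(countdown)
    except StopIteration:
        print('callback fires at step %d' % step)  # fires at steps 3 and 7
        countdown = iter(range(3))  # re-arm, like a repeating activity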
'dirPath: the path that we attempted to create for experiment files reason: any object that can be converted to a string that explains the reason (may be an exception)'
def __init__(self, dirPath, reason):
super(_CreateDirectoryException, self).__init__(
    'Error creating directory <%s>: %s.' % (str(dirPath), str(reason)))
self.reason = reason
'problem: a string-convertible object that describes the problem experienced by the error-reporting function. precursor: a string-convertible object that explains the original error that the error-reporting function was attempting to report when it encountered its own failure.'
def __init__(self, problem, precursor):
super(_ErrorReportingException, self).__init__(
    "Encountered error: '%s' while reporting error: '%s'." % (problem, precursor))
'Get the sensor input element that corresponds to the given inference element. This is mainly used for metrics and prediction logging'
@staticmethod
def getInputElement(inferenceElement):
return InferenceElement.__inferenceInputMap.get(inferenceElement, None)
'Returns True if the inference from this timestep predicts the input for the NEXT timestep. NOTE: This should only be checked IF THE MODEL\'S INFERENCE TYPE IS ALSO TEMPORAL. That is, a temporal model CAN have non-temporal inference elements, but a non-temporal model CANNOT have temporal inference elements'
@staticmethod
def isTemporal(inferenceElement):
if InferenceElement.__temporalInferenceElements is None:
    InferenceElement.__temporalInferenceElements = set([InferenceElement.prediction])
return inferenceElement in InferenceElement.__temporalInferenceElements
'Returns the number of records that elapse between when an inference is made and when the corresponding input record will appear. For example, a multistep prediction for 3 timesteps out will have a delay of 3 Parameters: inferenceElement: The InferenceElement value being delayed key: If the inference is a dictionary type, this specifies key for the sub-inference that is being delayed'
@staticmethod
def getTemporalDelay(inferenceElement, key=None):
if inferenceElement in (InferenceElement.prediction,
                        InferenceElement.encodings):
    return 1
if inferenceElement in (InferenceElement.anomalyScore,
                        InferenceElement.anomalyLabel,
                        InferenceElement.classification,
                        InferenceElement.classConfidences):
    return 0
if inferenceElement in (InferenceElement.multiStepPredictions,
                        InferenceElement.multiStepBestPredictions):
    return int(key)
return 0
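A compact, hypothetical restatement of those delay rules (string element names stand in for the InferenceElement constants):

def temporalDelay(element, key=None):
    if element in ('prediction', 'encodings'):
        return 1  # the inference predicts the next record
    if element in ('multiStepPredictions', 'multiStepBestPredictions'):
        return int(key)  # the sub-inference key is the step count
    return 0  # anomaly scores, classifications, etc. describe this record

assert temporalDelay('multiStepPredictions', key='3') == 3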
'Returns the maximum delay for the InferenceElements in the inference dictionary Parameters: inferences: A dictionary where the keys are InferenceElements'
@staticmethod
def getMaxDelay(inferences):
maxDelay = 0
for (inferenceElement, inference) in inferences.iteritems():
    if isinstance(inference, dict):
        for key in inference.iterkeys():
            maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement, key),
                           maxDelay)
    else:
        maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement), maxDelay)
return maxDelay
'Returns True if the inference type is \'temporal\', i.e. requires a temporal memory in the network.'
@staticmethod
def isTemporal(inferenceType):
if InferenceType.__temporalInferenceTypes is None:
    InferenceType.__temporalInferenceTypes = set([InferenceType.TemporalNextStep,
                                                  InferenceType.TemporalClassification,
                                                  InferenceType.TemporalAnomaly,
                                                  InferenceType.TemporalMultiStep,
                                                  InferenceType.NontemporalMultiStep])
return inferenceType in InferenceType.__temporalInferenceTypes
'Instantiate the Hypersearch worker Parameters: options: The command line options. See the main() method for a description of these options cmdLineArgs: Copy of the command line arguments, so we can place them in the log'
def __init__(self, options, cmdLineArgs):
self._options = options
self.logger = logging.getLogger('.'.join(['com.numenta.nupic.swarming',
                                          self.__class__.__name__]))
if options.logLevel is not None:
    self.logger.setLevel(options.logLevel)
self.logger.info('Launched with command line arguments: %s' % str(cmdLineArgs))
self.logger.debug('Env variables: %s' % pprint.pformat(os.environ))
random.seed(42)
self._hs = None
self._modelIDCtrDict = dict()
self._modelIDCtrList = []
self._modelIDSet = set()
self._workerID = None
'For all models that modified their results since last time this method was called, send their latest results to the Hypersearch implementation.'
def _processUpdatedModels(self, cjDAO):
curModelIDCtrList = cjDAO.modelsGetUpdateCounters(self._options.jobID)
if len(curModelIDCtrList) == 0:
    return
self.logger.debug('current modelID/updateCounters: %s' % str(curModelIDCtrList))
self.logger.debug('last modelID/updateCounters: %s' % str(self._modelIDCtrList))
curModelIDCtrList = sorted(curModelIDCtrList)
numItems = len(curModelIDCtrList)
changedEntries = filter(lambda x: x[1][1] != x[2][1],
                        itertools.izip(xrange(numItems), curModelIDCtrList,
                                       self._modelIDCtrList))
if len(changedEntries) > 0:
    self.logger.debug('changedEntries: %s', str(changedEntries))
    for entry in changedEntries:
        (idx, (modelID, curCtr), (_, oldCtr)) = entry
        self._modelIDCtrDict[modelID] = curCtr
        assert self._modelIDCtrList[idx][0] == modelID
        assert curCtr != oldCtr
        self._modelIDCtrList[idx][1] = curCtr
    changedModelIDs = [x[1][0] for x in changedEntries]
    modelResults = cjDAO.modelsGetResultAndStatus(changedModelIDs)
    for mResult in modelResults:
        results = mResult.results
        if results is not None:
            results = json.loads(results)
        self._hs.recordModelProgress(modelID=mResult.modelId, modelParams=None,
                                     modelParamsHash=mResult.engParamsHash,
                                     results=results,
                                     completed=(mResult.status == cjDAO.STATUS_COMPLETED),
                                     completionReason=mResult.completionReason,
                                     matured=mResult.engMatured,
                                     numRecords=mResult.numRecords)
curModelIDSet = set([x[0] for x in curModelIDCtrList])
newModelIDs = curModelIDSet.difference(self._modelIDSet)
if len(newModelIDs) > 0:
    self._modelIDSet.update(newModelIDs)
    curModelIDCtrDict = dict(curModelIDCtrList)
    modelInfos = cjDAO.modelsGetResultAndStatus(newModelIDs)
    modelInfos.sort()
    modelParamsAndHashs = cjDAO.modelsGetParams(newModelIDs)
    modelParamsAndHashs.sort()
    for (mResult, mParamsAndHash) in itertools.izip(modelInfos, modelParamsAndHashs):
        modelID = mResult.modelId
        assert modelID == mParamsAndHash.modelId
        self._modelIDCtrDict[modelID] = curModelIDCtrDict[modelID]
        self._modelIDCtrList.append([modelID, curModelIDCtrDict[modelID]])
        results = mResult.results
        if results is not None:
            results = json.loads(mResult.results)
        self._hs.recordModelProgress(modelID=modelID,
                                     modelParams=json.loads(mParamsAndHash.params),
                                     modelParamsHash=mParamsAndHash.engParamsHash,
                                     results=results,
                                     completed=(mResult.status == cjDAO.STATUS_COMPLETED),
                                     completionReason=mResult.completionReason,
                                     matured=mResult.engMatured,
                                     numRecords=mResult.numRecords)
    self._modelIDCtrList.sort()
'Run this worker. Parameters: retval: jobID of the job we ran. This is used by unit test code when calling this worker using the --params command line option (which tells this worker to insert the job itself).'
def run(self):
options = self._options
self.logger.info('Connecting to the jobs database')
cjDAO = ClientJobsDAO.get()
self._workerID = cjDAO.getConnectionID()
if options.clearModels:
    cjDAO.modelsClearAll()
if options.params is not None:
    options.jobID = cjDAO.jobInsert(client='hwTest', cmdLine="echo 'test mode'",
                                    params=options.params, alreadyRunning=True,
                                    minimumWorkers=1, maximumWorkers=1,
                                    jobType=cjDAO.JOB_TYPE_HS)
if options.workerID is not None:
    wID = options.workerID
else:
    wID = self._workerID
buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
logPrefix = '<BUILDID=%s, WORKER=HW, WRKID=%s, JOBID=%s> ' % (buildID, wID,
                                                              options.jobID)
ExtendedLogger.setLogPrefix(logPrefix)
if options.resetJobStatus:
    cjDAO.jobSetFields(options.jobID,
                       fields={'workerCompletionReason': ClientJobsDAO.CMPL_REASON_SUCCESS,
                               'cancel': False},
                       useConnectionID=False,
                       ignoreUnchanged=True)
jobInfo = cjDAO.jobInfo(options.jobID)
self.logger.info('Job info retrieved: %s' % str(clippedObj(jobInfo)))
jobParams = json.loads(jobInfo.params)
jsonSchemaPath = os.path.join(os.path.dirname(__file__), 'jsonschema',
                              'jobParamsSchema.json')
validate(jobParams, schemaPath=jsonSchemaPath)
hsVersion = jobParams.get('hsVersion', None)
if hsVersion == 'v2':
    self._hs = HypersearchV2(searchParams=jobParams, workerID=self._workerID,
                             cjDAO=cjDAO, jobID=options.jobID,
                             logLevel=options.logLevel)
else:
    raise RuntimeError('Invalid Hypersearch implementation (%s) specified' % hsVersion)
try:
    exit = False
    numModelsTotal = 0
    print >>sys.stderr, 'reporter:status:Evaluating first model...'
    while not exit:
        batchSize = 10
        modelIDToRun = None
        while modelIDToRun is None:
            if options.modelID is None:
                self._processUpdatedModels(cjDAO)
                (exit, newModels) = self._hs.createModels(numModels=batchSize)
                if exit:
                    break
                if len(newModels) == 0:
                    continue
                for (modelParams, modelParamsHash, particleHash) in newModels:
                    jsonModelParams = json.dumps(modelParams)
                    (modelID, ours) = cjDAO.modelInsertAndStart(options.jobID,
                                                                jsonModelParams,
                                                                modelParamsHash,
                                                                particleHash)
                    if not ours:
                        mParamsAndHash = cjDAO.modelsGetParams([modelID])[0]
                        mResult = cjDAO.modelsGetResultAndStatus([modelID])[0]
                        results = mResult.results
                        if results is not None:
                            results = json.loads(results)
                        modelParams = json.loads(mParamsAndHash.params)
                        particleHash = cjDAO.modelsGetFields(modelID,
                                                             ['engParticleHash'])[0]
                        particleInst = '%s.%s' % (modelParams['particleState']['id'],
                                                  modelParams['particleState']['genIdx'])
                        self.logger.info("Adding model %d to our internal DB because "
                                         "modelInsertAndStart() failed to insert it: "
                                         "paramsHash=%s, particleHash=%s, particleId='%s'",
                                         modelID,
                                         mParamsAndHash.engParamsHash.encode('hex'),
                                         particleHash.encode('hex'), particleInst)
                        self._hs.recordModelProgress(modelID=modelID,
                                                     modelParams=modelParams,
                                                     modelParamsHash=mParamsAndHash.engParamsHash,
                                                     results=results,
                                                     completed=(mResult.status == cjDAO.STATUS_COMPLETED),
                                                     completionReason=mResult.completionReason,
                                                     matured=mResult.engMatured,
                                                     numRecords=mResult.numRecords)
                    else:
                        modelIDToRun = modelID
                        break
            else:
                # A specific modelID was passed on the command line
                modelIDToRun = int(options.modelID)
                mParamsAndHash = cjDAO.modelsGetParams([modelIDToRun])[0]
                modelParams = json.loads(mParamsAndHash.params)
                modelParamsHash = mParamsAndHash.engParamsHash
                cjDAO.modelSetFields(modelIDToRun, dict(engWorkerConnId=self._workerID))
                if False:
                    for attempt in range(1000):
                        paramsHash = hashlib.md5('OrphanParams.%d.%d' %
                                                 (modelIDToRun, attempt)).digest()
                        particleHash = hashlib.md5('OrphanParticle.%d.%d' %
                                                   (modelIDToRun, attempt)).digest()
                        try:
                            cjDAO.modelSetFields(modelIDToRun,
                                                 dict(engParamsHash=paramsHash,
                                                      engParticleHash=particleHash))
                            success = True
                        except:
                            success = False
                        if success:
                            break
                    if not success:
                        raise RuntimeError('Unexpected failure to change paramsHash '
                                           'and particleHash of orphaned model')
                    (modelIDToRun, ours) = cjDAO.modelInsertAndStart(options.jobID,
                                                                     mParamsAndHash.params,
                                                                     modelParamsHash)
        if exit:
            break
        self.logger.info('RUNNING MODEL GID=%d, paramsHash=%s, params=%s',
                         modelIDToRun, modelParamsHash.encode('hex'), modelParams)
        persistentJobGUID = jobParams['persistentJobGUID']
        assert persistentJobGUID, 'persistentJobGUID: %r' % (persistentJobGUID,)
        modelCheckpointGUID = (jobInfo.client + '_' + persistentJobGUID +
                               '_' + str(modelIDToRun))
        self._hs.runModel(modelID=modelIDToRun, jobID=options.jobID,
                          modelParams=modelParams, modelParamsHash=modelParamsHash,
                          jobsDAO=cjDAO, modelCheckpointGUID=modelCheckpointGUID)
        numModelsTotal += 1
        self.logger.info('COMPLETED MODEL GID=%d; EVALUATED %d MODELs',
                         modelIDToRun, numModelsTotal)
        print >>sys.stderr, ('reporter:status:Evaluated %d models...' % numModelsTotal)
        print >>sys.stderr, 'reporter:counter:HypersearchWorker,numModels,1'
        if options.modelID is not None:
            exit = True
finally:
    self._hs.close()
self.logger.info('FINISHED. Evaluated %d models.' % numModelsTotal)
print >>sys.stderr, ('reporter:status:Finished, evaluated %d models' % numModelsTotal)
return options.jobID
'Parameters: modelID: ID of this model in the models table jobID: params: a dictionary of parameters for this dummy model. The possible keys are: delay: OPTIONAL-This specifies the amount of time (in seconds) that the experiment should wait before STARTING to process records. This is useful for simulating workers that start/end at different times finalDelay: OPTIONAL-This specifies the amount of time (in seconds) that the experiment should wait before it conducts its finalization operations. These operations include checking if the model is the best model, and writing out checkpoints. waitTime: OPTIONAL-The amount of time (in seconds) to wait in a busy loop to simulate computation time on EACH ITERATION randomizeWait: OPTIONAL-([0.0-1.0]). Default:None If set to a value, the above specified wait time will randomly be dithered by +/- <randomizeWait>% of the specified value. For example, if randomizeWait=0.2, the wait time will be dithered by +/- 20% of its value. iterations: OPTIONAL-How many iterations to run the model for. -1 means run forever (default=1) metricFunctions: OPTIONAL-A list of single argument functions serialized as strings, which return the metric value given the record number. Mutually exclusive with metricValue metricValue: OPTIONAL-A single value to use for the metric value (used to debug hypersearch). Mutually exclusive with metricFunctions finalize: OPTIONAL-(True/False). Default:True When False, this will prevent the model from recording its metrics and performing other functions that it usually performs after the model has finished running permutationParams: A dict containing the instances of all the variables being permuted over experimentDirectory: REQUIRED-An absolute path to a directory with a valid description.py file. NOTE: This does not actually affect the running of the model or the metrics produced. It is required to create certain objects (such as the output stream) makeCheckpoint: True to actually write a checkpoint out to disk (default: False) sysExitModelRange: A string containing two integers \'firstIdx, endIdx\'. When present, if we are running the firstIdx\'th model up to but not including the endIdx\'th model, then do a sys.exit() while running the model. This causes the worker to exit, simulating an orphaned model. delayModelRange: A string containing two integers \'firstIdx, endIdx\'. When present, if we are running the firstIdx\'th model up to but not including the endIdx\'th model, then do a delay of 10 sec. while running the model. This causes the worker to run slower and for some other worker to think the model should be orphaned. exitAfter: The number of iterations after which the model should perform a sys.exit. This is an alternative way of creating an orphaned model that uses the dummy model\'s modelIndex instead of the modelID errModelRange: A string containing two integers \'firstIdx, endIdx\'. When present, if we are running the firstIdx\'th model up to but not including the endIdx\'th model, then raise an exception while running the model. This causes the model to fail with a CMPL_REASON_ERROR reason sleepModelRange: A string containing 3 integers \'firstIdx, endIdx: delay\'. When present, if we are running the firstIdx\'th model up to but not including the endIdx\'th model, then sleep for delay seconds at the beginning of the run. jobFailErr: If true, model will raise a JobFailException which should cause the job to be marked as failed and immediately cancel all other workers. predictedField: Name of the input field for which this model is being optimized reportKeyPatterns: list of items from the results dict to include in the report. These can be regular expressions. optimizeKeyPattern: Which report item, if any, we will be optimizing for. This can also be a regular expression, but is an error if it matches more than one key from the experiment\'s results. jobsDAO: Jobs data access object - the interface to the jobs database which has the model\'s table. modelCheckpointGUID: A persistent, globally-unique identifier for constructing the model checkpoint key logLevel: override logging level to this value, if not None predictionCacheMaxRecords: Maximum number of records for the prediction output cache. Pass None for the default value.'
def __init__(self, modelID, jobID, params, predictedField, reportKeyPatterns, optimizeKeyPattern, jobsDAO, modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None):
super(OPFDummyModelRunner, self).__init__(modelID=modelID, jobID=jobID,
                                          predictedField=predictedField,
                                          experimentDir=None,
                                          reportKeyPatterns=reportKeyPatterns,
                                          optimizeKeyPattern=optimizeKeyPattern,
                                          jobsDAO=jobsDAO,
                                          modelCheckpointGUID=modelCheckpointGUID,
                                          logLevel=logLevel,
                                          predictionCacheMaxRecords=None)
self._predictionCacheMaxRecords = predictionCacheMaxRecords
self._streamDef = copy.deepcopy(self._DUMMY_STREAMDEF)
self._params = copy.deepcopy(self._DEFAULT_PARAMS)
if ('permutationParams' in params) and ('__model_num' in params['permutationParams']):
    self.modelIndex = params['permutationParams']['__model_num']
else:
    self.modelIndex = OPFDummyModelRunner.modelIndex
    OPFDummyModelRunner.modelIndex += 1
self._loadDummyModelParameters(params)
self._logger.debug('Using Dummy model params: %s', self._params)
self._busyWaitTime = self._params['waitTime']
self._iterations = self._params['iterations']
self._doFinalize = self._params['finalize']
self._delay = self._params['delay']
self._sleepModelRange = self._params['sleepModelRange']
self._makeCheckpoint = self._params['makeCheckpoint']
self._finalDelay = self._params['finalDelay']
self._exitAfter = self._params['exitAfter']
self.randomizeWait = self._params['randomizeWait']
if self._busyWaitTime is not None:
    self.__computeWaitTime()
if (self._params['metricFunctions'] is not None) and \
   (self._params['metricValue'] is not None):
    raise RuntimeError("Error, only 1 of 'metricFunctions' or 'metricValue' "
                       "can be passed to OPFDummyModelRunner params ")
self.metrics = None
self.metricValue = None
if self._params['metricFunctions'] is not None:
    self.metrics = eval(self._params['metricFunctions'])
elif self._params['metricValue'] is not None:
    self.metricValue = float(self._params['metricValue'])
else:
    self.metrics = OPFDummyModelRunner.metrics[0]
if self._params['experimentDirectory'] is not None:
    self._model = self.__createModel(self._params['experimentDirectory'])
    self.__fieldInfo = self._model.getFieldInfo()
self._sysExitModelRange = self._params['sysExitModelRange']
if self._sysExitModelRange is not None:
    self._sysExitModelRange = [int(x) for x in self._sysExitModelRange.split(',')]
self._delayModelRange = self._params['delayModelRange']
if self._delayModelRange is not None:
    self._delayModelRange = [int(x) for x in self._delayModelRange.split(',')]
self._errModelRange = self._params['errModelRange']
if self._errModelRange is not None:
    self._errModelRange = [int(x) for x in self._errModelRange.split(',')]
self._computModelDelay()
self._jobFailErr = self._params['jobFailErr']
self._logger.debug('Dummy Model %d params %r', self._modelID, self._params)
'Loads all the parameters for this dummy model. For any parameters specified as lists, read the appropriate value for this model using the model index'
def _loadDummyModelParameters(self, params):
for (key, value) in params.iteritems():
    if type(value) == list:
        index = self.modelIndex % len(params[key])
        self._params[key] = params[key][index]
    else:
        self._params[key] = params[key]
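How list-valued parameters fan out across models, per the indexing above (the values are hypothetical):

params = {'waitTime': [0.1, 0.5, 1.0]}
for modelIndex in range(5):
    waitTime = params['waitTime'][modelIndex % len(params['waitTime'])]
    print(modelIndex, waitTime)  # 0->0.1, 1->0.5, 2->1.0, 3->0.1, 4->0.5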
'Computes the amount of time (if any) to delay the run of this model. This can be determined by two mutually exclusive parameters: delay and sleepModelRange. \'delay\' specifies the number of seconds a model should be delayed. If a list is specified, the appropriate amount of delay is determined by using the model\'s modelIndex property. However, this doesn\'t work when testing orphaned models, because the modelIndex will be the same for every recovery attempt. Therefore, every recovery attempt will also be delayed and potentially orphaned. \'sleepModelRange\' doesn\'t use the modelIndex property for a model, but rather sees which order the model is in the database, and uses that to determine whether or not a model should be delayed.'
def _computModelDelay(self):
if (self._params['delay'] is not None) and (self._params['sleepModelRange'] is not None):
    raise RuntimeError("Only one of 'delay' or 'sleepModelRange' may be specified")
if self._sleepModelRange is not None:
    (range, delay) = self._sleepModelRange.split(':')
    delay = float(delay)
    range = map(int, range.split(','))
    modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)
    modelIDs.sort()
    range[1] = min(range[1], len(modelIDs))
    if self._modelID in modelIDs[range[0]:range[1]]:
        self._delay = delay
else:
    self._delay = self._params['delay']
'Protected function that can be overridden by subclasses. Its main purpose is to allow the OPFDummyModelRunner to override this with deterministic values. Returns: All the metrics being computed for this model'
def _getMetrics(self):
metric = None
if self.metrics is not None:
    metric = self.metrics(self._currentRecordIndex + 1)
elif self.metricValue is not None:
    metric = self.metricValue
else:
    raise RuntimeError('No metrics or metric value specified for dummy model')
return {self._optimizeKeyPattern: metric}
'Runs the given OPF task against the given Model instance'
def run(self):
self._logger.debug('Starting Dummy Model: modelID=%s;' % self._modelID)
periodic = self._initPeriodicActivities()
self._optimizedMetricLabel = self._optimizeKeyPattern
self._reportMetricLabels = [self._optimizeKeyPattern]
if self._iterations >= 0:
    iterTracker = iter(xrange(self._iterations))
else:
    iterTracker = iter(itertools.count())
doSysExit = False
if self._sysExitModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg, end) = self._sysExitModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
        doSysExit = True
if self._delayModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg, end) = self._delayModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
        time.sleep(10)
if self._errModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg, end) = self._errModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
        raise RuntimeError('Exiting with error due to errModelRange parameter')
if self._delay is not None:
    time.sleep(self._delay)
self._currentRecordIndex = 0
while True:
    if self._isKilled:
        break
    if self._isCanceled:
        break
    if self._isMature:
        if not self._isBestModel:
            self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
            break
        else:
            self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
    try:
        self._currentRecordIndex = next(iterTracker)
    except StopIteration:
        break
    self._writePrediction(ModelResult(None, None, None, None))
    periodic.tick()
    if self.__shouldSysExit(self._currentRecordIndex):
        sys.exit(1)
    if self._busyWaitTime is not None:
        time.sleep(self._busyWaitTime)
        self.__computeWaitTime()
    if doSysExit:
        sys.exit(1)
    if self._jobFailErr:
        raise utils.JobFailException('E10000', "dummyModel's jobFailErr was True.")
if self._doFinalize:
    if not self._makeCheckpoint:
        self._model = None
    if self._finalDelay is not None:
        time.sleep(self._finalDelay)
    self._finalize()
self._logger.info('Finished: modelID=%r ' % self._modelID)
return (self._cmpReason, None)
'Creates the model\'s PredictionLogger object, which is an interface to write model results to a permanent storage location'
def _createPredictionLogger(self):
class DummyLogger:
    def writeRecord(self, record):
        pass
    def writeRecords(self, records, progressCB):
        pass
    def close(self):
        pass

self._predictionLogger = DummyLogger()
'Checks to see if the model should exit based on the exitAfter dummy parameter'
def __shouldSysExit(self, iteration):
if (self._exitAfter is None) or (iteration < self._exitAfter):
    return False
results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])
modelIDs = [e[0] for e in results]
modelNums = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in results]
sameModelNumbers = filter(lambda x: x[1] == self.modelIndex, zip(modelIDs, modelNums))
firstModelID = min(zip(*sameModelNumbers)[0])
return firstModelID == self._modelID
'Create our state object. Parameters: hsObj: Reference to the HypersearchV2 instance cjDAO: ClientJobsDAO instance logger: logger to use jobID: our JobID'
def __init__(self, hsObj):
self._hsObj = hsObj
self.logger = self._hsObj.logger
self._state = None
self._priorStateJSON = None
self._dirty = False
self.readStateFromDB()
'Return true if our local copy of the state has changed since the last time we read from the DB.'
def isDirty(self):
return self._dirty
'Return true if the search should be considered over.'
def isSearchOver(self):
return self._state['searchOver']
'Set our state to that obtained from the engWorkerState field of the job record. Parameters: stateJSON: JSON encoded state from job record'
def readStateFromDB(self):
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
                                                       ['engWorkerState'])[0]
if self._priorStateJSON is None:
    swarms = dict()
    if self._hsObj._fixedFields is not None:
        print self._hsObj._fixedFields
        encoderSet = []
        for field in self._hsObj._fixedFields:
            if field == '_classifierInput':
                continue
            encoderName = self.getEncoderKeyFromName(field)
            assert encoderName in self._hsObj._encoderNames, (
                "The field '%s' specified in the fixedFields list is not present "
                "in this model." % field)
            encoderSet.append(encoderName)
        encoderSet.sort()
        swarms['.'.join(encoderSet)] = {'status': 'active', 'bestModelId': None,
                                        'bestErrScore': None, 'sprintIdx': 0}
    elif self._hsObj._searchType == HsSearchType.temporal:
        for encoderName in self._hsObj._encoderNames:
            swarms[encoderName] = {'status': 'active', 'bestModelId': None,
                                   'bestErrScore': None, 'sprintIdx': 0}
    elif self._hsObj._searchType == HsSearchType.classification:
        for encoderName in self._hsObj._encoderNames:
            if encoderName == self._hsObj._predictedFieldEncoder:
                continue
            swarms[encoderName] = {'status': 'active', 'bestModelId': None,
                                   'bestErrScore': None, 'sprintIdx': 0}
    elif self._hsObj._searchType == HsSearchType.legacyTemporal:
        swarms[self._hsObj._predictedFieldEncoder] = {'status': 'active',
                                                      'bestModelId': None,
                                                      'bestErrScore': None,
                                                      'sprintIdx': 0}
    else:
        raise RuntimeError('Unsupported search type: %s' % self._hsObj._searchType)
    self._state = dict(lastUpdateTime=time.time(),
                       lastGoodSprint=None,
                       searchOver=False,
                       activeSwarms=swarms.keys(),
                       swarms=swarms,
                       sprints=[{'status': 'active', 'bestModelId': None,
                                 'bestErrScore': None}],
                       blackListedEncoders=[])
    self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID, 'engWorkerState',
                                          json.dumps(self._state), None)
    self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
                                                           ['engWorkerState'])[0]
    assert self._priorStateJSON is not None
self._state = json.loads(self._priorStateJSON)
self._dirty = False
'Update the state in the job record with our local changes (if any). If we don\'t have the latest state in our priorStateJSON, then re-load in the latest state and return False. If we were successful writing out our changes, return True Parameters: retval: True if we were successful writing out our changes False if our priorState is not the latest that was in the DB. In this case, we will re-load our state from the DB'
def writeStateToDB(self):
if not self._dirty:
    return True
self._state['lastUpdateTime'] = time.time()
newStateJSON = json.dumps(self._state)
success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID, 'engWorkerState',
                                                str(newStateJSON),
                                                str(self._priorStateJSON))
if success:
    self.logger.debug('Success changing hsState to: \n%s '
                      % pprint.pformat(self._state, indent=4))
    self._priorStateJSON = newStateJSON
else:
    self.logger.debug('Failed to change hsState to: \n%s '
                      % pprint.pformat(self._state, indent=4))
    self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
                                                           ['engWorkerState'])[0]
    self._state = json.loads(self._priorStateJSON)
    self.logger.info('New hsState has been set by some other worker to: \n%s'
                     % pprint.pformat(self._state, indent=4))
return success
'Given an encoder dictionary key, get the encoder name. Encoders are a sub-dict within model params, and in HSv2, their key is structured like this for example: \'modelParams|sensorParams|encoders|home_winloss\' The encoderName is the last word in the | separated key name'
def getEncoderNameFromKey(self, key):
return key.split('|')[(-1)]
'Given an encoder name, get the key. Encoders are a sub-dict within model params, and in HSv2, their key is structured like this for example: \'modelParams|sensorParams|encoders|home_winloss\' The encoderName is the last word in the | separated key name'
def getEncoderKeyFromName(self, name):
return ('modelParams|sensorParams|encoders|%s' % name)
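The two helpers are inverses; a quick round-trip check using the key format from the docstrings above:

key = 'modelParams|sensorParams|encoders|home_winloss'
name = key.split('|')[-1]  # getEncoderNameFromKey
assert name == 'home_winloss'
assert 'modelParams|sensorParams|encoders|%s' % name == key  # getEncoderKeyFromName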
'Return the field contributions statistics. Parameters: retval: Dictionary where the keys are the field names and the values are how much each field contributed to the best score.'
def getFieldContributions(self):
if self._hsObj._fixedFields is not None:
    return (dict(), dict())
predictedEncoderName = self._hsObj._predictedFieldEncoder
fieldScores = []
for (swarmId, info) in self._state['swarms'].iteritems():
    encodersUsed = swarmId.split('.')
    if len(encodersUsed) != 1:
        continue
    field = self.getEncoderNameFromKey(encodersUsed[0])
    bestScore = info['bestErrScore']
    if bestScore is None:
        (_modelId, bestScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
    fieldScores.append((bestScore, field))
if self._hsObj._searchType == HsSearchType.legacyTemporal:
    assert len(fieldScores) == 1
    (baseErrScore, baseField) = fieldScores[0]
    for (swarmId, info) in self._state['swarms'].iteritems():
        encodersUsed = swarmId.split('.')
        if len(encodersUsed) != 2:
            continue
        fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]
        fields.remove(baseField)
        fieldScores.append((info['bestErrScore'], fields[0]))
else:
    fieldScores.sort(reverse=True)
    if (self._hsObj._maxBranching > 0) and (len(fieldScores) > self._hsObj._maxBranching):
        baseErrScore = fieldScores[-self._hsObj._maxBranching - 1][0]
    else:
        baseErrScore = fieldScores[0][0]
pctFieldContributionsDict = dict()
absFieldContributionsDict = dict()
if baseErrScore is not None:
    if abs(baseErrScore) < 1e-05:
        baseErrScore = 1e-05
    for (errScore, field) in fieldScores:
        if errScore is not None:
            pctBetter = ((baseErrScore - errScore) * 100.0) / baseErrScore
        else:
            pctBetter = 0.0
            errScore = baseErrScore
        pctFieldContributionsDict[field] = pctBetter
        absFieldContributionsDict[field] = baseErrScore - errScore
self.logger.debug('FieldContributions: %s' % pctFieldContributionsDict)
return (pctFieldContributionsDict, absFieldContributionsDict)
'Return the list of all swarms in the given sprint. Parameters: retval: list of all swarm Ids in the given sprint'
def getAllSwarms(self, sprintIdx):
swarmIds = []
for (swarmId, info) in self._state['swarms'].iteritems():
    if (info['sprintIdx'] == sprintIdx):
        swarmIds.append(swarmId)
return swarmIds
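All of the getters in this group filter the same structure. Judging from how the code reads and writes it, self._state['swarms'] maps a dot-joined, sorted encoder-key string to a small status record; a sketch of that assumed shape, with hypothetical values:

state = {
    'swarms': {
        # key: dot-joined, sorted encoder keys forming the swarm id
        'modelParams|sensorParams|encoders|home_winloss': {
            'status': 'active',      # 'active' | 'completing' | 'completed' | 'killed'
            'bestModelId': None,
            'bestErrScore': None,
            'sprintIdx': 0,
        },
    },
}
activeIds = [sid for (sid, info) in state['swarms'].items()
             if (info['status'] == 'active')]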
'Return the list of active swarms in the given sprint. These are swarms which still need new particles created in them. Parameters: sprintIdx: which sprint to query. If None, get active swarms from all sprints retval: list of active swarm Ids in the given sprint'
def getActiveSwarms(self, sprintIdx=None):
swarmIds = []
for (swarmId, info) in self._state['swarms'].iteritems():
    if ((sprintIdx is not None) and (info['sprintIdx'] != sprintIdx)):
        continue
    if (info['status'] == 'active'):
        swarmIds.append(swarmId)
return swarmIds
'Return the list of swarms in the given sprint that were not killed. This is called when we are trying to figure out which encoders to carry forward to the next sprint. We don\'t want to carry forward encoder combinations which were obviously bad (in killed swarms). Parameters: retval: list of non-killed swarm Ids in the given sprint'
def getNonKilledSwarms(self, sprintIdx):
swarmIds = []
for (swarmId, info) in self._state['swarms'].iteritems():
    if ((info['sprintIdx'] == sprintIdx) and (info['status'] != 'killed')):
        swarmIds.append(swarmId)
return swarmIds
'Return the list of all completed swarms. Parameters: retval: list of completed swarm Ids'
def getCompletedSwarms(self):
swarmIds = []
for (swarmId, info) in self._state['swarms'].iteritems():
    if (info['status'] == 'completed'):
        swarmIds.append(swarmId)
return swarmIds
'Return the list of all completing swarms. Parameters: retval: list of completing swarm Ids'
def getCompletingSwarms(self):
swarmIds = []
for (swarmId, info) in self._state['swarms'].iteritems():
    if (info['status'] == 'completing'):
        swarmIds.append(swarmId)
return swarmIds
'Return the best model ID and its errScore from the given swarm. If the swarm has not completed yet, the bestModelID will be None. Parameters: retval: (modelId, errScore)'
def bestModelInCompletedSwarm(self, swarmId):
swarmInfo = self._state['swarms'][swarmId]
return (swarmInfo['bestModelId'], swarmInfo['bestErrScore'])
'Return the best model ID and its errScore from the given sprint. If the sprint has not completed yet, the bestModelID will be None. Parameters: retval: (modelId, errScore)'
def bestModelInCompletedSprint(self, sprintIdx):
sprintInfo = self._state['sprints'][sprintIdx]
return (sprintInfo['bestModelId'], sprintInfo['bestErrScore'])
'Return the best model ID and its errScore from the given sprint, which may still be in progress. This returns the best score from all models in the sprint which have matured so far. Parameters: retval: (modelId, errScore)'
def bestModelInSprint(self, sprintIdx):
swarms = self.getAllSwarms(sprintIdx)
bestModelId = None
bestErrScore = numpy.inf
for swarmId in swarms:
    (modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
    if (errScore < bestErrScore):
        bestModelId = modelId
        bestErrScore = errScore
return (bestModelId, bestErrScore)
'Change the given swarm\'s state to \'newStatus\'. If \'newStatus\' is \'completed\', the swarm\'s bestModelId and bestErrScore are looked up from the results DB and recorded. Parameters: swarmId: swarm Id newStatus: new status, either \'active\', \'completing\', \'completed\', or \'killed\''
def setSwarmState(self, swarmId, newStatus):
assert (newStatus in ['active', 'completing', 'completed', 'killed'])
swarmInfo = self._state['swarms'][swarmId]
if (swarmInfo['status'] == newStatus):
    return
# Never revert a completed swarm back to completing
if ((swarmInfo['status'] == 'completed') and (newStatus == 'completing')):
    return
self._dirty = True
swarmInfo['status'] = newStatus
if (newStatus == 'completed'):
    (modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
    swarmInfo['bestModelId'] = modelId
    swarmInfo['bestErrScore'] = errScore
if ((newStatus != 'active') and (swarmId in self._state['activeSwarms'])):
    self._state['activeSwarms'].remove(swarmId)
if (newStatus == 'killed'):
    self._hsObj.killSwarmParticles(swarmId)
# Roll the swarm statuses up into the containing sprint's status
sprintIdx = swarmInfo['sprintIdx']
self.isSprintActive(sprintIdx)
sprintInfo = self._state['sprints'][sprintIdx]
statusCounts = dict(active=0, completing=0, completed=0, killed=0)
bestModelIds = []
bestErrScores = []
for info in self._state['swarms'].itervalues():
    if (info['sprintIdx'] != sprintIdx):
        continue
    statusCounts[info['status']] += 1
    if (info['status'] == 'completed'):
        bestModelIds.append(info['bestModelId'])
        bestErrScores.append(info['bestErrScore'])
if (statusCounts['active'] > 0):
    sprintStatus = 'active'
elif (statusCounts['completing'] > 0):
    sprintStatus = 'completing'
else:
    sprintStatus = 'completed'
sprintInfo['status'] = sprintStatus
if (sprintStatus == 'completed'):
    if (len(bestErrScores) > 0):
        whichIdx = numpy.array(bestErrScores).argmin()
        sprintInfo['bestModelId'] = bestModelIds[whichIdx]
        sprintInfo['bestErrScore'] = bestErrScores[whichIdx]
    else:
        # No completed swarms in this sprint
        sprintInfo['bestModelId'] = 0
        sprintInfo['bestErrScore'] = numpy.inf
    # If this sprint did NOT beat the best earlier sprint, stop expanding
    bestPrior = numpy.inf
    for idx in range(sprintIdx):
        if (self._state['sprints'][idx]['status'] == 'completed'):
            (_, errScore) = self.bestModelInCompletedSprint(idx)
            if (errScore is None):
                errScore = numpy.inf
        else:
            errScore = numpy.inf
        if (errScore < bestPrior):
            bestPrior = errScore
    if (sprintInfo['bestErrScore'] >= bestPrior):
        self._state['lastGoodSprint'] = (sprintIdx - 1)
    if ((self._state['lastGoodSprint'] is not None) and (not self.anyGoodSprintsActive())):
        self._state['searchOver'] = True
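The tail of this method implements early stopping across sprints: once a completed sprint fails to beat the best earlier sprint, lastGoodSprint is pinned to the sprint before it, and the search ends when no good sprints remain active. A toy restatement with hypothetical per-sprint best scores:

sprintBestScores = [10.0, 8.0, 9.0]              # best err score per completed sprint
sprintIdx = 2
bestPrior = min(sprintBestScores[:sprintIdx])    # 8.0
if (sprintBestScores[sprintIdx] >= bestPrior):   # 9.0 did not improve on 8.0
    lastGoodSprint = (sprintIdx - 1)             # 1: sprint 1 was the last good one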
'Return True if there are any more good sprints still being explored. A \'good\' sprint is one that came before the point where we detected an increase in error from one sprint to the next.'
def anyGoodSprintsActive(self):
if (self._state['lastGoodSprint'] is not None):
    goodSprints = self._state['sprints'][0:(self._state['lastGoodSprint'] + 1)]
else:
    goodSprints = self._state['sprints']
for sprint in goodSprints:
    if (sprint['status'] == 'active'):
        anyActiveSprints = True
        break
else:
    anyActiveSprints = False
return anyActiveSprints
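The body above leans on Python's for/else idiom: the else clause runs only if the loop completes without break. A minimal demonstration:

for status in ['completed', 'killed']:
    if (status == 'active'):
        found = True
        break
else:
    found = False      # reached only because the loop never hit break
print(found)           # False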
'Return True if the given sprint has completed.'
def isSprintCompleted(self, sprintIdx):
numExistingSprints = len(self._state['sprints'])
if (sprintIdx >= numExistingSprints):
    return False
return (self._state['sprints'][sprintIdx]['status'] == 'completed')
'See if we can kill off some speculative swarms. If an earlier sprint has finally completed, we can now tell which fields should *really* be present in the sprints we\'ve already started due to speculation, and kill off the swarms that should not have been included.'
def killUselessSwarms(self):
numExistingSprints = len(self._state['sprints'])
if (self._hsObj._searchType == HsSearchType.legacyTemporal):
    if (numExistingSprints <= 2):
        return
elif (numExistingSprints <= 1):
    return
# Group the completed swarms by sprint, sorted by best err score
completedSwarms = self.getCompletedSwarms()
completedSwarms = [(swarm, self._state['swarms'][swarm], self._state['swarms'][swarm]['bestErrScore']) for swarm in completedSwarms]
completedMatrix = [[] for i in range(numExistingSprints)]
for swarm in completedSwarms:
    completedMatrix[swarm[1]['sprintIdx']].append(swarm)
for sprint in completedMatrix:
    sprint.sort(key=itemgetter(2))
# Same grouping for the active and completing swarms
activeSwarms = self.getActiveSwarms()
activeSwarms.extend(self.getCompletingSwarms())
activeSwarms = [(swarm, self._state['swarms'][swarm], self._state['swarms'][swarm]['bestErrScore']) for swarm in activeSwarms]
activeMatrix = [[] for i in range(numExistingSprints)]
for swarm in activeSwarms:
    activeMatrix[swarm[1]['sprintIdx']].append(swarm)
for sprint in activeMatrix:
    sprint.sort(key=itemgetter(2))
# Kill any speculative swarm that does not contain every encoder of the
#  best swarm from the (now completed) previous sprint
toKill = []
for i in range(1, numExistingSprints):
    for swarm in activeMatrix[i]:
        curSwarmEncoders = swarm[0].split('.')
        if (len(activeMatrix[(i - 1)]) == 0):
            if ((i == 2) and (self._hsObj._tryAll3FieldCombinations or self._hsObj._tryAll3FieldCombinationsWTimestamps)):
                pass
            else:
                bestInPrevious = completedMatrix[(i - 1)][0]
                bestEncoders = bestInPrevious[0].split('.')
                for encoder in bestEncoders:
                    if (not (encoder in curSwarmEncoders)):
                        toKill.append(swarm)
if (len(toKill) > 0):
    print ('ParseMe: Killing encoders:' + str(toKill))
for swarm in toKill:
    self.setSwarmState(swarm[0], 'killed')
return
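The kill rule is a superset test: once sprint i-1 has a winner, a speculative swarm in sprint i survives only if it contains every encoder of that winner. A standalone restatement with hypothetical swarm ids:

bestPrevious = 'encA.encB'                       # winner of sprint i-1
speculative = ['encA.encB.encC', 'encA.encD.encE']

bestEncoders = set(bestPrevious.split('.'))
toKill = [s for s in speculative
          if (not bestEncoders.issubset(s.split('.')))]
print(toKill)   # ['encA.encD.encE'] -- it lacks encB, so it was built on the wrong base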
'If the given sprint exists and is active, return active=True. If the sprint does not exist yet, this call will create it (and return active=True). If it already exists, but is completing or complete, return active=False. If sprintIdx is past the end of the possible sprints, return active=False, noMoreSprints=True. IMPORTANT: When speculative particles are enabled, this call has some special processing to handle speculative sprints: * When creating a new speculative sprint (creating sprint N before sprint N-1 has completed), it initially puts only ONE swarm into the sprint. * Every time it is asked if sprint N is active, it also checks to see if it is time to add another swarm to the sprint, and adds a new swarm if appropriate before returning active=True. * We decide it is time to add a new swarm to a speculative sprint when ALL of the currently active swarms in the sprint have all the workers they need (number of running (not mature) particles is _minParticlesPerSwarm). This means that we have capacity to run additional particles in a new swarm. It is expected that the sprints will be checked IN ORDER from 0 on up. (It is an error not to.) The caller should always try to allocate from the first active sprint it finds. If it can\'t, then it can call this again to find/create the next active sprint. Parameters: retval: (active, noMoreSprints) active: True if the given sprint is active noMoreSprints: True if there are no more sprints possible'
def isSprintActive(self, sprintIdx):
while True:
    numExistingSprints = len(self._state['sprints'])
    # If this sprint already exists, see if it is active
    if (sprintIdx <= (numExistingSprints - 1)):
        if (not self._hsObj._speculativeParticles):
            active = (self._state['sprints'][sprintIdx]['status'] == 'active')
            return (active, False)
        else:
            active = (self._state['sprints'][sprintIdx]['status'] == 'active')
            if (not active):
                return (active, False)
            # Speculation is on: if every active swarm already has its
            #  minimum number of particles, we may have capacity for a new
            #  swarm; otherwise just report active
            activeSwarmIds = self.getActiveSwarms(sprintIdx)
            swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId, matured=False)[0] for swarmId in activeSwarmIds]
            notFullSwarms = [len(swarm) for swarm in swarmSizes if (len(swarm) < self._hsObj._minParticlesPerSwarm)]
            if (len(notFullSwarms) > 0):
                return (True, False)
            if (self._state['lastGoodSprint'] is not None):
                return (False, True)
            if (self._hsObj._fixedFields is not None):
                return (False, True)
    # Get the base encoder set(s) from the prior sprint: the best model's
    #  swarm if that sprint completed, else all non-killed swarms
    if ((sprintIdx > 0) and (self._state['sprints'][(sprintIdx - 1)]['status'] == 'completed')):
        (bestModelId, _) = self.bestModelInCompletedSprint((sprintIdx - 1))
        (particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(bestModelId)
        bestSwarmId = particleState['swarmId']
        baseEncoderSets = [bestSwarmId.split('.')]
    else:
        bestSwarmId = None
        particleState = None
        baseEncoderSets = []
        for swarmId in self.getNonKilledSwarms((sprintIdx - 1)):
            baseEncoderSets.append(swarmId.split('.'))
    # Decide which encoders may be added to the base set(s)
    encoderAddSet = []
    limitFields = False
    if ((self._hsObj._maxBranching > 0) or (self._hsObj._minFieldContribution >= 0)):
        if ((self._hsObj._searchType == HsSearchType.temporal) or (self._hsObj._searchType == HsSearchType.classification)):
            if (sprintIdx >= 1):
                limitFields = True
                baseSprintIdx = 0
        elif (self._hsObj._searchType == HsSearchType.legacyTemporal):
            if (sprintIdx >= 2):
                limitFields = True
                baseSprintIdx = 1
        else:
            raise RuntimeError(('Unimplemented search type %s' % self._hsObj._searchType))
    if limitFields:
        # Only carry forward fields that contributed enough, and only the
        #  top _maxBranching of them
        (pctFieldContributions, absFieldContributions) = self.getFieldContributions()
        toRemove = []
        self.logger.debug(('FieldContributions min: %s' % self._hsObj._minFieldContribution))
        for fieldname in pctFieldContributions:
            if (pctFieldContributions[fieldname] < self._hsObj._minFieldContribution):
                self.logger.debug(('FieldContributions removing: %s' % fieldname))
                toRemove.append(self.getEncoderKeyFromName(fieldname))
            else:
                self.logger.debug(('FieldContributions keeping: %s' % fieldname))
        swarms = self._state['swarms']
        sprintSwarms = [(swarm, swarms[swarm]['bestErrScore']) for swarm in swarms if (swarms[swarm]['sprintIdx'] == baseSprintIdx)]
        sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
        if (self._hsObj._maxBranching > 0):
            sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching]
        for swarm in sprintSwarms:
            swarmEncoders = swarm[0].split('.')
            for encoder in swarmEncoders:
                if (not (encoder in encoderAddSet)):
                    encoderAddSet.append(encoder)
        encoderAddSet = [encoder for encoder in encoderAddSet if (not (str(encoder) in toRemove))]
    else:
        encoderAddSet = self._hsObj._encoderNames
    # Build up the new encoder combinations for the next sprint
    newSwarmIds = set()
    if (((self._hsObj._searchType == HsSearchType.temporal) or (self._hsObj._searchType == HsSearchType.legacyTemporal)) and (sprintIdx == 2) and (self._hsObj._tryAll3FieldCombinations or self._hsObj._tryAll3FieldCombinationsWTimestamps)):
        if self._hsObj._tryAll3FieldCombinations:
            newEncoders = set(self._hsObj._encoderNames)
            if (self._hsObj._predictedFieldEncoder in newEncoders):
                newEncoders.remove(self._hsObj._predictedFieldEncoder)
        else:
            # Make sure the timestamp encoders are part of the mix
            newEncoders = set(encoderAddSet)
            if (self._hsObj._predictedFieldEncoder in newEncoders):
                newEncoders.remove(self._hsObj._predictedFieldEncoder)
            for encoder in self._hsObj._encoderNames:
                if (encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') or encoder.endswith('_dayOfWeek')):
                    newEncoders.add(encoder)
        allCombos = list(itertools.combinations(newEncoders, 2))
        for combo in allCombos:
            newSet = list(combo)
            newSet.append(self._hsObj._predictedFieldEncoder)
            newSet.sort()
            newSwarmId = '.'.join(newSet)
            if (newSwarmId not in self._state['swarms']):
                newSwarmIds.add(newSwarmId)
                # For a speculative sprint, only add the first new swarm
                if (len(self.getActiveSwarms((sprintIdx - 1))) > 0):
                    break
    else:
        for baseEncoderSet in baseEncoderSets:
            for encoder in encoderAddSet:
                if ((encoder not in self._state['blackListedEncoders']) and (encoder not in baseEncoderSet)):
                    newSet = list(baseEncoderSet)
                    newSet.append(encoder)
                    newSet.sort()
                    newSwarmId = '.'.join(newSet)
                    if (newSwarmId not in self._state['swarms']):
                        newSwarmIds.add(newSwarmId)
                        # For a speculative sprint, only add the first new swarm
                        if (len(self.getActiveSwarms((sprintIdx - 1))) > 0):
                            break
    newSwarmIds = sorted(newSwarmIds)
    # If no more swarms can be found for this sprint...
    if (len(newSwarmIds) == 0):
        if (len(self.getAllSwarms(sprintIdx)) > 0):
            return (True, False)
        else:
            return (False, True)
    # Add the new sprint (if necessary) and its new swarms to our state
    self._dirty = True
    if (len(self._state['sprints']) == sprintIdx):
        self._state['sprints'].append({'status': 'active', 'bestModelId': None, 'bestErrScore': None})
    for swarmId in newSwarmIds:
        self._state['swarms'][swarmId] = {'status': 'active', 'bestModelId': None, 'bestErrScore': None, 'sprintIdx': sprintIdx}
    self._state['activeSwarms'] = self.getActiveSwarms()
    success = self.writeStateToDB()
    if success:
        return (True, False)
    # Otherwise another worker raced us; loop back and retry with fresh state
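Swarm ids throughout this method are simply sorted, dot-joined encoder keys, and the all-3-field-combinations branch pairs every two non-predicted encoders with the predicted-field encoder. A sketch of that enumeration with hypothetical encoder names:

import itertools

predicted = 'encoders|consumption'
others = ['encoders|home_winloss',
          'encoders|timestamp_timeOfDay',
          'encoders|visitor_winloss']
newSwarmIds = set()
for combo in itertools.combinations(others, 2):
    newSet = sorted(list(combo) + [predicted])
    newSwarmIds.add('.'.join(newSet))
print(sorted(newSwarmIds))   # three swarm ids, each with two fields plus the predicted field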
'Log \'msg % args\' with severity \'DEBUG\'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)'
def debug(self, msg, *args, **kwargs):
self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs)
'Log \'msg % args\' with severity \'INFO\'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.info("Houston, we have a %s", "interesting problem", exc_info=1)'
def info(self, msg, *args, **kwargs):
self._baseLogger.info(self, self.getExtendedMsg(msg), *args, **kwargs)
'Log \'msg % args\' with severity \'WARNING\'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)'
def warning(self, msg, *args, **kwargs):
self._baseLogger.warning(self, self.getExtendedMsg(msg), *args, **kwargs)
'Log \'msg % args\' with severity \'ERROR\'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.error("Houston, we have a %s", "major problem", exc_info=1)'
def error(self, msg, *args, **kwargs):
self._baseLogger.error(self, self.getExtendedMsg(msg), *args, **kwargs)
'Log \'msg % args\' with severity \'CRITICAL\'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.critical("Houston, we have a %s", "major disaster", exc_info=1)'
def critical(self, msg, *args, **kwargs):
self._baseLogger.critical(self, self.getExtendedMsg(msg), *args, **kwargs)
'Log \'msg % args\' with the integer severity \'level\'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1)'
def log(self, level, msg, *args, **kwargs):
self._baseLogger.log(self, level, self.getExtendedMsg(msg), *args, **kwargs)
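Each wrapper above prepends context via getExtendedMsg and then delegates to the base logging.Logger method, calling it unbound in the old Python 2 style. A compact sketch of the same wrapper pattern; the ExtendedLogger class and its fixed '<worker=42>' prefix are hypothetical:

import logging

class ExtendedLogger(logging.Logger):
    def getExtendedMsg(self, msg):
        return ('<worker=42> ' + msg)   # hypothetical context prefix

    def info(self, msg, *args, **kwargs):
        # Delegate to the base class with the extended message, unbound style
        logging.Logger.info(self, self.getExtendedMsg(msg), *args, **kwargs)

logging.setLoggerClass(ExtendedLogger)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('demo')
log.info('Houston, we have a %s', 'problem')   # logs: <worker=42> Houston, we have a problem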
'Record the best score for a swarm\'s generation index (x). Returns a list of swarmIds to terminate.'
def recordDataPoint(self, swarmId, generation, errScore):
terminatedSwarms = []
# Record the new score for this generation
if (swarmId in self.swarmScores):
    entry = self.swarmScores[swarmId]
    assert (len(entry) == generation)
    entry.append(errScore)
    entry = self.swarmBests[swarmId]
    entry.append(min(errScore, entry[(-1)]))
    assert (len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))
else:
    assert (generation == 0)
    self.swarmScores[swarmId] = [errScore]
    self.swarmBests[swarmId] = [errScore]
# Not enough generations yet to evaluate maturity
if ((generation + 1) < self.MATURITY_WINDOW):
    return terminatedSwarms
if ((self.MAX_GENERATIONS is not None) and (generation > self.MAX_GENERATIONS)):
    self._logger.info(('Swarm %s has matured (more than %d generations). Stopping' % (swarmId, self.MAX_GENERATIONS)))
    terminatedSwarms.append(swarmId)
if self._isTerminationEnabled:
    terminatedSwarms.extend(self._getTerminatedSwarms(generation))
# Terminate if the cumulative best score has not improved in the window
cumulativeBestScores = self.swarmBests[swarmId]
if (cumulativeBestScores[(-1)] == cumulativeBestScores[(- self.MATURITY_WINDOW)]):
    self._logger.info(('Swarm %s has matured (no change in %d generations). Stopping...' % (swarmId, self.MATURITY_WINDOW)))
    terminatedSwarms.append(swarmId)
self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)
return terminatedSwarms
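The maturity test compares the newest cumulative best score against the one MATURITY_WINDOW generations back; if they are equal, nothing improved inside the window and the swarm is considered matured. A worked toy run, with hypothetical scores and a window of 3:

MATURITY_WINDOW = 3
scores = [5.0, 4.0, 4.5, 4.2, 4.8]
bests = []
for s in scores:
    bests.append(s if (not bests) else min(s, bests[-1]))
# bests == [5.0, 4.0, 4.0, 4.0, 4.0]
matured = (bests[-1] == bests[-MATURITY_WINDOW])   # True: no improvement in 3 generations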
'Return the periodic checks used to decide whether the model should continue running. Parameters: terminationFunc: the function that will be called in the model main loop as a wrapper around this function; it must have a parameter called \'index\' Returns: A list of PeriodicActivityRequest objects.'
def getTerminationCallbacks(self, terminationFunc):
activities = ([None] * len(ModelTerminator._MILESTONES))
for (index, (iteration, _)) in enumerate(ModelTerminator._MILESTONES):
    cb = functools.partial(terminationFunc, index=index)
    activities[index] = PeriodicActivityRequest(repeating=False, period=iteration, cb=cb)
# Return the list of callbacks, as documented
return activities
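functools.partial is used here to bake each milestone's index into its callback, so the scheduler can invoke the callback with no arguments. A minimal illustration:

import functools

def terminationFunc(index):
    print('checking milestone %d' % index)

cbs = [functools.partial(terminationFunc, index=i) for i in range(3)]
cbs[1]()   # prints: checking milestone 1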
'Retrieve the requested property as a string. If property does not exist, then KeyError will be raised. Parameters: prop: name of the property retval: property value as a string'
@classmethod
def getString(cls, prop):
if (cls._properties is None):
    cls._readStdConfigFiles()
envValue = os.environ.get(('%s%s' % (cls.envPropPrefix, prop.replace('.', '_'))), None)
if (envValue is not None):
    return envValue
return cls._properties[prop]
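A property can therefore be overridden by exporting an environment variable named prefix plus the property name with dots replaced by underscores. The snippet below assumes the prefix is 'NTA_CONF_PROP_' (the actual value of envPropPrefix is not shown in this section) and uses a hypothetical property name:

import os

envPropPrefix = 'NTA_CONF_PROP_'          # assumed prefix
prop = 'nupic.example.someProperty'       # hypothetical property name
os.environ[envPropPrefix + prop.replace('.', '_')] = '42'

envValue = os.environ.get(('%s%s' % (envPropPrefix, prop.replace('.', '_'))), None)
print(envValue)   # '42' -- takes precedence over the config files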
'Retrieve the requested property and return it as a bool. If property does not exist, then KeyError will be raised. If the property value is neither 0 nor 1, then ValueError will be raised. Parameters: prop: name of the property retval: property value as bool'
@classmethod
def getBool(cls, prop):
value = cls.getInt(prop)
if (value not in (0, 1)):
    raise ValueError(('Expected 0 or 1, but got %r in config property %s' % (value, prop)))
return bool(value)
'Retrieve the requested property and return it as an int. If property does not exist, then KeyError will be raised. Parameters: prop: name of the property retval: property value as int'
@classmethod
def getInt(cls, prop):
return int(cls.getString(prop))
'Retrieve the requested property and return it as a float. If property does not exist, then KeyError will be raised. Parameters: prop: name of the property retval: property value as float'
@classmethod
def getFloat(cls, prop):
return float(cls.getString(prop))
'Get the value of the given configuration property as string. This returns a string which is the property value, or the value of "default" arg if the property is not found. Use Configuration.getString() instead. NOTE: it\'s atypical for our configuration properties to be missing - a missing configuration property is usually a very serious error. Because of this, it\'s preferable to use one of the getString, getInt, getFloat, etc. variants instead of get(). Those variants will raise KeyError when an expected property is missing. Parameters: prop: name of the property default: default value to return if property does not exist retval: property value (as a string), or default if the property does not exist.'
@classmethod
def get(cls, prop, default=None):
try:
    return cls.getString(prop)
except KeyError:
    return default
'Set the value of the given configuration property. Parameters: prop: name of the property value: value to set'
@classmethod
def set(cls, prop, value):
if (cls._properties is None):
    cls._readStdConfigFiles()
cls._properties[prop] = str(value)
'Return a dict containing all of the configuration properties. Parameters: retval: dict containing all configuration properties.'
@classmethod
def dict(cls):
if (cls._properties is None):
    cls._readStdConfigFiles()
result = dict(cls._properties)
keys = os.environ.keys()
replaceKeys = filter((lambda x: x.startswith(cls.envPropPrefix)), keys)
for envKey in replaceKeys:
    key = envKey[len(cls.envPropPrefix):]
    key = key.replace('_', '.')
    result[key] = os.environ[envKey]
return result
'Parse the given XML file and store all properties it describes. Parameters: filename: name of XML file to parse (no path) path: path of the XML file. If None, then use the standard configuration search path.'
@classmethod
def readConfigFile(cls, filename, path=None):
properties = cls._readConfigFile(filename, path)
if (cls._properties is None):
    cls._properties = dict()
for name in properties:
    if ('value' in properties[name]):
        cls._properties[name] = properties[name]['value']
'Parse the given XML file and return a dict describing the file. Parameters: filename: name of XML file to parse (no path) path: path of the XML file. If None, then use the standard configuration search path. retval: returns a dict with each property as a key and a dict of all the property\'s attributes as value'
@classmethod
def _readConfigFile(cls, filename, path=None):
outputProperties = dict()
if (path is None):
    filePath = cls.findConfigFile(filename)
else:
    filePath = os.path.join(path, filename)
try:
    if (filePath is not None):
        try:
            _getLoggerBase().debug('Loading config file: %s', filePath)
            with open(filePath, 'r') as inp:
                contents = inp.read()
        except Exception:
            raise RuntimeError(('Expected configuration file at %s' % filePath))
    else:
        # Fall back to a package resource; user/custom configs may be absent
        try:
            contents = resource_string('nupic.support', filename)
        except Exception as resourceException:
            if (filename in [USER_CONFIG, CUSTOM_CONFIG]):
                contents = '<configuration/>'
            else:
                raise resourceException
    elements = ElementTree.XML(contents)
    if (elements.tag != 'configuration'):
        raise RuntimeError(("Expected top-level element to be 'configuration' but got '%s'" % elements.tag))
    propertyElements = elements.findall('./property')
    for propertyItem in propertyElements:
        propInfo = dict()
        propertyAttributes = list(propertyItem)
        for propertyAttribute in propertyAttributes:
            propInfo[propertyAttribute.tag] = propertyAttribute.text
        name = propInfo.get('name', None)
        if (('value' in propInfo) and (propInfo['value'] is None)):
            value = ''
        else:
            value = propInfo.get('value', None)
        if (value is None):
            if ('novalue' in propInfo):
                continue
            else:
                raise RuntimeError(("Missing 'value' element within the property element: => %s " % str(propInfo)))
        # Expand ${env.VARNAME} references inside the value
        restOfValue = value
        value = ''
        while True:
            pos = restOfValue.find('${env.')
            if (pos == (-1)):
                value += restOfValue
                break
            value += restOfValue[0:pos]
            varTailPos = restOfValue.find('}', pos)
            if (varTailPos == (-1)):
                raise RuntimeError(("Trailing environment variable tag delimiter '}' not found in %r" % restOfValue))
            varname = restOfValue[(pos + 6):varTailPos]
            if (varname not in os.environ):
                raise RuntimeError(('Attempting to use the value of the environment variable %r, which is not defined' % varname))
            envVarValue = os.environ[varname]
            value += envVarValue
            restOfValue = restOfValue[(varTailPos + 1):]
        if (name is None):
            raise RuntimeError(("Missing 'name' element within following property element:\n => %s " % str(propInfo)))
        propInfo['value'] = value
        outputProperties[name] = propInfo
    return outputProperties
except Exception:
    _getLoggerBase().exception('Error while parsing configuration file: %s.', filePath)
    raise
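The parser expects a top-level <configuration> element with <property> children, each holding <name> and either <value> or <novalue/>, and ${env.VAR} references inside values get expanded. A runnable sketch that parses such a document the same way; the property names and values are hypothetical:

from xml.etree import ElementTree

contents = """
<configuration>
  <property>
    <name>nupic.example.workDir</name>
    <value>${env.HOME}/nta/work</value>
  </property>
  <property>
    <name>nupic.example.placeholder</name>
    <novalue/>
  </property>
</configuration>
"""
elements = ElementTree.XML(contents)
for prop in elements.findall('./property'):
    info = dict(((child.tag, child.text) for child in list(prop)))
    # The real parser would now expand ${env.HOME} and skip <novalue/> entries
    print('%s = %r' % (info.get('name'), info.get('value')))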
'Clear out the entire configuration.'
@classmethod
def clear(cls):
cls._properties = None
cls._configPaths = None
'Search the configuration path (specified via the NTA_CONF_PATH environment variable) for the given filename. If found, return the complete path to the file. Parameters: filename: name of file to locate retval: complete path to the file, or None if the file was not found'
@classmethod
def findConfigFile(cls, filename):
paths = cls.getConfigPaths()
for p in paths:
    testPath = os.path.join(p, filename)
    if os.path.isfile(testPath):
        return testPath
'Return the list of paths to search for configuration files. Parameters: retval: list of paths.'
@classmethod
def getConfigPaths(cls):
configPaths = []
if (cls._configPaths is not None):
    return cls._configPaths
else:
    if ('NTA_CONF_PATH' in os.environ):
        configVar = os.environ['NTA_CONF_PATH']
        configPaths = configVar.split(os.pathsep)
    return configPaths
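NTA_CONF_PATH holds one or more directories joined with the platform path separator (':' on POSIX, ';' on Windows), searched in order. For example, with hypothetical paths:

import os

os.environ['NTA_CONF_PATH'] = os.pathsep.join(['/etc/nupic/conf', '/home/user/nta/conf'])
configPaths = os.environ['NTA_CONF_PATH'].split(os.pathsep)
print(configPaths)   # ['/etc/nupic/conf', '/home/user/nta/conf']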
'Modify the paths we use to search for configuration files. Parameters: paths: list of paths to search for config files.'
@classmethod
def setConfigPaths(cls, paths):
cls._configPaths = list(paths)