Columns: desc (docstring, string lengths 3 to 26.7k), decl (declaration, string lengths 11 to 7.89k), bodies (function body, string lengths 8 to 553k).
'Run one iteration of TMRegion\'s compute'
def _compute(self, inputs, outputs):
if (self._tfdr is None): raise RuntimeError('TM has not been initialized') self._conditionalBreak() self._iterations += 1 buInputVector = inputs['bottomUpIn'] resetSignal = False if ('resetIn' in inputs): assert (len(inputs['resetIn']) == 1) if (inputs['resetIn'][0] != 0): self._tfdr.reset() self._sequencePos = 0 if self.computePredictedActiveCellIndices: prevPredictedState = self._tfdr.getPredictedState().reshape((-1)).astype('float32') if self.anomalyMode: prevPredictedColumns = self._tfdr.topDownCompute().copy().nonzero()[0] tpOutput = self._tfdr.compute(buInputVector, self.learningMode, self.inferenceMode) self._sequencePos += 1 if self.orColumnOutputs: tpOutput = tpOutput.reshape(self.columnCount, self.cellsPerColumn).max(axis=1) if self._fpLogTPOutput: output = tpOutput.reshape((-1)) outputNZ = tpOutput.nonzero()[0] outStr = ' '.join([('%d' % int(token)) for token in outputNZ]) print >>self._fpLogTPOutput, output.size, outStr outputs['bottomUpOut'][:] = tpOutput.flat if self.topDownMode: outputs['topDownOut'][:] = self._tfdr.topDownCompute().copy() if self.anomalyMode: activeLearnCells = self._tfdr.getLearnActiveStateT() size = (activeLearnCells.shape[0] * activeLearnCells.shape[1]) outputs['lrnActiveStateT'][:] = activeLearnCells.reshape(size) activeColumns = buInputVector.nonzero()[0] outputs['anomalyScore'][:] = anomaly.computeRawAnomalyScore(activeColumns, prevPredictedColumns) if self.computePredictedActiveCellIndices: activeState = self._tfdr._getActiveState().reshape((-1)).astype('float32') activeIndices = numpy.where((activeState != 0))[0] predictedIndices = numpy.where((prevPredictedState != 0))[0] predictedActiveIndices = numpy.intersect1d(activeIndices, predictedIndices) outputs['activeCells'].fill(0) outputs['activeCells'][activeIndices] = 1 outputs['predictedActiveCells'].fill(0) outputs['predictedActiveCells'][predictedActiveIndices] = 1
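The anomaly path above delegates to anomaly.computeRawAnomalyScore(activeColumns, prevPredictedColumns). Below is a minimal, self-contained sketch of that calculation (the fraction of currently active columns that were not predicted on the previous time step); the helper name rawAnomaly and the sample column indices are illustrative and not part of NuPIC.

import numpy

def rawAnomaly(activeColumns, prevPredictedColumns):
  # Fraction of active columns that were NOT predicted at t-1:
  # 0.0 when every active column was predicted, 1.0 when none were.
  if len(activeColumns) == 0:
    return 0.0
  overlap = numpy.intersect1d(activeColumns, prevPredictedColumns)
  return 1.0 - float(len(overlap)) / len(activeColumns)

# Example: 4 active columns, 3 of them predicted -> score 0.25
print(rawAnomaly(numpy.array([2, 5, 7, 9]), numpy.array([2, 5, 9, 11])))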
'Doesn\'t include the spatial, temporal and other parameters. :returns: (dict) the base Spec for TMRegion.'
@classmethod def getBaseSpec(cls):
spec = dict(description=TMRegion.__doc__, singleNodeOnly=True, inputs=dict(bottomUpIn=dict(description='The input signal, conceptually organized as an\n image pyramid data structure, but internally\n organized as a flattened vector.', dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), resetIn=dict(description='Effectively a boolean flag that indicates whether\n or not the input vector received in this compute cycle\n represents the first training presentation in a\n new temporal sequence.', dataType='Real32', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), sequenceIdIn=dict(description='Sequence ID', dataType='UInt64', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False)), outputs=dict(bottomUpOut=dict(description='The output signal generated from the bottom-up inputs\n from lower levels.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True), topDownOut=dict(description='The top-down inputsignal, generated from\n feedback from upper levels', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), activeCells=dict(description='The cells that are active', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), predictedActiveCells=dict(description='The cells that are active and predicted', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), anomalyScore=dict(description="The score for how 'anomalous' (i.e. rare) the current\n sequence is. Higher values are increasingly rare", dataType='Real32', count=1, regionLevel=True, isDefaultOutput=False), lrnActiveStateT=dict(description='Active cells during learn phase at time t. This is\n used for anomaly classification.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False)), parameters=dict(breakPdb=dict(description='Set to 1 to stop in the pdb debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), breakKomodo=dict(description='Set to 1 to stop in the Komodo debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite')), commands={}) return spec
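For reference, a short snippet showing how the spec dict returned above might be inspected; the import path nupic.regions.tm_region is an assumption about where TMRegion lives, and the printed keys simply restate the entries defined in getBaseSpec().

from nupic.regions.tm_region import TMRegion   # assumed module path

spec = TMRegion.getBaseSpec()
print(sorted(spec['inputs']))              # ['bottomUpIn', 'resetIn', 'sequenceIdIn']
print(sorted(spec['outputs']))             # ['activeCells', 'anomalyScore', 'bottomUpOut', ...]
print(spec['inputs']['resetIn']['count'])  # 1 -- a single reset flag per compute cycle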
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`. The parameters collection is constructed based on the parameters specified by the various components (spatialSpec, temporalSpec and otherSpec)'
@classmethod def getSpec(cls):
spec = cls.getBaseSpec() (t, o) = _getAdditionalSpecs(temporalImp=gDefaultTemporalImp) spec['parameters'].update(t) spec['parameters'].update(o) return spec
':returns: instance of the underlying :class:`~nupic.algorithms.temporal_memory.TemporalMemory` algorithm object.'
def getAlgorithmInstance(self):
return self._tfdr
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameter`. Get the value of a parameter. Most parameters are handled automatically by :class:`~nupic.bindings.regions.PyRegion.PyRegion`\'s parameter get mechanism. The ones that need special treatment are explicitly handled here.'
def getParameter(self, parameterName, index=(-1)):
if parameterName in self._temporalArgNames:
  return getattr(self._tfdr, parameterName)
else:
  return PyRegion.getParameter(self, parameterName, index)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.'
def setParameter(self, parameterName, index, parameterValue):
if parameterName in self._temporalArgNames:
  setattr(self._tfdr, parameterName, parameterValue)
elif parameterName == 'logPathOutput':
  self.logPathOutput = parameterValue
  if self._fpLogTPOutput is not None:
    self._fpLogTPOutput.close()
    self._fpLogTPOutput = None
  if parameterValue:
    self._fpLogTPOutput = open(self.logPathOutput, 'w')
elif hasattr(self, parameterName):
  setattr(self, parameterName, parameterValue)
else:
  raise Exception('Unknown parameter: ' + parameterName)
'Resets the region\'s sequence states.'
def resetSequenceStates(self):
self._tfdr.reset()
self._sequencePos = 0
return
'Perform an internal optimization step that speeds up inference if we know learning will not be performed anymore. This call may, for example, remove all potential inputs to each column.'
def finishLearning(self):
if self._tfdr is None:
  raise RuntimeError('Temporal memory has not been initialized')
if hasattr(self._tfdr, 'finishLearning'):
  self.resetSequenceStates()
  self._tfdr.finishLearning()
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.'
@staticmethod def getSchema():
return TMRegionProto
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`. Write state to proto object. :param proto: TMRegionProto capnproto object'
def writeToProto(self, proto):
proto.temporalImp = self.temporalImp proto.columnCount = self.columnCount proto.inputWidth = self.inputWidth proto.cellsPerColumn = self.cellsPerColumn proto.learningMode = self.learningMode proto.inferenceMode = self.inferenceMode proto.anomalyMode = self.anomalyMode proto.topDownMode = self.topDownMode proto.computePredictedActiveCellIndices = self.computePredictedActiveCellIndices proto.orColumnOutputs = self.orColumnOutputs if (self.temporalImp == 'py'): tmProto = proto.init('backtrackingTM') elif (self.temporalImp == 'cpp'): tmProto = proto.init('backtrackingTMCpp') elif (self.temporalImp == 'tm_py'): tmProto = proto.init('temporalMemory') elif (self.temporalImp == 'tm_cpp'): tmProto = proto.init('temporalMemory') else: raise TypeError('Unsupported temporalImp for capnp serialization: {}'.format(self.temporalImp)) self._tfdr.write(tmProto)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`. Read state from proto object. :param proto: TMRegionProto capnproto object'
@classmethod def readFromProto(cls, proto):
instance = cls(proto.columnCount, proto.inputWidth, proto.cellsPerColumn) instance.temporalImp = proto.temporalImp instance.learningMode = proto.learningMode instance.inferenceMode = proto.inferenceMode instance.anomalyMode = proto.anomalyMode instance.topDownMode = proto.topDownMode instance.computePredictedActiveCellIndices = proto.computePredictedActiveCellIndices instance.orColumnOutputs = proto.orColumnOutputs if (instance.temporalImp == 'py'): tmProto = proto.backtrackingTM elif (instance.temporalImp == 'cpp'): tmProto = proto.backtrackingTMCpp elif (instance.temporalImp == 'tm_py'): tmProto = proto.temporalMemory elif (instance.temporalImp == 'tm_cpp'): tmProto = proto.temporalMemory else: raise TypeError('Unsupported temporalImp for capnp serialization: {}'.format(instance.temporalImp)) instance._tfdr = _getTPClass(proto.temporalImp).read(tmProto) return instance
'Return serializable state. This function will return a version of the __dict__ with all "ephemeral" members stripped out. "Ephemeral" members are defined as those that do not need to be (nor should be) stored in any kind of persistent file (e.g., NuPIC network XML file.)'
def __getstate__(self):
state = self.__dict__.copy()
for ephemeralMemberName in self._getEphemeralMembersAll():
  state.pop(ephemeralMemberName, None)
return state
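The ephemeral-stripping idiom above is generic pickle machinery. Here is a small, self-contained sketch of the same pattern with a toy class; the names LoggerHolder and _fp are invented for illustration and are unrelated to TMRegion's actual members.

import os
import pickle
import tempfile

class LoggerHolder(object):
  """Toy class whose '_fp' file handle is ephemeral and must not be pickled."""

  def __init__(self, path):
    self.path = path
    self._fp = open(path, 'a')            # unpicklable ephemeral member

  def __getstate__(self):
    state = self.__dict__.copy()
    for name in ('_fp',):                 # the ephemeral member list
      state.pop(name, None)
    return state

  def __setstate__(self, state):
    self.__dict__.update(state)
    self._fp = open(self.path, 'a')       # re-create the ephemeral on load

logPath = os.path.join(tempfile.gettempdir(), 'ephemeral_demo.log')
restored = pickle.loads(pickle.dumps(LoggerHolder(logPath)))
print(restored.path)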
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.serializeExtraData`.'
def serializeExtraData(self, filePath):
if (self._tfdr is not None): self._tfdr.saveToFile(filePath)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.deSerializeExtraData`. This method is called during network deserialization with an external filename that can be used to bypass pickle for loading large binary states. :param filePath: (string) absolute file path'
def deSerializeExtraData(self, filePath):
if (self._tfdr is not None): self._tfdr.loadFromFile(filePath)
'Set the state of ourself from a serialized state.'
def __setstate__(self, state):
if not hasattr(self, 'storeDenseOutput'):
  self.storeDenseOutput = False
if not hasattr(self, 'computePredictedActiveCellIndices'):
  self.computePredictedActiveCellIndices = False
self.__dict__.update(state)
self._loaded = True
self._initialize()
'Initialize all ephemerals used by derived classes.'
def _initEphemerals(self):
self._sequencePos = 0 self._fpLogTPOutput = None self.logPathOutput = None
'Callback that returns a list of all "ephemeral" members (i.e., data members that should not and/or cannot be pickled.)'
def _getEphemeralMembers(self):
return ['_sequencePos', '_fpLogTPOutput', 'logPathOutput']
'Returns list of all ephemeral members.'
def _getEphemeralMembersBase(self):
return ['_loaded', '_profileObj', '_iterations']
'Returns a concatenated list of both the standard base class ephemeral members, as well as any additional ephemeral members (e.g., file handles, etc.).'
def _getEphemeralMembersAll(self):
return (self._getEphemeralMembersBase() + self._getEphemeralMembers())
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.'
def getOutputElementCount(self, name):
if name == 'bottomUpOut':
  return self.outputWidth
elif name == 'topDownOut':
  return self.columnCount
elif name == 'lrnActiveStateT':
  return self.outputWidth
elif name == 'activeCells':
  return self.outputWidth
elif name == 'predictedActiveCells':
  return self.outputWidth
else:
  raise Exception('Invalid output name specified')
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArrayCount`.'
def getParameterArrayCount(self, name, index):
p = self.getParameter(name) if (not hasattr(p, '__len__')): raise Exception(("Attempt to access parameter '%s' as an array but it is not an array" % name)) return len(p)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.'
def getParameterArray(self, name, index, a):
p = self.getParameter(name) if (not hasattr(p, '__len__')): raise Exception(("Attempt to access parameter '%s' as an array but it is not an array" % name)) if (len(p) > 0): a[:] = p[:]
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.'
@classmethod def getSpec(cls):
ns = dict(description=SDRClassifierRegion.__doc__, singleNodeOnly=True, inputs=dict(actValueIn=dict(description='Actual value of the field to predict. Only taken into account if the input has no category field.', dataType='Real32', count=0, required=False, regionLevel=False, isDefaultInput=False, requireSplitterMap=False), bucketIdxIn=dict(description='Active index of the encoder bucket for the actual value of the field to predict. Only taken into account if the input has no category field.', dataType='UInt64', count=0, required=False, regionLevel=False, isDefaultInput=False, requireSplitterMap=False), categoryIn=dict(description='Vector of categories of the input sample', dataType='Real32', count=0, required=True, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), bottomUpIn=dict(description="Belief values over children's groups", dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), predictedActiveCells=dict(description='The cells that are active and predicted', dataType='Real32', count=0, required=True, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), sequenceIdIn=dict(description='Sequence ID', dataType='UInt64', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False)), outputs=dict(categoriesOut=dict(description='Classification results', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False, requireSplitterMap=False), actualValues=dict(description='Classification results', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False, requireSplitterMap=False), probabilities=dict(description='Classification results', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False, requireSplitterMap=False)), parameters=dict(learningMode=dict(description='Boolean (0/1) indicating whether or not a region is in learning mode.', dataType='UInt32', count=1, constraints='bool', defaultValue=1, accessMode='ReadWrite'), inferenceMode=dict(description='Boolean (0/1) indicating whether or not a region is in inference mode.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), maxCategoryCount=dict(description='The maximal number of categories the classifier will distinguish between.', dataType='UInt32', required=True, count=1, constraints='', defaultValue=2000, accessMode='Create'), steps=dict(description='Comma separated list of the desired steps of prediction that the classifier should learn', dataType='Byte', count=0, constraints='', defaultValue='0', accessMode='Create'), alpha=dict(description='The alpha is the learning rate of the classifier.lower alpha results in longer term memory and slower learning', dataType='Real32', count=1, constraints='', defaultValue=0.001, accessMode='Create'), implementation=dict(description='The classifier implementation to use.', accessMode='ReadWrite', dataType='Byte', count=0, constraints='enum: py, cpp'), verbosity=dict(description='An integer that controls the verbosity level, 0 means no verbose output, increasing integers provide more verbosity.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='ReadWrite')), commands=dict()) return ns
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`. Is called once by NuPIC before the first call to compute(). Initializes self._sdrClassifier if it is not already initialized.'
def initialize(self):
if (self._sdrClassifier is None): self._sdrClassifier = SDRClassifierFactory.create(steps=self.stepsList, alpha=self.alpha, verbosity=self.verbosity, implementation=self.implementation)
':returns: instance of the underlying SDR classifier algorithm object (e.g., :class:`~nupic.algorithms.sdr_classifier.SDRClassifier`).'
def getAlgorithmInstance(self):
return self._sdrClassifier
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.'
def getParameter(self, name, index=(-1)):
return PyRegion.getParameter(self, name, index)
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.'
def setParameter(self, name, index, value):
if name == 'learningMode':
  self.learningMode = bool(int(value))
elif name == 'inferenceMode':
  self.inferenceMode = bool(int(value))
else:
  return PyRegion.setParameter(self, name, index, value)
':returns: the pycapnp proto type that the class uses for serialization.'
@staticmethod def getSchema():
return SDRClassifierRegionProto
'Write state to proto object. :param proto: SDRClassifierRegionProto capnproto object'
def writeToProto(self, proto):
proto.implementation = self.implementation proto.steps = self.steps proto.alpha = self.alpha proto.verbosity = self.verbosity proto.maxCategoryCount = self.maxCategoryCount self._sdrClassifier.write(proto.sdrClassifier)
'Read state from proto object. :param proto: SDRClassifierRegionProto capnproto object'
@classmethod def readFromProto(cls, proto):
instance = cls() instance.implementation = proto.implementation instance.steps = proto.steps instance.alpha = proto.alpha instance.verbosity = proto.verbosity instance.maxCategoryCount = proto.maxCategoryCount instance._sdrClassifier = SDRClassifierFactory.read(proto) return instance
'Process one input sample. This method is called by the runtime engine. :param inputs: (dict) mapping region input names to numpy.array values :param outputs: (dict) mapping region output names to numpy.arrays that should be populated with output values by this method'
def compute(self, inputs, outputs):
self._computeFlag = True patternNZ = inputs['bottomUpIn'].nonzero()[0] if self.learningMode: categories = [category for category in inputs['categoryIn'] if (category >= 0)] if (len(categories) > 0): bucketIdxList = [] actValueList = [] for category in categories: bucketIdxList.append(int(category)) if ('actValueIn' not in inputs): actValueList.append(int(category)) else: actValueList.append(float(inputs['actValueIn'])) classificationIn = {'bucketIdx': bucketIdxList, 'actValue': actValueList} else: if ('bucketIdxIn' not in inputs): raise KeyError('Network link missing: bucketIdxOut -> bucketIdxIn') if ('actValueIn' not in inputs): raise KeyError('Network link missing: actValueOut -> actValueIn') classificationIn = {'bucketIdx': int(inputs['bucketIdxIn']), 'actValue': float(inputs['actValueIn'])} else: classificationIn = {'actValue': 0, 'bucketIdx': 0} clResults = self._sdrClassifier.compute(recordNum=self.recordNum, patternNZ=patternNZ, classification=classificationIn, learn=self.learningMode, infer=self.inferenceMode) if ((clResults is not None) and (len(clResults) > 0)): outputs['actualValues'][:len(clResults['actualValues'])] = clResults['actualValues'] for step in self.stepsList: stepIndex = self.stepsList.index(step) categoryOut = clResults['actualValues'][clResults[step].argmax()] outputs['categoriesOut'][stepIndex] = categoryOut stepProbabilities = clResults[step] for categoryIndex in xrange(self.maxCategoryCount): flatIndex = (categoryIndex + (stepIndex * self.maxCategoryCount)) if (categoryIndex < len(stepProbabilities)): outputs['probabilities'][flatIndex] = stepProbabilities[categoryIndex] else: outputs['probabilities'][flatIndex] = 0.0 self.recordNum += 1
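The probabilities output written above is a flat buffer holding one block of maxCategoryCount likelihoods per prediction step, addressed as flatIndex = categoryIndex + stepIndex * maxCategoryCount. A small standalone illustration of that layout follows; the step list and likelihood values are made up.

import numpy

stepsList = [1, 5]          # e.g. 1-step and 5-step predictions
maxCategoryCount = 4
probabilities = numpy.zeros(len(stepsList) * maxCategoryCount)

# Pretend the classifier returned these likelihoods per step.
clResults = {1: numpy.array([0.1, 0.7, 0.2]),
             5: numpy.array([0.5, 0.25, 0.25])}

for stepIndex, step in enumerate(stepsList):
  stepProbabilities = clResults[step]
  for categoryIndex in range(maxCategoryCount):
    flatIndex = categoryIndex + stepIndex * maxCategoryCount
    if categoryIndex < len(stepProbabilities):
      probabilities[flatIndex] = stepProbabilities[categoryIndex]

print(probabilities)   # [0.1  0.7  0.2  0.   0.5  0.25 0.25 0.  ]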
'Just return the inference value from one input sample. The actual learning happens in compute() -- if, and only if, learning is enabled -- which is called when you run the network. .. warning:: This method is deprecated and exists only to maintain backward compatibility; it will be removed. Use :meth:`nupic.engine.Network.run` instead, which will call :meth:`~nupic.regions.sdr_classifier_region.compute`. :param recordNum: (int) Record number of the input sample. :param patternNZ: (list) of the active indices from the output below :param classification: (dict) of the classification information: * ``bucketIdx``: index of the encoder bucket * ``actValue``: actual value going into the encoder :returns: (dict) containing inference results, one entry for each step in ``self.steps``. The key is the number of steps, the value is an array containing the relative likelihood for each ``bucketIdx`` starting from 0. For example: {\'actualValues\': [0.0, 1.0, 2.0, 3.0], 1: [0.1, 0.3, 0.2, 0.7], 4: [0.2, 0.4, 0.3, 0.5]}'
def customCompute(self, recordNum, patternNZ, classification):
if not hasattr(self, '_computeFlag'):
  self._computeFlag = False
if self._computeFlag:
  warnings.simplefilter('error', DeprecationWarning)
  warnings.warn('The customCompute() method should not be called at the same '
                'time as the compute() method. The compute() method is called '
                'whenever network.run() is called.', DeprecationWarning)
return self._sdrClassifier.compute(recordNum, patternNZ, classification,
                                   self.learningMode, self.inferenceMode)
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.'
def getOutputElementCount(self, outputName):
if (outputName == 'categoriesOut'): return len(self.stepsList) elif (outputName == 'probabilities'): return (len(self.stepsList) * self.maxCategoryCount) elif (outputName == 'actualValues'): return self.maxCategoryCount else: raise ValueError('Unknown output {}.'.format(outputName))
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.'
@classmethod def getSpec(cls):
ns = dict(description=KNNAnomalyClassifierRegion.__doc__, singleNodeOnly=True, inputs=dict(spBottomUpOut=dict(description='The output signal generated from the bottom-up inputs\n from lower levels.', dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), tpTopDownOut=dict(description='The top-down inputsignal, generated from\n feedback from upper levels', dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), tpLrnActiveStateT=dict(description='Active cells in the learn state at time T from TM.\n This is used to classify on.', dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), sequenceIdIn=dict(description='Sequence ID', dataType='UInt64', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False)), outputs=dict(), parameters=dict(trainRecords=dict(description='Number of records to wait for training', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), anomalyThreshold=dict(description='Threshold used to classify anomalies.', dataType='Real32', count=1, constraints='', defaultValue=0, accessMode='Create'), cacheSize=dict(description='Number of records to store in cache.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), classificationVectorType=dict(description='Vector type to use when classifying.\n 1 - Vector Column with Difference (TM and SP)\n ', dataType='UInt32', count=1, constraints='', defaultValue=1, accessMode='ReadWrite'), activeColumnCount=dict(description='Number of active columns in a given step. Typically\n equivalent to SP.numActiveColumnsPerInhArea', dataType='UInt32', count=1, constraints='', defaultValue=40, accessMode='ReadWrite'), classificationMaxDist=dict(description='Maximum distance a sample can be from an anomaly\n in the classifier to be labeled as an anomaly.\n\n Ex: With rawOverlap distance, a value of 0.65 means that the points\n must be at most a distance 0.65 apart from each other. This\n translates to they must be at least 35% similar.', dataType='Real32', count=1, constraints='', defaultValue=0.65, accessMode='Create')), commands=dict(getLabels=dict(description='Returns a list of label dicts with properties ROWID and labels.ROWID corresponds to the records id and labels is a list of strings representing the records labels. Takes additional integer properties start and end representing the range that will be returned.'), addLabel=dict(description='Takes parameters start, end and labelName. Adds the label labelName to the records from start to end. This will recalculate labels from end to the most recent record.'), removeLabels=dict(description='Takes additional parameters start, end, labelFilter. Start and end correspond to range to remove the label. Remove labels from each record with record ROWID in range from start to end, noninclusive of end. Removes all records if labelFilter is None, otherwise only removes the labels eqaul to labelFilter.'))) ns['parameters'].update(KNNClassifierRegion.getSpec()['parameters']) return ns
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.'
def getParameter(self, name, index=(-1)):
if (name == 'trainRecords'): return self.trainRecords elif (name == 'anomalyThreshold'): return self.anomalyThreshold elif (name == 'activeColumnCount'): return self._activeColumnCount elif (name == 'classificationMaxDist'): return self._classificationMaxDist else: return PyRegion.getParameter(self, name, index)
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.'
def setParameter(self, name, index, value):
if (name == 'trainRecords'): if (not (isinstance(value, float) or isinstance(value, int))): raise HTMPredictionModelInvalidArgument(("Invalid argument type '%s'. threshold must be a number." % type(value))) if ((len(self._recordsCache) > 0) and (value < self._recordsCache[0].ROWID)): raise HTMPredictionModelInvalidArgument(('Invalid value. autoDetectWaitRecord value must be valid record within output stream. Current minimum ROWID in output stream is %d.' % self._recordsCache[0].ROWID)) self.trainRecords = value self._deleteRangeFromKNN(0, self._recordsCache[0].ROWID) self._classifyStates() elif (name == 'anomalyThreshold'): if (not (isinstance(value, float) or isinstance(value, int))): raise HTMPredictionModelInvalidArgument(("Invalid argument type '%s'. threshold must be a number." % type(value))) self.anomalyThreshold = value self._classifyStates() elif (name == 'classificationMaxDist'): if (not (isinstance(value, float) or isinstance(value, int))): raise HTMPredictionModelInvalidArgument(("Invalid argument type '%s'. classificationMaxDist must be a number." % type(value))) self._classificationMaxDist = value self._classifyStates() elif (name == 'activeColumnCount'): self._activeColumnCount = value else: return PyRegion.setParameter(self, name, index, value)
'Process one input sample. This method is called by the runtime engine.'
def compute(self, inputs, outputs):
record = self._constructClassificationRecord(inputs)
if record.ROWID >= self.getParameter('trainRecords'):
  self._classifyState(record)
self._recordsCache.append(record)
while len(self._recordsCache) > self.cacheSize:
  self._recordsCache.pop(0)
self.labelResults = record.anomalyLabel
self._iteration += 1
'Get the labels of the previously computed record. :returns: (list) of strings representing the classification labels'
def getLabelResults(self):
return self.labelResults
'Reclassifies all internal state'
def _classifyStates(self):
for state in self._recordsCache: self._classifyState(state)
'Reclassifies given state.'
def _classifyState(self, state):
if (state.ROWID < self.getParameter('trainRecords')): if (not state.setByUser): state.anomalyLabel = [] self._deleteRecordsFromKNN([state]) return label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL autoLabel = (label + KNNAnomalyClassifierRegion.AUTO_TAG) newCategory = self._recomputeRecordFromKNN(state) labelList = self._categoryToLabelList(newCategory) if state.setByUser: if (label in state.anomalyLabel): state.anomalyLabel.remove(label) if (autoLabel in state.anomalyLabel): state.anomalyLabel.remove(autoLabel) labelList.extend(state.anomalyLabel) if (state.anomalyScore >= self.getParameter('anomalyThreshold')): labelList.append(label) elif (label in labelList): ind = labelList.index(label) labelList[ind] = autoLabel labelList = list(set(labelList)) if ((label in labelList) and (autoLabel in labelList)): labelList.remove(autoLabel) if (state.anomalyLabel == labelList): return state.anomalyLabel = labelList if (state.anomalyLabel == []): self._deleteRecordsFromKNN([state]) else: self._addRecordToKNN(state)
'Construct a _HTMClassificationRecord based on the state of the model passed in through the inputs. Types for self.classificationVectorType: 1 - TM active cells in learn state 2 - SP columns concatenated with error from TM column predictions and SP'
def _constructClassificationRecord(self, inputs):
allSPColumns = inputs['spBottomUpOut'] activeSPColumns = allSPColumns.nonzero()[0] score = anomaly.computeRawAnomalyScore(activeSPColumns, self._prevPredictedColumns) spSize = len(allSPColumns) allTPCells = inputs['tpTopDownOut'] tpSize = len(inputs['tpLrnActiveStateT']) classificationVector = numpy.array([]) if (self.classificationVectorType == 1): classificationVector = numpy.zeros(tpSize) activeCellMatrix = inputs['tpLrnActiveStateT'].reshape(tpSize, 1) activeCellIdx = numpy.where((activeCellMatrix > 0))[0] if (activeCellIdx.shape[0] > 0): classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1 elif (self.classificationVectorType == 2): classificationVector = numpy.zeros((spSize + spSize)) if (activeSPColumns.shape[0] > 0): classificationVector[activeSPColumns] = 1.0 errorColumns = numpy.setdiff1d(self._prevPredictedColumns, activeSPColumns) if (errorColumns.shape[0] > 0): errorColumnIndexes = (numpy.array(errorColumns, dtype=numpy.uint16) + spSize) classificationVector[errorColumnIndexes] = 1.0 else: raise TypeError(("Classification vector type must be either 'tpc' or 'sp_tpe', current value is %s" % self.classificationVectorType)) numPredictedCols = len(self._prevPredictedColumns) predictedColumns = allTPCells.nonzero()[0] self._prevPredictedColumns = copy.deepcopy(predictedColumns) if (self._anomalyVectorLength is None): self._anomalyVectorLength = len(classificationVector) result = _CLAClassificationRecord(ROWID=self._iteration, anomalyScore=score, anomalyVector=classificationVector.nonzero()[0].tolist(), anomalyLabel=[]) return result
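For classificationVectorType == 2, the vector built above is twice the SP width: the first half flags the currently active SP columns and the second half flags columns that were predicted at t-1 but did not become active (the "error" columns). A self-contained sketch with a 10-column SP follows; all values are invented.

import numpy

spSize = 10
activeSPColumns = numpy.array([1, 4, 7])
prevPredictedColumns = numpy.array([1, 4, 8])      # column 8 was predicted but not active

classificationVector = numpy.zeros(spSize + spSize)
classificationVector[activeSPColumns] = 1.0

errorColumns = numpy.setdiff1d(prevPredictedColumns, activeSPColumns)
classificationVector[errorColumns + spSize] = 1.0  # offset into the second half

print(classificationVector.nonzero()[0])           # [ 1  4  7 18]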
'Adds the record to the KNN classifier.'
def _addRecordToKNN(self, record):
knn = self._knnclassifier._knn
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
category = self._labelListToCategoryNumber(record.anomalyLabel)
if record.ROWID in prototype_idx:
  knn.prototypeSetCategory(record.ROWID, category)
  return
pattern = self._getStateAnomalyVector(record)
rowID = record.ROWID
knn.learn(pattern, category, rowID=rowID)
'Removes the given records from the classifier. Parameters: recordsToDelete - list of records to delete from the classifier'
def _deleteRecordsFromKNN(self, recordsToDelete):
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
idsToDelete = [r.ROWID for r in recordsToDelete
               if not r.setByUser and r.ROWID in prototype_idx]
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete)
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)
'Removes any stored records within the range from start to end. Noninclusive of end. Parameters: start - integer representing the ROWID of the start of the deletion range, end - integer representing the ROWID of the end of the deletion range; if None, it defaults to one past the highest stored ROWID.'
def _deleteRangeFromKNN(self, start=0, end=None):
prototype_idx = numpy.array(
    self._knnclassifier.getParameter('categoryRecencyList'))
if end is None:
  end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
                                   prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete.tolist())
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)
'returns the classified labeling of record'
def _recomputeRecordFromKNN(self, record):
inputs = {'categoryIn': [None], 'bottomUpIn': self._getStateAnomalyVector(record)} outputs = {'categoriesOut': numpy.zeros((1,)), 'bestPrototypeIndices': numpy.zeros((1,)), 'categoryProbabilitiesOut': numpy.zeros((1,))} classifier_indexes = numpy.array(self._knnclassifier.getParameter('categoryRecencyList')) valid_idx = numpy.where(((classifier_indexes >= self.getParameter('trainRecords')) & (classifier_indexes < record.ROWID)))[0].tolist() if (len(valid_idx) == 0): return None self._knnclassifier.setParameter('inferenceMode', None, True) self._knnclassifier.setParameter('learningMode', None, False) self._knnclassifier.compute(inputs, outputs) self._knnclassifier.setParameter('learningMode', None, True) classifier_distances = self._knnclassifier.getLatestDistances() valid_distances = classifier_distances[valid_idx] if (valid_distances.min() <= self._classificationMaxDist): classifier_indexes_prev = classifier_indexes[valid_idx] rowID = classifier_indexes_prev[valid_distances.argmin()] indexID = numpy.where((classifier_indexes == rowID))[0][0] category = self._knnclassifier.getCategoryList()[indexID] return category return None
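The selection above amounts to: restrict the KNN prototypes to ROWIDs in the trained range that precede the record being reclassified, then accept the nearest prototype only if its distance is within classificationMaxDist. A standalone numpy sketch of that selection follows; the ROWIDs and distances are made up.

import numpy

categoryRecencyList = numpy.array([10, 25, 40, 55])   # ROWIDs stored in the KNN
distances = numpy.array([0.9, 0.3, 0.7, 0.2])         # record-to-prototype distances
trainRecords, recordROWID, classificationMaxDist = 20, 50, 0.65

validIdx = numpy.where((categoryRecencyList >= trainRecords) &
                       (categoryRecencyList < recordROWID))[0]   # prototypes 25 and 40
validDistances = distances[validIdx]

if len(validIdx) and validDistances.min() <= classificationMaxDist:
  nearestROWID = categoryRecencyList[validIdx][validDistances.argmin()]
  print(nearestROWID)    # 25 (distance 0.3 beats 0.7; both within 0.65)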
'Since the KNN Classifier stores categories as numbers, we must store each label as a number. This method converts from a label to a unique number. Each label is assigned a unique bit so multiple labels may be assigned to a single record.'
def _labelToCategoryNumber(self, label):
if label not in self.saved_categories:
  self.saved_categories.append(label)
return pow(2, self.saved_categories.index(label))
'This method takes a list of labels and returns a unique category number. This enables this class to store a list of categories for each point since the KNN classifier only stores a single number category for each record.'
def _labelListToCategoryNumber(self, labelList):
categoryNumber = 0
for label in labelList:
  categoryNumber += self._labelToCategoryNumber(label)
return categoryNumber
'Converts a category number into a list of labels'
def _categoryToLabelList(self, category):
if category is None:
  return []
labelList = []
labelNum = 0
while category > 0:
  if (category % 2) == 1:
    labelList.append(self.saved_categories[labelNum])
  labelNum += 1
  category = category >> 1
return labelList
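The helpers above encode a label list as a bitmask, one bit per distinct label ever seen, and decode it back. Below is a self-contained round-trip sketch of the same scheme, with a module-level list standing in for self.saved_categories and invented label names.

saved_categories = []

def labelToCategoryNumber(label):
  if label not in saved_categories:
    saved_categories.append(label)
  return 2 ** saved_categories.index(label)   # one unique bit per label

def labelListToCategoryNumber(labelList):
  return sum(labelToCategoryNumber(label) for label in labelList)

def categoryToLabelList(category):
  labelList, labelNum = [], 0
  while category > 0:
    if category % 2 == 1:
      labelList.append(saved_categories[labelNum])
    labelNum += 1
    category >>= 1
  return labelList

number = labelListToCategoryNumber(['spike', 'drift'])   # 'spike' -> bit 0, 'drift' -> bit 1
print(number)                          # 3
print(categoryToLabelList(number))     # ['spike', 'drift']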
'Returns a state\'s anomaly vector, converting it from sparse to dense.'
def _getStateAnomalyVector(self, state):
vector = numpy.zeros(self._anomalyVectorLength)
vector[state.anomalyVector] = 1
return vector
'Get the labels on classified points within range start to end. Not inclusive of end. :returns: (dict) with format: \'isProcessing\': boolean, \'recordLabels\': list of results ``isProcessing`` - currently always false as recalculation blocks; used if reprocessing of records is still being performed; Each item in ``recordLabels`` is of format: \'ROWID\': id of the row, \'labels\': list of strings'
def getLabels(self, start=None, end=None):
if (len(self._recordsCache) == 0): return {'isProcessing': False, 'recordLabels': []} try: start = int(start) except Exception: start = 0 try: end = int(end) except Exception: end = self._recordsCache[(-1)].ROWID if (end <= start): raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'getLabels'.", debugInfo={'requestRange': {'startRecordID': start, 'endRecordID': end}, 'numRecordsStored': len(self._recordsCache)}) results = {'isProcessing': False, 'recordLabels': []} ROWIDX = numpy.array(self._knnclassifier.getParameter('categoryRecencyList')) validIdx = numpy.where(((ROWIDX >= start) & (ROWIDX < end)))[0].tolist() categories = self._knnclassifier.getCategoryList() for idx in validIdx: row = dict(ROWID=int(ROWIDX[idx]), labels=self._categoryToLabelList(categories[idx])) results['recordLabels'].append(row) return results
'Add the label labelName to each record with record ROWID in range from ``start`` to ``end``, noninclusive of end. This will recalculate all points from end to the last record stored in the internal cache of this classifier. :param start: (int) start index :param end: (int) end index (noninclusive) :param labelName: (string) label name'
def addLabel(self, start, end, labelName):
if (len(self._recordsCache) == 0): raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'. Model has no saved records.") try: start = int(start) except Exception: start = 0 try: end = int(end) except Exception: end = int(self._recordsCache[(-1)].ROWID) startID = self._recordsCache[0].ROWID clippedStart = max(0, (start - startID)) clippedEnd = max(0, min(len(self._recordsCache), (end - startID))) if (clippedEnd <= clippedStart): raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'.", debugInfo={'requestRange': {'startRecordID': start, 'endRecordID': end}, 'clippedRequestRange': {'startRecordID': clippedStart, 'endRecordID': clippedEnd}, 'validRange': {'startRecordID': startID, 'endRecordID': self._recordsCache[(len(self._recordsCache) - 1)].ROWID}, 'numRecordsStored': len(self._recordsCache)}) for state in self._recordsCache[clippedStart:clippedEnd]: if (labelName not in state.anomalyLabel): state.anomalyLabel.append(labelName) state.setByUser = True self._addRecordToKNN(state) assert (len(self.saved_categories) > 0) for state in self._recordsCache[clippedEnd:]: self._classifyState(state)
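Both addLabel() above and removeLabels() below translate absolute ROWIDs into indices into the in-memory record cache by clipping against the ROWID of the oldest cached record. A tiny sketch of that translation with invented ROWIDs:

cacheROWIDs = [100, 101, 102, 103, 104]   # ROWIDs of the records currently cached
startID = cacheROWIDs[0]

def clipRange(start, end):
  # Map absolute ROWIDs onto valid cache indices.
  clippedStart = max(0, start - startID)
  clippedEnd = max(0, min(len(cacheROWIDs), end - startID))
  return clippedStart, clippedEnd

print(clipRange(102, 110))   # (2, 5): label records 102..104
print(clipRange(90, 101))    # (0, 1): only record 100 falls inside the cache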
'Remove labels from each record with record ROWID in range from ``start`` to ``end``, noninclusive of end. Removes all records if ``labelFilter`` is None, otherwise only removes the labels equal to ``labelFilter``. This will recalculate all points from end to the last record stored in the internal cache of this classifier. :param start: (int) start index :param end: (int) end index (noninclusive) :param labelFilter: (string) label filter'
def removeLabels(self, start=None, end=None, labelFilter=None):
if (len(self._recordsCache) == 0): raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'removeLabels'. Model has no saved records.") try: start = int(start) except Exception: start = 0 try: end = int(end) except Exception: end = self._recordsCache[(-1)].ROWID startID = self._recordsCache[0].ROWID clippedStart = (0 if (start is None) else max(0, (start - startID))) clippedEnd = (len(self._recordsCache) if (end is None) else max(0, min(len(self._recordsCache), (end - startID)))) if (clippedEnd <= clippedStart): raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'removeLabels'.", debugInfo={'requestRange': {'startRecordID': start, 'endRecordID': end}, 'clippedRequestRange': {'startRecordID': clippedStart, 'endRecordID': clippedEnd}, 'validRange': {'startRecordID': startID, 'endRecordID': self._recordsCache[(len(self._recordsCache) - 1)].ROWID}, 'numRecordsStored': len(self._recordsCache)}) recordsToDelete = [] for state in self._recordsCache[clippedStart:clippedEnd]: if (labelFilter is not None): if (labelFilter in state.anomalyLabel): state.anomalyLabel.remove(labelFilter) else: state.anomalyLabel = [] state.setByUser = False recordsToDelete.append(state) self._deleteRecordsFromKNN(recordsToDelete) self._deleteRangeFromKNN(start, end) for state in self._recordsCache[clippedEnd:]: self._classifyState(state)
'Return serializable state. This function will return a version of the __dict__ with all "ephemeral" members stripped out. "Ephemeral" members are defined as those that do not need to be (nor should be) stored in any kind of persistent file (e.g., NuPIC network XML file.)'
def __getstate__(self):
state = self.__dict__.copy() state['_knnclassifierProps'] = state['_knnclassifier'].__getstate__() state.pop('_knnclassifier') return state
'Set the state of ourself from a serialized state.'
def __setstate__(self, state):
if (('_version' not in state) or (state['_version'] == 1)): knnclassifierProps = state.pop('_knnclassifierProps') self.__dict__.update(state) self._knnclassifier = KNNClassifierRegion(**self._knnclassifierArgs) self._knnclassifier.__setstate__(knnclassifierProps) self._version = KNNAnomalyClassifierRegion.__VERSION__ else: raise Exception(('Invalid KNNAnomalyClassifierRegion version. Current version: %s' % KNNAnomalyClassifierRegion.__VERSION__))
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.'
def getOutputElementCount(self, name):
if (name == 'labels'): return self._maxLabelOutputs else: raise Exception('Invalid output name specified')
':return: sensed value'
def getSensedValue(self):
return self._sensedValue
':param value: will be encoded when this region does a compute.'
def setSensedValue(self, value):
self._sensedValue = value
'Get the beginning part of the database name for the current version of the database. This, concatenated with \'_\' + Configuration.get(\'nupic.cluster.database.nameSuffix\') will produce the actual database name used.'
@classmethod def dbNamePrefix(cls):
return cls.__getDBNamePrefixForVersion(cls._DB_VERSION)
'Get the beginning part of the database name for the given database version. This, concatenated with \'_\' + Configuration.get(\'nupic.cluster.database.nameSuffix\') will produce the actual database name used. Parameters: dbVersion: ClientJobs database version number retval: the ClientJobs database name prefix for the given DB version'
@classmethod def __getDBNamePrefixForVersion(cls, dbVersion):
return ('%s_v%d' % (cls._DB_ROOT_NAME, dbVersion))
'Generates the ClientJobs database name for the current version of the database; "semi-private" class method for use by friends of the class. Parameters: retval: the ClientJobs database name'
@classmethod def _getDBName(cls):
return cls.__getDBNameForVersion(cls._DB_VERSION)
'Generates the ClientJobs database name for the given version of the database Parameters: dbVersion: ClientJobs database version number retval: the ClientJobs database name for the given DB version'
@classmethod def __getDBNameForVersion(cls, dbVersion):
prefix = cls.__getDBNamePrefixForVersion(dbVersion)
suffix = Configuration.get('nupic.cluster.database.nameSuffix')
suffix = suffix.replace('-', '_')
suffix = suffix.replace('.', '_')
dbName = '%s_%s' % (prefix, suffix)
return dbName
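Putting the two name helpers together: with an illustrative root name of 'client_jobs' (standing in for cls._DB_ROOT_NAME), database version 30, and a configured suffix of 'my-host.example.com', the full database name is assembled like this.

DB_ROOT_NAME = 'client_jobs'        # stand-in for cls._DB_ROOT_NAME
dbVersion = 30                      # stand-in for cls._DB_VERSION
suffix = 'my-host.example.com'      # stand-in for Configuration.get('nupic.cluster.database.nameSuffix')

prefix = '%s_v%d' % (DB_ROOT_NAME, dbVersion)
suffix = suffix.replace('-', '_').replace('.', '_')
dbName = '%s_%s' % (prefix, suffix)
print(dbName)                       # client_jobs_v30_my_host_example_com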
'Get the instance of the ClientJobsDAO created for this process (or perhaps at some point in the future, for this thread). Parameters: retval: instance of ClientJobsDAO'
@staticmethod @logExceptions(_LOGGER) def get():
if ClientJobsDAO._instance is None:
  cjDAO = ClientJobsDAO()
  cjDAO.connect()
  ClientJobsDAO._instance = cjDAO
return ClientJobsDAO._instance
'Instantiate a ClientJobsDAO instance. Parameters:'
@logExceptions(_LOGGER) def __init__(self):
self._logger = _LOGGER assert (ClientJobsDAO._instance is None) self.dbName = self._getDBName() self._jobs = self._JobsTableInfo() self._jobs.tableName = ('%s.jobs' % self.dbName) self._models = self._ModelsTableInfo() self._models.tableName = ('%s.models' % self.dbName) self._connectionID = None
'Convert a database internal column name to a public name. This takes something of the form word1_word2_word3 and converts it to: word1Word2Word3. If the db field name starts with \'_\', it is stripped out so that the name is compatible with collections.namedtuple. for example: _word1_word2_word3 => word1Word2Word3 Parameters: dbName: database internal field name retval: public name'
def _columnNameDBToPublic(self, dbName):
words = dbName.split('_')
if dbName.startswith('_'):
  words = words[1:]
pubWords = [words[0]]
for word in words[1:]:
  pubWords.append(word[0].upper() + word[1:])
return ''.join(pubWords)
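A standalone copy of the conversion above, with a couple of example mappings; the field names are taken from the table definitions elsewhere in this class.

def columnNameDBToPublic(dbName):
  words = dbName.split('_')
  if dbName.startswith('_'):
    words = words[1:]                 # drop the empty token from the leading underscore
  pubWords = [words[0]]
  for word in words[1:]:
    pubWords.append(word[0].upper() + word[1:])
  return ''.join(pubWords)

print(columnNameDBToPublic('job_hash'))               # jobHash
print(columnNameDBToPublic('_eng_last_update_time'))  # engLastUpdateTime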
'Locate the current version of the jobs DB or create a new one, and optionally delete old versions laying around. If desired, this method can be called at any time to re-create the tables from scratch, delete old versions of the database, etc. Parameters: deleteOldVersions: if true, delete any old versions of the DB left on the server recreate: if true, recreate the database from scratch even if it already exists.'
@logExceptions(_LOGGER) @g_retrySQL def connect(self, deleteOldVersions=False, recreate=False):
with ConnectionFactory.get() as conn: self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions, recreate=recreate) conn.cursor.execute('SELECT CONNECTION_ID()') self._connectionID = conn.cursor.fetchall()[0][0] self._logger.info('clientJobsConnectionID=%r', self._connectionID) return
'Initialize tables, if needed Parameters: cursor: SQL cursor deleteOldVersions: if true, delete any old versions of the DB left on the server recreate: if true, recreate the database from scratch even if it already exists.'
@logExceptions(_LOGGER) def _initTables(self, cursor, deleteOldVersions, recreate):
if deleteOldVersions: self._logger.info('Dropping old versions of client_jobs DB; called from: %r', traceback.format_stack()) for i in range(self._DB_VERSION): cursor.execute(('DROP DATABASE IF EXISTS %s' % (self.__getDBNameForVersion(i),))) if recreate: self._logger.info('Dropping client_jobs DB %r; called from: %r', self.dbName, traceback.format_stack()) cursor.execute(('DROP DATABASE IF EXISTS %s' % self.dbName)) cursor.execute(('CREATE DATABASE IF NOT EXISTS %s' % self.dbName)) cursor.execute(('SHOW TABLES IN %s' % self.dbName)) output = cursor.fetchall() tableNames = [x[0] for x in output] if ('jobs' not in tableNames): self._logger.info('Creating table %r', self.jobsTableName) fields = ['job_id INT UNSIGNED NOT NULL AUTO_INCREMENT', ('client CHAR(%d)' % self.CLIENT_MAX_LEN), 'client_info LONGTEXT', 'client_key varchar(255)', 'cmd_line LONGTEXT', 'params LONGTEXT', ('job_hash BINARY(%d) DEFAULT NULL' % self.HASH_MAX_LEN), 'status VARCHAR(16) DEFAULT "notStarted"', 'completion_reason VARCHAR(16)', 'completion_msg LONGTEXT', ('worker_completion_reason VARCHAR(16) DEFAULT "%s"' % self.CMPL_REASON_SUCCESS), 'worker_completion_msg LONGTEXT', 'cancel BOOLEAN DEFAULT FALSE', 'start_time DATETIME DEFAULT NULL', 'end_time DATETIME DEFAULT NULL', 'results LONGTEXT', '_eng_job_type VARCHAR(32)', 'minimum_workers INT UNSIGNED DEFAULT 0', 'maximum_workers INT UNSIGNED DEFAULT 0', ('priority INT DEFAULT %d' % self.DEFAULT_JOB_PRIORITY), '_eng_allocate_new_workers BOOLEAN DEFAULT TRUE', '_eng_untended_dead_workers BOOLEAN DEFAULT FALSE', 'num_failed_workers INT UNSIGNED DEFAULT 0', 'last_failed_worker_error_msg LONGTEXT', ('_eng_cleaning_status VARCHAR(16) DEFAULT "%s"' % self.CLEAN_NOT_DONE), 'gen_base_description LONGTEXT', 'gen_permutations LONGTEXT', '_eng_last_update_time DATETIME DEFAULT NULL', '_eng_cjm_conn_id INT UNSIGNED', '_eng_worker_state LONGTEXT', '_eng_status LONGTEXT', '_eng_model_milestones LONGTEXT', 'PRIMARY KEY (job_id)', 'UNIQUE INDEX (client, job_hash)', 'INDEX (status)', 'INDEX (client_key)'] options = ['AUTO_INCREMENT=1000'] query = ('CREATE TABLE IF NOT EXISTS %s (%s) %s' % (self.jobsTableName, ','.join(fields), ','.join(options))) cursor.execute(query) if ('models' not in tableNames): self._logger.info('Creating table %r', self.modelsTableName) fields = ['model_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT', 'job_id INT UNSIGNED NOT NULL', 'params LONGTEXT NOT NULL', 'status VARCHAR(16) DEFAULT "notStarted"', 'completion_reason VARCHAR(16)', 'completion_msg LONGTEXT', 'results LONGTEXT DEFAULT NULL', 'optimized_metric FLOAT ', 'update_counter INT UNSIGNED DEFAULT 0', 'num_records INT UNSIGNED DEFAULT 0', 'start_time DATETIME DEFAULT NULL', 'end_time DATETIME DEFAULT NULL', 'cpu_time FLOAT DEFAULT 0', 'model_checkpoint_id LONGTEXT', 'gen_description LONGTEXT', ('_eng_params_hash BINARY(%d) DEFAULT NULL' % self.HASH_MAX_LEN), ('_eng_particle_hash BINARY(%d) DEFAULT NULL' % self.HASH_MAX_LEN), '_eng_last_update_time DATETIME DEFAULT NULL', '_eng_task_tracker_id TINYBLOB', '_eng_worker_id TINYBLOB', '_eng_attempt_id TINYBLOB', '_eng_worker_conn_id INT DEFAULT 0', '_eng_milestones LONGTEXT', '_eng_stop VARCHAR(16) DEFAULT NULL', '_eng_matured BOOLEAN DEFAULT FALSE', 'PRIMARY KEY (model_id)', 'UNIQUE INDEX (job_id, _eng_params_hash)', 'UNIQUE INDEX (job_id, _eng_particle_hash)'] options = ['AUTO_INCREMENT=1000'] query = ('CREATE TABLE IF NOT EXISTS %s (%s) %s' % (self.modelsTableName, ','.join(fields), ','.join(options))) cursor.execute(query) cursor.execute(('DESCRIBE %s' % 
self.jobsTableName)) fields = cursor.fetchall() self._jobs.dbFieldNames = [str(field[0]) for field in fields] cursor.execute(('DESCRIBE %s' % self.modelsTableName)) fields = cursor.fetchall() self._models.dbFieldNames = [str(field[0]) for field in fields] self._jobs.publicFieldNames = [self._columnNameDBToPublic(x) for x in self._jobs.dbFieldNames] self._models.publicFieldNames = [self._columnNameDBToPublic(x) for x in self._models.dbFieldNames] self._jobs.pubToDBNameDict = dict(zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames)) self._jobs.dbToPubNameDict = dict(zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames)) self._models.pubToDBNameDict = dict(zip(self._models.publicFieldNames, self._models.dbFieldNames)) self._models.dbToPubNameDict = dict(zip(self._models.dbFieldNames, self._models.publicFieldNames)) self._models.modelInfoNamedTuple = collections.namedtuple('_modelInfoNamedTuple', self._models.publicFieldNames) self._jobs.jobInfoNamedTuple = collections.namedtuple('_jobInfoNamedTuple', self._jobs.publicFieldNames) return
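The public field names derived above feed collections.namedtuple so that rows fetched from the jobs and models tables can be returned as attribute-accessible records. A minimal sketch of that idea with a made-up three-column row:

import collections

dbFieldNames = ['job_id', 'client_info', '_eng_job_type']
publicFieldNames = ['jobId', 'clientInfo', 'engJobType']    # via _columnNameDBToPublic

JobInfo = collections.namedtuple('_jobInfoNamedTuple', publicFieldNames)

row = (1000, '{"desc": "example"}', 'hypersearch')          # as returned by the DB cursor
job = JobInfo(*row)
print('%s %s' % (job.jobId, job.engJobType))                # 1000 hypersearch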
'Return a sequence of matching rows with the requested field values from a table or empty sequence if nothing matched. tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance conn: Owned connection acquired from ConnectionFactory.get() fieldsToMatch: Dictionary of internal fieldName/value mappings that identify the desired rows. If a value is an instance of ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the operator \'IN\' will be used in the corresponding SQL predicate; if the value is bool: "IS TRUE/FALSE"; if the value is None: "IS NULL"; \'=\' will be used for all other cases. selectFieldNames: list of fields to return, using internal field names maxRows: maximum number of rows to return; unlimited if maxRows is None retval: A sequence of matching rows, each row consisting of field values in the order of the requested field names. Empty sequence is returned when no match exists.'
def _getMatchingRowsNoRetries(self, tableInfo, conn, fieldsToMatch, selectFieldNames, maxRows=None):
assert fieldsToMatch, repr(fieldsToMatch) assert all(((k in tableInfo.dbFieldNames) for k in fieldsToMatch.iterkeys())), repr(fieldsToMatch) assert selectFieldNames, repr(selectFieldNames) assert all(((f in tableInfo.dbFieldNames) for f in selectFieldNames)), repr(selectFieldNames) matchPairs = fieldsToMatch.items() matchExpressionGen = ((p[0] + ((' IS ' + {True: 'TRUE', False: 'FALSE'}[p[1]]) if isinstance(p[1], bool) else (' IS NULL' if (p[1] is None) else (' IN %s' if isinstance(p[1], self._SEQUENCE_TYPES) else '=%s')))) for p in matchPairs) matchFieldValues = [p[1] for p in matchPairs if ((not isinstance(p[1], bool)) and (p[1] is not None))] query = ('SELECT %s FROM %s WHERE (%s)' % (','.join(selectFieldNames), tableInfo.tableName, ' AND '.join(matchExpressionGen))) sqlParams = matchFieldValues if (maxRows is not None): query += ' LIMIT %s' sqlParams.append(maxRows) conn.cursor.execute(query, sqlParams) rows = conn.cursor.fetchall() if rows: assert ((maxRows is None) or (len(rows) <= maxRows)), ('%d !<= %d' % (len(rows), maxRows)) assert (len(rows[0]) == len(selectFieldNames)), ('%d != %d' % (len(rows[0]), len(selectFieldNames))) else: rows = tuple() return rows
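The WHERE clause above is assembled from fieldsToMatch: booleans become IS TRUE/FALSE, None becomes IS NULL, sequences use IN, and everything else uses '='. A small sketch of that predicate construction, independent of any database connection; buildPredicate is an illustrative helper, not part of ClientJobsDAO.

SEQUENCE_TYPES = (list, set, tuple)

def buildPredicate(fieldsToMatch):
  clauses, params = [], []
  for name, value in fieldsToMatch.items():
    if isinstance(value, bool):
      clauses.append('%s IS %s' % (name, 'TRUE' if value else 'FALSE'))
    elif value is None:
      clauses.append('%s IS NULL' % name)
    elif isinstance(value, SEQUENCE_TYPES):
      clauses.append('%s IN %%s' % name)    # driver expands the sequence parameter
      params.append(value)
    else:
      clauses.append('%s=%%s' % name)
      params.append(value)
  return ' AND '.join(clauses), params

print(buildPredicate({'status': 'running', 'cancel': False, 'results': None}))
# ('status=%s AND cancel IS FALSE AND results IS NULL', ['running'])
# (clause order follows dict iteration order)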
'Like _getMatchingRowsNoRetries(), but with retries on transient MySQL failures'
@g_retrySQL def _getMatchingRowsWithRetries(self, tableInfo, fieldsToMatch, selectFieldNames, maxRows=None):
with ConnectionFactory.get() as conn: return self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch, selectFieldNames, maxRows)
'Return a single matching row with the requested field values from the the requested table or None if nothing matched. tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance conn: Owned connection acquired from ConnectionFactory.get() fieldsToMatch: Dictionary of internal fieldName/value mappings that identify the desired rows. If a value is an instance of ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the operator \'IN\' will be used in the corresponding SQL predicate; if the value is bool: "IS TRUE/FALSE"; if the value is None: "IS NULL"; \'=\' will be used for all other cases. selectFieldNames: list of fields to return, using internal field names retval: A sequence of field values of the matching row in the order of the given field names; or None if there was no match.'
def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch, selectFieldNames):
rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch, selectFieldNames, maxRows=1) if rows: assert (len(rows) == 1), repr(len(rows)) result = rows[0] else: result = None return result
'Like _getOneMatchingRowNoRetries(), but with retries on transient MySQL failures'
@g_retrySQL def _getOneMatchingRowWithRetries(self, tableInfo, fieldsToMatch, selectFieldNames):
with ConnectionFactory.get() as conn: return self._getOneMatchingRowNoRetries(tableInfo, conn, fieldsToMatch, selectFieldNames)
'Attempt to insert a row with the given parameters into the jobs table. Return jobID of the inserted row, or of an existing row with matching client/jobHash key. The combination of client and jobHash are expected to be unique (enforced by a unique index on the two columns). NOTE: It\'s possibe that this or another process (on this or another machine) already inserted a row with matching client/jobHash key (e.g., StreamMgr). This may also happen undetected by this function due to a partially-successful insert operation (e.g., row inserted, but then connection was lost while reading response) followed by retries either of this function or in SteadyDB module. Parameters: conn: Owned connection acquired from ConnectionFactory.get() client: Name of the client submitting the job cmdLine: Command line to use to launch each worker process; must be a non-empty string jobHash: unique hash of this job. The caller must insure that this, together with client, uniquely identifies this job request for the purposes of detecting duplicates. clientInfo: JSON encoded dict of client specific information. clientKey: Foreign key. params: JSON encoded dict of the parameters for the job. This can be fetched out of the database by the worker processes based on the jobID. minimumWorkers: minimum number of workers design at a time. maximumWorkers: maximum number of workers desired at a time. priority: Job scheduling priority; 0 is the default priority ( ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will be scheduled to run at the expense of the lower-priority jobs, and higher-priority job tasks will preempt those with lower priority if there is inadequate supply of scheduling slots. Excess lower priority job tasks will starve as long as slot demand exceeds supply. Most jobs should be scheduled with DEFAULT_JOB_PRIORITY. System jobs that must run at all cost, such as Multi-Model-Master, should be scheduled with MAX_JOB_PRIORITY. alreadyRunning: Used for unit test purposes only. This inserts the job in the running state. It is used when running a worker in standalone mode without hadoop- it gives it a job record to work with. retval: jobID of the inserted jobs row, or of an existing jobs row with matching client/jobHash key'
def _insertOrGetUniqueJobNoRetries(self, conn, client, cmdLine, jobHash, clientInfo, clientKey, params, minimumWorkers, maximumWorkers, jobType, priority, alreadyRunning):
assert (len(client) <= self.CLIENT_MAX_LEN), ('client too long:' + repr(client)) assert cmdLine, ('Unexpected empty or None command-line: ' + repr(cmdLine)) assert (len(jobHash) == self.HASH_MAX_LEN), ('wrong hash len=%d' % len(jobHash)) if alreadyRunning: initStatus = self.STATUS_TESTMODE else: initStatus = self.STATUS_NOTSTARTED query = ('INSERT IGNORE INTO %s (status, client, client_info, client_key,cmd_line, params, job_hash, _eng_last_update_time, minimum_workers, maximum_workers, priority, _eng_job_type) VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s, UTC_TIMESTAMP(), %%s, %%s, %%s, %%s) ' % (self.jobsTableName,)) sqlParams = (initStatus, client, clientInfo, clientKey, cmdLine, params, jobHash, minimumWorkers, maximumWorkers, priority, jobType) numRowsInserted = conn.cursor.execute(query, sqlParams) jobID = 0 if (numRowsInserted == 1): conn.cursor.execute('SELECT LAST_INSERT_ID()') jobID = conn.cursor.fetchall()[0][0] if (jobID == 0): self._logger.warn('_insertOrGetUniqueJobNoRetries: SELECT LAST_INSERT_ID() returned 0; likely due to reconnection in SteadyDB following INSERT. jobType=%r; client=%r; clientInfo=%r; clientKey=%s; jobHash=%r; cmdLine=%r', jobType, client, _abbreviate(clientInfo, 32), clientKey, jobHash, cmdLine) else: assert (numRowsInserted == 0), repr(numRowsInserted) if (jobID == 0): row = self._getOneMatchingRowNoRetries(self._jobs, conn, dict(client=client, job_hash=jobHash), ['job_id']) assert (row is not None) assert (len(row) == 1), ('Unexpected num fields: ' + repr(len(row))) jobID = row[0] if alreadyRunning: query = ('UPDATE %s SET _eng_cjm_conn_id=%%s, start_time=UTC_TIMESTAMP(), _eng_last_update_time=UTC_TIMESTAMP() WHERE job_id=%%s' % (self.jobsTableName,)) conn.cursor.execute(query, (self._connectionID, jobID)) return jobID
'Resumes processing of an existing job that is presently in the STATUS_COMPLETED state. NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO NOT use it on Hypersearch jobs. This prepares an existing job entry to resume processing. The CJM is always periodically sweeping the jobs table and when it finds a job that is ready to run, it will proceed to start it up on Hadoop. Parameters: conn: Owned connection acquired from ConnectionFactory.get() jobID: jobID of the job to resume alreadyRunning: Used for unit test purposes only. This inserts the job in the running state. It is used when running a worker in standalone mode without hadoop. raises: Throws a RuntimeError if no rows are affected. This could be because: 1) there was no matching jobID, or 2) the status of the job was not STATUS_COMPLETED. retval: nothing'
def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):
if alreadyRunning: initStatus = self.STATUS_TESTMODE else: initStatus = self.STATUS_NOTSTARTED assignments = ['status=%s', 'completion_reason=DEFAULT', 'completion_msg=DEFAULT', 'worker_completion_reason=DEFAULT', 'worker_completion_msg=DEFAULT', 'end_time=DEFAULT', 'cancel=DEFAULT', '_eng_last_update_time=UTC_TIMESTAMP()', '_eng_allocate_new_workers=DEFAULT', '_eng_untended_dead_workers=DEFAULT', 'num_failed_workers=DEFAULT', 'last_failed_worker_error_msg=DEFAULT', '_eng_cleaning_status=DEFAULT'] assignmentValues = [initStatus] if alreadyRunning: assignments += ['_eng_cjm_conn_id=%s', 'start_time=UTC_TIMESTAMP()', '_eng_last_update_time=UTC_TIMESTAMP()'] assignmentValues.append(self._connectionID) else: assignments += ['_eng_cjm_conn_id=DEFAULT', 'start_time=DEFAULT'] assignments = ', '.join(assignments) query = ('UPDATE %s SET %s WHERE job_id=%%s AND status=%%s' % (self.jobsTableName, assignments)) sqlParams = (assignmentValues + [jobID, self.STATUS_COMPLETED]) numRowsAffected = conn.cursor.execute(query, sqlParams) assert (numRowsAffected <= 1), repr(numRowsAffected) if (numRowsAffected == 0): self._logger.info('_resumeJobNoRetries: Redundant job-resume UPDATE: job was not suspended or was resumed by another process or operation was retried after connection failure; jobID=%s', jobID) return
'Return our connection ID. This can be used for worker identification purposes. NOTE: the actual MySQL connection ID used in queries may change from time to time if the connection is re-acquired (e.g., upon MySQL server restart) or when more than one entry from the connection pool has been used (e.g., multi-threaded apps)'
def getConnectionID(self):
return self._connectionID
'Requests a job to be suspended. NOTE: this is primarily for suspending Production Jobs; DO NOT use it on Hypersearch jobs. For canceling any job type, use jobCancel() instead! Parameters: jobID: jobID of the job to suspend retval: nothing'
@logExceptions(_LOGGER) def jobSuspend(self, jobID):
self.jobCancel(jobID) return
'Resumes processing of an existing job that is presently in the STATUS_COMPLETED state. NOTE: this is primarily for resuming suspended Production Jobs; DO NOT use it on Hypersearch jobs. NOTE: The job MUST be in the STATUS_COMPLETED state at the time of this call, otherwise an exception will be raised. This prepares an existing job entry to resume processing. The CJM is always periodically sweeping the jobs table and when it finds a job that is ready to run, it will proceed to start it up on Hadoop. Parameters: jobID: jobID of the job to resume alreadyRunning: Used for unit test purposes only. This inserts the job in the running state. It is used when running a worker in standalone mode without hadoop. raises: Throws a RuntimeError if no rows are affected. This could be because: 1) there was no matching jobID, or 2) the status of the job was not STATUS_COMPLETED. retval: nothing'
@logExceptions(_LOGGER) def jobResume(self, jobID, alreadyRunning=False):
row = self.jobGetFields(jobID, ['status']) (jobStatus,) = row if (jobStatus != self.STATUS_COMPLETED): raise RuntimeError(('Failed to resume job: job was not suspended; jobID=%s; job status=%r' % (jobID, jobStatus))) @g_retrySQL def resumeWithRetries(): with ConnectionFactory.get() as conn: self._resumeJobNoRetries(conn, jobID, alreadyRunning) resumeWithRetries() return
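As a usage sketch for the suspend/resume pair above: the import path, the ClientJobsDAO.get() accessor, and the job ID below are assumptions for illustration only.

from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()   # assumed accessor for the shared DAO instance
jobID = 42                    # hypothetical ID of a previously suspended job

# jobResume raises RuntimeError unless the job is in STATUS_COMPLETED.
try:
    cjDAO.jobResume(jobID)
except RuntimeError:
    print("job %s is not suspended; nothing to resume" % jobID)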
'Add an entry to the jobs table for a new job request. This is called by clients that wish to start up a new job, like a Hypersearch, stream job, or specific model evaluation from the engine. This puts a new entry into the jobs table. The CJM is always periodically sweeping the jobs table and when it finds a new job, it will proceed to start it up on Hadoop. Parameters: client: Name of the client submitting the job cmdLine: Command line to use to launch each worker process; must be a non-empty string clientInfo: JSON encoded dict of client specific information. clientKey: Foreign key. params: JSON encoded dict of the parameters for the job. This can be fetched out of the database by the worker processes based on the jobID. alreadyRunning: Used for unit test purposes only. This inserts the job in the running state. It is used when running a worker in standalone mode without hadoop - it gives it a job record to work with. minimumWorkers: minimum number of workers desired at a time. maximumWorkers: maximum number of workers desired at a time. jobType: The type of job that this is. This should be one of the JOB_TYPE_XXXX enums. This is needed to allow a standard way of recognizing a job\'s function and capabilities. priority: Job scheduling priority; 0 is the default priority ( ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will be scheduled to run at the expense of the lower-priority jobs, and higher-priority job tasks will preempt those with lower priority if there is inadequate supply of scheduling slots. Excess lower priority job tasks will starve as long as slot demand exceeds supply. Most jobs should be scheduled with DEFAULT_JOB_PRIORITY. System jobs that must run at all cost, such as Multi-Model-Master, should be scheduled with MAX_JOB_PRIORITY. retval: jobID - unique ID assigned to this job'
@logExceptions(_LOGGER) def jobInsert(self, client, cmdLine, clientInfo='', clientKey='', params='', alreadyRunning=False, minimumWorkers=0, maximumWorkers=0, jobType='', priority=DEFAULT_JOB_PRIORITY):
jobHash = self._normalizeHash(uuid.uuid1().bytes) @g_retrySQL def insertWithRetries(): with ConnectionFactory.get() as conn: return self._insertOrGetUniqueJobNoRetries(conn, client=client, cmdLine=cmdLine, jobHash=jobHash, clientInfo=clientInfo, clientKey=clientKey, params=params, minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers, jobType=jobType, priority=priority, alreadyRunning=alreadyRunning) try: jobID = insertWithRetries() except: self._logger.exception('jobInsert FAILED: jobType=%r; client=%r; clientInfo=%r; clientKey=%r;jobHash=%r; cmdLine=%r', jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash, cmdLine) raise else: self._logger.info('jobInsert: returning jobID=%s. jobType=%r; client=%r; clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r', jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash, cmdLine) return jobID
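A typical client-side submission might look like the sketch below; the import path, the ClientJobsDAO.get() accessor, the worker command line, the params payload, and the JOB_TYPE_STREAM name are all illustrative assumptions.

import json
from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()  # assumed accessor
jobID = cjDAO.jobInsert(
    client='StreamMgr',                               # hypothetical client name
    cmdLine='python -m my_worker --jobID={JOBID}',    # hypothetical worker command
    params=json.dumps({'inputFile': 'data.csv'}),     # JSON-encoded job parameters
    minimumWorkers=1,
    maximumWorkers=4,
    jobType=ClientJobsDAO.JOB_TYPE_STREAM,            # assumed name for a JOB_TYPE_XXXX enum
    priority=ClientJobsDAO.DEFAULT_JOB_PRIORITY)
print("queued jobID=%s" % jobID)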
'Add an entry to the jobs table for a new job request, but only if the same job, by the same client, is not already running. If the job is already running, or queued up to run, this call does nothing. If the job does not exist in the jobs table or has completed, it will be inserted and/or started up again. This method is called by clients, like StreamMgr, that wish to only start up a job if it hasn\'t already been started up. Parameters: client: Name of the client submitting the job cmdLine: Command line to use to launch each worker process; must be a non-empty string jobHash: unique hash of this job. The client must ensure that this uniquely identifies this job request for the purposes of detecting duplicates. clientInfo: JSON encoded dict of client specific information. clientKey: Foreign key. params: JSON encoded dict of the parameters for the job. This can be fetched out of the database by the worker processes based on the jobID. minimumWorkers: minimum number of workers desired at a time. maximumWorkers: maximum number of workers desired at a time. jobType: The type of job that this is. This should be one of the JOB_TYPE_XXXX enums. This is needed to allow a standard way of recognizing a job\'s function and capabilities. priority: Job scheduling priority; 0 is the default priority ( ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will be scheduled to run at the expense of the lower-priority jobs, and higher-priority job tasks will preempt those with lower priority if there is inadequate supply of scheduling slots. Excess lower priority job tasks will starve as long as slot demand exceeds supply. Most jobs should be scheduled with DEFAULT_JOB_PRIORITY. System jobs that must run at all cost, such as Multi-Model-Master, should be scheduled with MAX_JOB_PRIORITY. retval: jobID of the newly inserted or existing job.'
@logExceptions(_LOGGER) def jobInsertUnique(self, client, cmdLine, jobHash, clientInfo='', clientKey='', params='', minimumWorkers=0, maximumWorkers=0, jobType='', priority=DEFAULT_JOB_PRIORITY):
assert cmdLine, ('Unexpected empty or None command-line: ' + repr(cmdLine)) @g_retrySQL def insertUniqueWithRetries(): jobHashValue = self._normalizeHash(jobHash) jobID = None with ConnectionFactory.get() as conn: row = self._getOneMatchingRowNoRetries(self._jobs, conn, dict(client=client, job_hash=jobHashValue), ['job_id', 'status']) if (row is not None): (jobID, status) = row if (status == self.STATUS_COMPLETED): query = ('UPDATE %s SET client_info=%%s, client_key=%%s, cmd_line=%%s, params=%%s, minimum_workers=%%s, maximum_workers=%%s, priority=%%s, _eng_job_type=%%s WHERE (job_id=%%s AND status=%%s)' % (self.jobsTableName,)) sqlParams = (clientInfo, clientKey, cmdLine, params, minimumWorkers, maximumWorkers, priority, jobType, jobID, self.STATUS_COMPLETED) numRowsUpdated = conn.cursor.execute(query, sqlParams) assert (numRowsUpdated <= 1), repr(numRowsUpdated) if (numRowsUpdated == 0): self._logger.info('jobInsertUnique: Redundant job-reuse UPDATE: job restarted by another process, values were unchanged, or operation was retried after connection failure; jobID=%s', jobID) self._resumeJobNoRetries(conn, jobID, alreadyRunning=False) else: jobID = self._insertOrGetUniqueJobNoRetries(conn, client=client, cmdLine=cmdLine, jobHash=jobHashValue, clientInfo=clientInfo, clientKey=clientKey, params=params, minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers, jobType=jobType, priority=priority, alreadyRunning=False) return jobID try: jobID = insertUniqueWithRetries() except: self._logger.exception('jobInsertUnique FAILED: jobType=%r; client=%r; clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r', jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash, cmdLine) raise else: self._logger.info('jobInsertUnique: returning jobID=%s. jobType=%r; client=%r; clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r', jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash, cmdLine) return jobID
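For deduplicated submission the caller supplies its own jobHash; the sketch below shows the idea. The import path, accessor, hash derivation, and command line are illustrative assumptions.

import hashlib
import json
from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()  # assumed accessor

# A stable digest that identifies "this" job for this client (illustrative).
jobHash = hashlib.md5(b'stream:data.csv').digest()

jobID = cjDAO.jobInsertUnique(
    client='StreamMgr',                               # hypothetical client name
    cmdLine='python -m my_worker --jobID={JOBID}',    # hypothetical worker command
    jobHash=jobHash,
    params=json.dumps({'inputFile': 'data.csv'}))
# If an identical (client, jobHash) job is already queued or running, the
# existing jobID is returned and nothing new is started.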
'Place the given job in STATUS_RUNNING mode; the job is expected to be STATUS_NOTSTARTED. NOTE: this function was factored out of jobStartNext because it\'s also needed for testing (e.g., test_client_jobs_dao.py)'
@g_retrySQL def _startJobWithRetries(self, jobID):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET status=%%s, _eng_cjm_conn_id=%%s, start_time=UTC_TIMESTAMP(), _eng_last_update_time=UTC_TIMESTAMP() WHERE (job_id=%%s AND status=%%s)' % (self.jobsTableName,)) sqlParams = [self.STATUS_RUNNING, self._connectionID, jobID, self.STATUS_NOTSTARTED] numRowsUpdated = conn.cursor.execute(query, sqlParams) if (numRowsUpdated != 1): self._logger.warn('jobStartNext: numRowsUpdated=%r instead of 1; likely side-effect of transient connection failure', numRowsUpdated) return
'For use only by Nupic Scheduler (also known as ClientJobManager). Look through the jobs table and see if any new job requests have been queued up. If so, pick one and mark it as starting up and create the model table to hold the results. Parameters: retval: jobID of the job we are starting up, if found; None if not found'
@logExceptions(_LOGGER) def jobStartNext(self):
row = self._getOneMatchingRowWithRetries(self._jobs, dict(status=self.STATUS_NOTSTARTED), ['job_id']) if (row is None): return None (jobID,) = row self._startJobWithRetries(jobID) return jobID
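On the scheduler side this is consumed from a polling loop; a minimal sketch, with an assumed import path, accessor, and polling interval:

import time
from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()  # assumed accessor

# Illustrative sweep loop: claim queued jobs one at a time and launch workers.
while True:
    jobID = cjDAO.jobStartNext()       # None when nothing is queued
    if jobID is None:
        time.sleep(5)                  # arbitrary polling interval
        continue
    print("claimed jobID=%s; launching workers..." % jobID)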
'Look through the jobs table and reactivate all that are already in the running state by setting their _eng_allocate_new_workers fields to True; used by Nupic Scheduler as part of its failure-recovery procedure.'
@logExceptions(_LOGGER) @g_retrySQL def jobReactivateRunningJobs(self):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET _eng_cjm_conn_id=%%s, _eng_allocate_new_workers=TRUE WHERE status=%%s ' % (self.jobsTableName,)) conn.cursor.execute(query, [self._connectionID, self.STATUS_RUNNING]) return
'Look through the jobs table and, for all running jobs, get the demand: the minimum and maximum number of workers requested, whether new workers are to be allocated, and whether there are any untended dead workers. Parameters: retval: list of ClientJobsDAO._jobs.jobDemandNamedTuple namedtuples containing the demand - min and max workers, allocate_new_workers, untended_dead_workers, num_failed_workers for each running (STATUS_RUNNING) job. Empty list when there isn\'t any demand.'
@logExceptions(_LOGGER) def jobGetDemand(self):
rows = self._getMatchingRowsWithRetries(self._jobs, dict(status=self.STATUS_RUNNING), [self._jobs.pubToDBNameDict[f] for f in self._jobs.jobDemandNamedTuple._fields]) return [self._jobs.jobDemandNamedTuple._make(r) for r in rows]
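The returned demand tuples can be consumed as below; rather than guessing the namedtuple's exact field names, the sketch just introspects them via _fields (import path and accessor are assumed).

from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()  # assumed accessor

# Each entry is one jobDemandNamedTuple per STATUS_RUNNING job; the exact
# field names come from the namedtuple itself, so introspect them here.
for demand in cjDAO.jobGetDemand():
    print(dict(zip(demand._fields, demand)))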
'Set cancel field of all currently-running jobs to true.'
@logExceptions(_LOGGER) @g_retrySQL def jobCancelAllRunningJobs(self):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET cancel=TRUE WHERE status<>%%s ' % (self.jobsTableName,)) conn.cursor.execute(query, [self.STATUS_COMPLETED]) return
'Look through the jobs table and count the running jobs whose cancel field is true. Parameters: retval: A count of running jobs with the cancel field set to true.'
@logExceptions(_LOGGER) @g_retrySQL def jobCountCancellingJobs(self):
with ConnectionFactory.get() as conn: query = ('SELECT COUNT(job_id) FROM %s WHERE (status<>%%s AND cancel is TRUE)' % (self.jobsTableName,)) conn.cursor.execute(query, [self.STATUS_COMPLETED]) rows = conn.cursor.fetchall() return rows[0][0]
'Look through the jobs table and get the list of running jobs whose cancel field is true. Parameters: retval: A (possibly empty) sequence of running job IDs with cancel field set to true'
@logExceptions(_LOGGER) @g_retrySQL def jobGetCancellingJobs(self):
with ConnectionFactory.get() as conn: query = ('SELECT job_id FROM %s WHERE (status<>%%s AND cancel is TRUE)' % (self.jobsTableName,)) conn.cursor.execute(query, [self.STATUS_COMPLETED]) rows = conn.cursor.fetchall() return tuple((r[0] for r in rows))
'Generator to allow iterating slices at dynamic intervals. Parameters: data: Any data structure that supports slicing (i.e. list or tuple) *intervals: Iterable of intervals. The sum of intervals should be less than or equal to the length of data.'
@staticmethod @logExceptions(_LOGGER) def partitionAtIntervals(data, intervals):
assert (sum(intervals) <= len(data)) start = 0 for interval in intervals: end = (start + interval) (yield data[start:end]) start = end raise StopIteration
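Since this is a staticmethod with no database dependency, it can be exercised directly. A small example follows, assuming the nupic import path below and the Python 2 runtime this module targets (the generator ends with raise StopIteration, which newer Python 3 versions would turn into a RuntimeError).

from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

data = ('a', 'b', 'c', 'd', 'e', 'f')
# Back-to-back slices of length 2, 3, and 1 taken from `data`.
parts = list(ClientJobsDAO.partitionAtIntervals(data, [2, 3, 1]))
print(parts)   # [('a', 'b'), ('c', 'd', 'e'), ('f',)]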
'Return a list of namedtuples from the result of a join query. A single database result is partitioned at intervals corresponding to the fields in namedTuples. The return value is the result of applying namedtuple._make() to each of the partitions, for each of the namedTuples. Parameters: result: Tuple representing a single result from a database query *namedTuples: List of named tuples.'
@staticmethod @logExceptions(_LOGGER) def _combineResults(result, *namedTuples):
results = ClientJobsDAO.partitionAtIntervals(result, [len(nt._fields) for nt in namedTuples]) return [nt._make(result) for (nt, result) in zip(namedTuples, results)]
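The splitting performed here can be pictured with plain namedtuples; the following standalone sketch mirrors the same partition-then-_make idea without using the DAO's internal tuple types.

from collections import namedtuple

# Two record types whose fields are laid out back to back in one DB row.
JobPart = namedtuple('JobPart', ['jobId', 'status'])
ModelPart = namedtuple('ModelPart', ['modelId', 'score'])

row = (7, 'running', 1001, 0.83)   # one flat result from a JOIN query

# Mirror of _combineResults: split `row` at the field-count boundaries,
# then build one namedtuple per part.
def combine(result, *tuple_types):
    parts, start = [], 0
    for nt in tuple_types:
        end = start + len(nt._fields)
        parts.append(nt._make(result[start:end]))
        start = end
    return parts

job, model = combine(row, JobPart, ModelPart)
print("%s %s" % (job.status, model.score))   # running 0.83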
'Get all info about a job, with model details, if available. Parameters: jobID: jobID of the job to query retval: A sequence of two-tuples if the jobID exists in the jobs table (an exception is raised if it doesn\'t exist). Each two-tuple contains an instance of jobInfoNamedTuple as the first element and an instance of modelInfoNamedTuple as the second element. NOTE: In the case where there are no matching model rows, a sequence of one two-tuple will still be returned, but the modelInfoNamedTuple fields will be None, and the jobInfoNamedTuple fields will be populated.'
@logExceptions(_LOGGER) @g_retrySQL def jobInfoWithModels(self, jobID):
combinedResults = None with ConnectionFactory.get() as conn: query = ' '.join([('SELECT %s.*, %s.*' % (self.jobsTableName, self.modelsTableName)), ('FROM %s' % self.jobsTableName), ('LEFT JOIN %s USING(job_id)' % self.modelsTableName), 'WHERE job_id=%s']) conn.cursor.execute(query, (jobID,)) if (conn.cursor.rowcount > 0): combinedResults = [ClientJobsDAO._combineResults(result, self._jobs.jobInfoNamedTuple, self._models.modelInfoNamedTuple) for result in conn.cursor.fetchall()] if (combinedResults is not None): return combinedResults raise RuntimeError(('jobID=%s not found within the jobs table' % jobID))
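Consuming the (jobInfo, modelInfo) pairs might look like this sketch; the import path, accessor, and jobID are assumptions, and the field names are introspected rather than hard-coded.

from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()        # assumed accessor
jobID = 42                         # hypothetical job

for jobInfo, modelInfo in cjDAO.jobInfoWithModels(jobID):
    # When the job has no models, modelInfo's fields are all None.
    print(dict(zip(jobInfo._fields, jobInfo)))
    print(dict(zip(modelInfo._fields, modelInfo)))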
'Get all info about a job. Parameters: jobID: jobID of the job to query retval: namedtuple containing the job info.'
@logExceptions(_LOGGER) def jobInfo(self, jobID):
row = self._getOneMatchingRowWithRetries(self._jobs, dict(job_id=jobID), [self._jobs.pubToDBNameDict[n] for n in self._jobs.jobInfoNamedTuple._fields]) if (row is None): raise RuntimeError(('jobID=%s not found within the jobs table' % jobID)) return self._jobs.jobInfoNamedTuple._make(row)
'Change the status on the given job. Parameters: jobID: jobID of the job whose status to change status: new status string (ClientJobsDAO.STATUS_xxxxx) useConnectionID: True if the connection id of the calling function must be the same as the connection that created the job. Set to False for hypersearch workers'
@logExceptions(_LOGGER) @g_retrySQL def jobSetStatus(self, jobID, status, useConnectionID=True):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET status=%%s, _eng_last_update_time=UTC_TIMESTAMP() WHERE job_id=%%s' % (self.jobsTableName,)) sqlParams = [status, jobID] if useConnectionID: query += ' AND _eng_cjm_conn_id=%s' sqlParams.append(self._connectionID) result = conn.cursor.execute(query, sqlParams) if (result != 1): raise RuntimeError(('Tried to change the status of job %d to %s, but this job belongs to some other CJM' % (jobID, status)))
'Change the status on the given job to completed. Parameters: jobID: jobID of the job to mark as completed completionReason: completionReason string completionMsg: completionMsg string useConnectionID: True if the connection id of the calling function must be the same as the connection that created the job. Set to False for hypersearch workers'
@logExceptions(_LOGGER) @g_retrySQL def jobSetCompleted(self, jobID, completionReason, completionMsg, useConnectionID=True):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET status=%%s, completion_reason=%%s, completion_msg=%%s, end_time=UTC_TIMESTAMP(), _eng_last_update_time=UTC_TIMESTAMP() WHERE job_id=%%s' % (self.jobsTableName,)) sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg, jobID] if useConnectionID: query += ' AND _eng_cjm_conn_id=%s' sqlParams.append(self._connectionID) result = conn.cursor.execute(query, sqlParams) if (result != 1): raise RuntimeError(('Tried to change the status of jobID=%s to completed, but this job could not be found or belongs to some other CJM' % jobID))
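A worker or CJM finishing a job would call this roughly as follows; the import path, accessor, jobID, and the completion-reason/message strings are placeholders rather than the module's real constants.

from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()        # assumed accessor
jobID = 42                         # hypothetical job

# useConnectionID=True (the default) restricts the update to the CJM that
# started the job; hypersearch workers pass False, as noted above.
cjDAO.jobSetCompleted(jobID,
                      completionReason='success',        # placeholder; real callers pass the module's completion-reason value
                      completionMsg='all models finished')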
'Cancel the given job. This will update the cancel field in the jobs table and will result in the job being cancelled. Parameters: jobID: jobID of the job to cancel'
@logExceptions(_LOGGER) def jobCancel(self, jobID):
self._logger.info('Canceling jobID=%s', jobID) self.jobSetFields(jobID, {'cancel': True}, useConnectionID=False)
'Fetch all the modelIDs that correspond to a given jobID; empty sequence if none'
@logExceptions(_LOGGER) def jobGetModelIDs(self, jobID):
rows = self._getMatchingRowsWithRetries(self._models, dict(job_id=jobID), ['model_id']) return [r[0] for r in rows]
'Return the number of jobs for the given clientInfo and a status that is not completed.'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobCountForClientInfo(self, clientInfo):
with ConnectionFactory.get() as conn: query = ('SELECT count(job_id) FROM %s WHERE client_info = %%s AND status != %%s' % self.jobsTableName) conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED]) activeJobCount = conn.cursor.fetchone()[0] return activeJobCount
'Return the number of jobs for the given clientKey and a status that is not completed.'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobCountForClientKey(self, clientKey):
with ConnectionFactory.get() as conn: query = ('SELECT count(job_id) FROM %s WHERE client_key = %%s AND status != %%s' % self.jobsTableName) conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED]) activeJobCount = conn.cursor.fetchone()[0] return activeJobCount
'Fetch jobIDs, plus any requested optional fields, for all not-yet-completed jobs with the given clientInfo'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobsForClientInfo(self, clientInfo, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT %s FROM %s WHERE client_info = %%s AND status != %%s' % (dbFieldsStr, self.jobsTableName)) conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED]) rows = conn.cursor.fetchall() return rows
'Fetch jobIDs, plus any requested optional fields, for all not-yet-completed jobs with the given clientKey'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobsForClientKey(self, clientKey, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT %s FROM %s WHERE client_key = %%s AND status != %%s' % (dbFieldsStr, self.jobsTableName)) conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED]) rows = conn.cursor.fetchall() return rows
'Fetch jobIDs, plus any requested optional fields, for all jobs in the table'
@logExceptions(_LOGGER) @g_retrySQL def getJobs(self, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT %s FROM %s' % (dbFieldsStr, self.jobsTableName)) conn.cursor.execute(query) rows = conn.cursor.fetchall() return rows
'Helper function for querying the models table including relevant job info where the job type matches the specified jobType. Only records for which there is a matching jobId in both tables are returned, and only the requested fields are returned in each result, assuming that there is not a conflict. This function is useful, for example, in querying a cluster for a list of actively running production models (according to the state of the client jobs database). jobType must be one of the JOB_TYPE_XXXX enumerations. Parameters: jobType: jobType enum fields: list of fields to return Returns: List of tuples containing the jobId and requested field values'
@logExceptions(_LOGGER) @g_retrySQL def getFieldsForActiveJobsOfType(self, jobType, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT DISTINCT %s FROM %s j LEFT JOIN %s m USING(job_id) WHERE j.status != %%s AND _eng_job_type = %%s' % (dbFieldsStr, self.jobsTableName, self.modelsTableName)) conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType]) return conn.cursor.fetchall()
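For example, listing the active jobs of one type; the import path, accessor, and the JOB_TYPE_PM name are assumptions for illustration.

from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()  # assumed accessor

# With no extra fields requested, each returned row is just (job_id,).
rows = cjDAO.getFieldsForActiveJobsOfType(ClientJobsDAO.JOB_TYPE_PM)  # assumed JOB_TYPE_XXXX enum name
activeJobIDs = [row[0] for row in rows]
print(activeJobIDs)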
'Fetch the values of 1 or more fields from a job record. Here, \'fields\' is a list with the names of the fields to fetch. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). Parameters: jobID: jobID of the job record fields: list of fields to return Returns: A sequence of field values in the same order as the requested field list -> [field1, field2, ...]'
@logExceptions(_LOGGER) def jobGetFields(self, jobID, fields):
return self.jobsGetFields([jobID], fields, requireAll=True)[0][1]
'Fetch the values of 1 or more fields from a sequence of job records. Here, \'fields\' is a sequence (list or tuple) with the names of the fields to fetch. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). WARNING!!!: The results are NOT necessarily in the same order as the job IDs passed in!!! Parameters: jobIDs: A sequence of jobIDs fields: A list of fields to return for each jobID Returns: A list of tuples->(jobID, [field1, field2,...])'
@logExceptions(_LOGGER) def jobsGetFields(self, jobIDs, fields, requireAll=True):
assert isinstance(jobIDs, self._SEQUENCE_TYPES) assert (len(jobIDs) >= 1) rows = self._getMatchingRowsWithRetries(self._jobs, dict(job_id=jobIDs), (['job_id'] + [self._jobs.pubToDBNameDict[x] for x in fields])) if (requireAll and (len(rows) < len(jobIDs))): raise RuntimeError(('jobIDs %s not found within the jobs table' % ((set(jobIDs) - set((r[0] for r in rows))),))) return [(r[0], list(r[1:])) for r in rows]
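A sketch of both field-fetch calls; the 'status' field name is used elsewhere in this section, while the import path, accessor, and job IDs are assumptions.

from nupic.database.ClientJobsDAO import ClientJobsDAO  # assumed import path

cjDAO = ClientJobsDAO.get()            # assumed accessor

# Single job: returns the field values in the requested order.
(status,) = cjDAO.jobGetFields(42, ['status'])           # hypothetical jobID

# Several jobs at once: note the results are NOT ordered like the input IDs.
for jobID, (jobStatus,) in cjDAO.jobsGetFields([42, 43], ['status']):
    print("%s %s" % (jobID, jobStatus))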