desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
@classmethod
def _readStdConfigFiles(cls):
  """Read in all standard configuration files."""
  # Base defaults first, then user overrides.
  cls.readConfigFile(DEFAULT_CONFIG)
  cls.readConfigFile(USER_CONFIG)
@classmethod
def getCustomDict(cls):
  """Return a dict containing all custom configuration properties.

  retval: dict containing all custom configuration properties.
  """
  # Delegate to the persistent-store wrapper.
  return _CustomConfigurationFileWrapper.getCustomDict()
@classmethod
def setCustomProperty(cls, propertyName, value):
  """Set a single custom setting and persist it to the custom
  configuration store.

  propertyName: string containing the name of the property to set
  value: value to set the property to
  """
  # Single-property convenience wrapper around the batch setter.
  cls.setCustomProperties({propertyName: value})
@classmethod
def setCustomProperties(cls, properties):
  """Set multiple custom properties and persist them to the custom
  configuration store.

  properties: a dict of property name/value pairs to set
  """
  _getLogger().info('Setting custom configuration properties=%r; caller=%r',
                    properties, traceback.format_stack())
  # Persist first, then mirror into the in-memory cache.
  _CustomConfigurationFileWrapper.edit(properties)
  for propertyName, value in properties.iteritems():
    cls.set(propertyName, value)
@classmethod
def clear(cls):
  """Clear all configuration properties from in-memory cache, but do NOT
  alter the custom configuration file. Used in unit-testing.
  """
  # Clear the base-class cache; keep the on-disk custom file intact.
  super(Configuration, cls).clear()
  _CustomConfigurationFileWrapper.clear(persistent=False)
@classmethod
def resetCustomConfig(cls):
  """Clear all custom configuration settings and delete the persistent
  custom configuration store.
  """
  _getLogger().info('Resetting all custom configuration properties; '
                    'caller=%r', traceback.format_stack())
  # Drop the in-memory cache AND the persistent file.
  super(Configuration, cls).clear()
  _CustomConfigurationFileWrapper.clear(persistent=True)
@classmethod
def loadCustomConfig(cls):
  """Loads custom configuration settings from their persistent storage.

  DO NOT CALL THIS: it's typically not necessary to call this method
  directly. NOTE: this method exists *solely* for the benefit of
  prepare_conf.py, which needs to load configuration files selectively.
  """
  cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)
@classmethod
def _readStdConfigFiles(cls):
  """Intercept the _readStdConfigFiles call from our base config class to
  read in base and custom configuration settings.
  """
  super(Configuration, cls)._readStdConfigFiles()
  cls.loadCustomConfig()
@classmethod
def clear(cls, persistent=False):
  """Clear the cached path and, optionally, the temporary file itself.

  persistent: if True, the custom configuration file is deleted
  """
  if persistent:
    try:
      os.unlink(cls.getPath())
    except OSError as e:
      # A missing file is fine; anything else is a real failure.
      if e.errno != errno.ENOENT:
        _getLogger().exception(
            'Error %s while trying to remove dynamic configuration file: '
            '%s', e.errno, cls.getPath())
        raise
  cls._path = None
@classmethod
def getCustomDict(cls):
  """Returns a dict of all temporary values in custom configuration file."""
  if not os.path.exists(cls.getPath()):
    return dict()
  properties = Configuration._readConfigFile(os.path.basename(cls.getPath()),
                                             os.path.dirname(cls.getPath()))
  # Keep only entries that actually carry a 'value' field.
  values = dict()
  for propName in properties:
    if 'value' in properties[propName]:
      values[propName] = properties[propName]['value']
  return values
'Edits the XML configuration file with the parameters specified by properties Parameters: properties: dict of settings to be applied to the custom configuration store (key is property name, value is value)'
@classmethod def edit(cls, properties):
copyOfProperties = copy(properties) configFilePath = cls.getPath() try: with open(configFilePath, 'r') as fp: contents = fp.read() except IOError as e: if (e.errno != errno.ENOENT): _getLogger().exception('Error %s reading custom configuration store from %s, while editing properties %s.', e.errno, configFilePath, properties) raise contents = '<configuration/>' try: elements = ElementTree.XML(contents) ElementTree.tostring(elements) except Exception as e: msg = ("File contents of custom configuration is corrupt. File location: %s; Contents: '%s'. Original Error (%s): %s." % (configFilePath, contents, type(e), e)) _getLogger().exception(msg) raise RuntimeError(msg), None, sys.exc_info()[2] if (elements.tag != 'configuration'): e = ("Expected top-level element to be 'configuration' but got '%s'" % elements.tag) _getLogger().error(e) raise RuntimeError(e) for propertyItem in elements.findall('./property'): propInfo = dict(((attr.tag, attr.text) for attr in propertyItem)) name = propInfo['name'] if (name in copyOfProperties): foundValues = propertyItem.findall('./value') if (len(foundValues) > 0): foundValues[0].text = str(copyOfProperties.pop(name)) if (not copyOfProperties): break else: e = ('Property %s missing value tag.' % (name,)) _getLogger().error(e) raise RuntimeError(e) for (propertyName, value) in copyOfProperties.iteritems(): newProp = ElementTree.Element('property') nameTag = ElementTree.Element('name') nameTag.text = propertyName newProp.append(nameTag) valueTag = ElementTree.Element('value') valueTag.text = str(value) newProp.append(valueTag) elements.append(newProp) try: makeDirectoryFromAbsolutePath(os.path.dirname(configFilePath)) with open(configFilePath, 'w') as fp: fp.write(ElementTree.tostring(elements)) except Exception as e: _getLogger().exception('Error while saving custom configuration properties %s in %s.', properties, configFilePath) raise
@classmethod
def _setPath(cls):
  """Sets the path of the custom configuration file."""
  # File lives in the directory named by the NTA_DYNAMIC_CONF_DIR env var.
  cls._path = os.path.join(os.environ['NTA_DYNAMIC_CONF_DIR'],
                           cls.customFileName)
@classmethod
def getPath(cls):
  """Get the path of the custom configuration file."""
  # Lazily compute the path on first access.
  if cls._path is None:
    cls._setPath()
  return cls._path
def getState(self):
  """Return the current state of this particle. This is used for
  communicating our state into a model record entry so that it can be
  instantiated on another worker.
  """
  raise NotImplementedError
def setState(self, state):
  """Set the current state of this particle. This is counterpart to
  getState.
  """
  raise NotImplementedError
def getPosition(self):
  """For int vars, returns position to nearest int.

  retval: current position
  """
  raise NotImplementedError
def agitate(self):
  """This causes the variable to jiggle away from its current position.

  It does this by increasing its velocity by a multiplicative factor.
  Every time agitate() is called, the velocity will increase. In this way,
  you can call agitate over and over again until the variable reaches a
  new position.
  """
  raise NotImplementedError
def newPosition(self, globalBestPosition, rng):
  """Choose a new position based on results obtained so far from other
  particles and the passed in globalBestPosition.

  globalBestPosition: global best position for this colony
  rng: instance of random.Random() used for generating random numbers
  retval: new position
  """
  raise NotImplementedError
def pushAwayFrom(self, otherVars, rng):
  """Choose a new position that is as far away as possible from all
  'otherVars', where 'otherVars' is a list of PermuteVariable instances.

  otherVars: list of other PermuteVariables to push away from
  rng: instance of random.Random() used for generating random numbers
  """
  raise NotImplementedError
def resetVelocity(self, rng):
  """Reset the velocity to be some fraction of the total distance.

  This is called usually when we start a new swarm and want to start at
  the previous best position found in the previous swarm but with a
  velocity which is a known fraction of the total distance between min
  and max.

  rng: instance of random.Random() used for generating random numbers
  """
  raise NotImplementedError
def __init__(self, min, max, stepSize=None, inertia=None, cogRate=None,
             socRate=None):
  """Construct a variable that permutes over floating point values using
  the Particle Swarm Optimization (PSO) algorithm. See descriptions of PSO
  (i.e. http://en.wikipedia.org/wiki/Particle_swarm_optimization) for
  references to the inertia, cogRate, and socRate parameters.

  min: min allowed value of position
  max: max allowed value of position
  stepSize: if not None, the position must be at min + N * stepSize,
      where N is an integer
  inertia: The inertia for the particle.
  cogRate: This parameter controls how much the particle is affected by
      its distance from it's local best position
  socRate: This parameter controls how much the particle is affected by
      its distance from the global best position
  """
  super(PermuteFloat, self).__init__()
  self.min = min
  self.max = max
  self.stepSize = stepSize
  # Start in the middle of the range with a moderate velocity.
  self._position = (self.max + self.min) / 2.0
  self._velocity = (self.max - self.min) / 5.0
  # PSO coefficients fall back to the global configuration when not given.
  self._inertia = (float(Configuration.get('nupic.hypersearch.inertia'))
                   if inertia is None else inertia)
  self._cogRate = (float(Configuration.get('nupic.hypersearch.cogRate'))
                   if cogRate is None else cogRate)
  self._socRate = (float(Configuration.get('nupic.hypersearch.socRate'))
                   if socRate is None else socRate)
  self._bestPosition = self.getPosition()
  self._bestResult = None
def __repr__(self):
  """See comments in base class."""
  return ('PermuteFloat(min=%f, max=%f, stepSize=%s) [position=%f(%f), '
          'velocity=%f, _bestPosition=%s, _bestResult=%s]'
          % (self.min, self.max, self.stepSize, self.getPosition(),
             self._position, self._velocity, self._bestPosition,
             self._bestResult))
def getState(self):
  """See comments in base class."""
  # Both raw (_position) and quantized (position) positions are exported.
  return dict(_position=self._position,
              position=self.getPosition(),
              velocity=self._velocity,
              bestPosition=self._bestPosition,
              bestResult=self._bestResult)
def setState(self, state):
  """See comments in base class."""
  # Restore exactly the fields exported by getState.
  self._position = state['_position']
  self._velocity = state['velocity']
  self._bestPosition = state['bestPosition']
  self._bestResult = state['bestResult']
def getPosition(self):
  """See comments in base class."""
  # No quantization requested: the raw position is the answer.
  if self.stepSize is None:
    return self._position
  # Snap to the nearest allowed step, then clamp to [min, max].
  numSteps = (self._position - self.min) / self.stepSize
  numSteps = int(round(numSteps))
  position = self.min + (numSteps * self.stepSize)
  position = max(self.min, position)
  position = min(self.max, position)
  return position
def agitate(self):
  """See comments in base class."""
  # Inflate velocity; repeated calls keep pushing it further.
  self._velocity *= 1.5 / self._inertia
  # Cap speed at half the total range.
  maxV = (self.max - self.min) / 2
  if self._velocity > maxV:
    self._velocity = maxV
  elif self._velocity < -maxV:
    self._velocity = -maxV
  # If pinned against a boundary, bounce back toward the interior.
  if self._position == self.max and self._velocity > 0:
    self._velocity *= -1
  if self._position == self.min and self._velocity < 0:
    self._velocity *= -1
def newPosition(self, globalBestPosition, rng):
  """See comments in base class."""
  lb = float(Configuration.get('nupic.hypersearch.randomLowerBound'))
  ub = float(Configuration.get('nupic.hypersearch.randomUpperBound'))
  # Standard PSO velocity update: inertia + cognitive pull toward our own
  # best position...
  self._velocity = (self._velocity * self._inertia
                    + (rng.uniform(lb, ub) * self._cogRate)
                    * (self._bestPosition - self.getPosition()))
  # ...plus social pull toward the colony's best, when known.
  if globalBestPosition is not None:
    self._velocity += ((rng.uniform(lb, ub) * self._socRate)
                       * (globalBestPosition - self.getPosition()))
  # Move, then clamp within bounds.
  self._position += self._velocity
  self._position = max(self.min, self._position)
  self._position = min(self.max, self._position)
  return self.getPosition()
def pushAwayFrom(self, otherPositions, rng):
  """See comments in base class."""
  # Degenerate range: nowhere to move.
  if self.max == self.min:
    return
  # Candidate grid: 4 samples per "other" particle across the range.
  numPositions = len(otherPositions) * 4
  if numPositions == 0:
    return
  stepSize = float(self.max - self.min) / numPositions
  positions = numpy.arange(self.min, self.max + stepSize, stepSize)
  numPositions = len(positions)
  # Accumulate a Gaussian-shaped repulsion weight from each other particle
  # and settle on the least-crowded candidate.
  weights = numpy.zeros(numPositions)
  maxDistanceSq = -1 * (stepSize ** 2)
  for pos in otherPositions:
    distances = pos - positions
    varWeights = numpy.exp(numpy.power(distances, 2) / maxDistanceSq)
    weights += varWeights
  positionIdx = weights.argmin()
  self._position = positions[positionIdx]
  # Treat the chosen spot as our best so far and randomize direction.
  self._bestPosition = self.getPosition()
  self._velocity *= rng.choice([1, -1])
def resetVelocity(self, rng):
  """See comments in base class."""
  # One fifth of the range, in a random direction.
  maxVelocity = (self.max - self.min) / 5.0
  self._velocity = maxVelocity
  self._velocity *= rng.choice([1, -1])
def __repr__(self):
  """See comments in base class."""
  return ('PermuteInt(min=%d, max=%d, stepSize=%d) [position=%d(%f), '
          'velocity=%f, _bestPosition=%s, _bestResult=%s]'
          % (self.min, self.max, self.stepSize, self.getPosition(),
             self._position, self._velocity, self._bestPosition,
             self._bestResult))
def getPosition(self):
  """See comments in base class."""
  # Quantize the float base-class position to the nearest integer.
  position = super(PermuteInt, self).getPosition()
  position = int(round(position))
  return position
def __repr__(self):
  """See comments in base class."""
  return ('PermuteChoices(choices=%s) [position=%s]'
          % (self.choices, self.choices[self._positionIdx]))
def getState(self):
  """See comments in base class."""
  # Choices have no velocity; positions are the choice values themselves.
  return dict(_position=self.getPosition(),
              position=self.getPosition(),
              velocity=None,
              bestPosition=self.choices[self._bestPositionIdx],
              bestResult=self._bestResult)
def setState(self, state):
  """See comments in base class."""
  # State stores choice values; map them back to indices.
  self._positionIdx = self.choices.index(state['_position'])
  self._bestPositionIdx = self.choices.index(state['bestPosition'])
  self._bestResult = state['bestResult']
def setResultsPerChoice(self, resultsPerChoice):
  """Setup our resultsPerChoice history based on the passed in
  resultsPerChoice.

  For example, if this variable has the following choices: ['a', 'b', 'c'],
  resultsPerChoice will have up to 3 elements, each element a tuple of
  (choiceValue, errors) where errors is the list of errors received from
  models that used the specific choice, e.g.:
      [('a', [0.1, 0.2, 0.3]), ('b', [0.5, 0.1, 0.6]), ('c', [0.2])]

  resultsPerChoice: list of (choiceValue, errorsList) tuples
  """
  # BUG FIX: the original used ``[[]] * len(self.choices)``, which creates
  # N references to ONE shared list -- mutating any un-replaced entry would
  # silently mutate all of them. Build independent lists instead.
  self._resultsPerChoice = [[] for _ in range(len(self.choices))]
  for choiceValue, values in resultsPerChoice:
    choiceIndex = self.choices.index(choiceValue)
    self._resultsPerChoice[choiceIndex] = list(values)
def getPosition(self):
  """See comments in base class."""
  # Position is the choice value at the current index.
  return self.choices[self._positionIdx]
def agitate(self):
  """See comments in base class.

  Choice variables have no velocity, so agitation is a no-op.
  """
  pass
def newPosition(self, globalBestPosition, rng):
  """See comments in base class."""
  numChoices = len(self.choices)
  # Compute the mean error seen so far for each choice.
  meanScorePerChoice = []
  overallSum = 0
  numResults = 0
  for i in range(numChoices):
    if len(self._resultsPerChoice[i]) > 0:
      data = numpy.array(self._resultsPerChoice[i])
      meanScorePerChoice.append(data.mean())
      overallSum += data.sum()
      numResults += data.size
    else:
      meanScorePerChoice.append(None)
  if numResults == 0:
    overallSum = 1.0
    numResults = 1
  # Untried choices get the overall mean so they still draw probability.
  for i in range(numChoices):
    if meanScorePerChoice[i] is None:
      meanScorePerChoice[i] = overallSum / numResults
  # Invert errors into scores: lower error => higher score.
  meanScorePerChoice = numpy.array(meanScorePerChoice)
  meanScorePerChoice = (1.1 * meanScorePerChoice.max()) - meanScorePerChoice
  # Optionally sharpen the distribution as more results accumulate.
  if self._fixEarly:
    meanScorePerChoice **= (numResults * self._fixEarlyFactor) / numChoices
  total = meanScorePerChoice.sum()
  if total == 0:
    total = 1.0
  meanScorePerChoice /= total
  # Roulette-wheel selection over the normalized scores.
  distribution = meanScorePerChoice.cumsum()
  r = rng.random() * distribution[-1]
  choiceIdx = numpy.where(r <= distribution)[0][0]
  self._positionIdx = choiceIdx
  return self.getPosition()
def pushAwayFrom(self, otherPositions, rng):
  """See comments in base class."""
  # Count occupancy per choice and move to the least-used one.
  positions = [self.choices.index(x) for x in otherPositions]
  positionCounts = [0] * len(self.choices)
  for pos in positions:
    positionCounts[pos] += 1
  self._positionIdx = numpy.array(positionCounts).argmin()
  self._bestPositionIdx = self._positionIdx
def resetVelocity(self, rng):
  """See comments in base class.

  Choice variables have no velocity, so there is nothing to reset.
  """
  pass
def __repr__(self):
  """See comments in base class."""
  # Render the permuted constructor kwargs as "key=value, " pairs.
  suffix = ''
  for key, value in self.kwArgs.items():
    suffix += '%s=%s, ' % (key, value)
  return ('PermuteEncoder(fieldName=%s, encoderClass=%s, name=%s, %s)'
          % (self.fieldName, self.encoderClass, self.name, suffix))
def getDict(self, encoderName, flattenedChosenValues):
  """Return a dict that can be used to construct this encoder. This dict
  can be passed directly to the addMultipleEncoders() method of the multi
  encoder.

  encoderName: name of the encoder
  flattenedChosenValues: dict of the flattened permutation variables. Any
      variables within this dict whose key starts with encoderName will be
      substituted for encoder constructor args which are being permuted
      over.
  """
  encoder = dict(fieldname=self.fieldName, name=self.name)
  # Substitute chosen values for any kwargs that are permutation variables.
  for encoderArg, value in self.kwArgs.iteritems():
    if isinstance(value, PermuteVariable):
      value = flattenedChosenValues['%s:%s' % (encoderName, encoderArg)]
    encoder[encoderArg] = value
  # A dotted encoderClass means "type.argName": fold (w, radius) into the
  # named constructor argument.
  if '.' in self.encoderClass:
    (encoder['type'], argName) = self.encoderClass.split('.')
    argValue = (encoder['w'], encoder['radius'])
    encoder[argName] = argValue
    encoder.pop('w')
    encoder.pop('radius')
  else:
    encoder['type'] = self.encoderClass
  return encoder
'Run a bunch of iterations on a PermuteVar and collect which positions were visited. Verify that they were all valid.'
def _testValidPositions(self, varClass, minValue, maxValue, stepSize, iterations=100):
positions = set() cogRate = 2.0 socRate = 2.0 inertia = None gBestPosition = maxValue lBestPosition = minValue foundBestPosition = None foundBestResult = None rng = random.Random() rng.seed(42) var = varClass(min=minValue, max=maxValue, stepSize=stepSize, inertia=inertia, cogRate=cogRate, socRate=socRate) for _ in xrange(iterations): pos = var.getPosition() if (self.verbosity >= 1): print ('pos: %f' % pos), if (self.verbosity >= 2): print var positions.add(pos) result = (1.0 - abs((pos - lBestPosition))) if ((foundBestResult is None) or (result > foundBestResult)): foundBestResult = result foundBestPosition = pos state = var.getState() state['bestPosition'] = foundBestPosition state['bestResult'] = foundBestResult var.setState(state) var.newPosition(gBestPosition, rng) positions = sorted(positions) print ('Positions visited (%d):' % len(positions)), positions assert (max(positions) <= maxValue) assert (min(positions) <= minValue) assert (len(positions) <= (int(round(((maxValue - minValue) / stepSize))) + 1))
'Test that we can converge on the right answer.'
def _testConvergence(self, varClass, minValue, maxValue, targetValue, iterations=100):
gBestPosition = targetValue lBestPosition = targetValue foundBestPosition = None foundBestResult = None rng = random.Random() rng.seed(42) var = varClass(min=minValue, max=maxValue) for _ in xrange(iterations): pos = var.getPosition() if (self.verbosity >= 1): print ('pos: %f' % pos), if (self.verbosity >= 2): print var result = (1.0 - abs((pos - lBestPosition))) if ((foundBestResult is None) or (result > foundBestResult)): foundBestResult = result foundBestPosition = pos state = var.getState() state['bestPosition'] = foundBestPosition state['bestResult'] = foundBestResult var.setState(state) var.newPosition(gBestPosition, rng) print ('Target: %f, Converged on: %f' % (targetValue, pos)) assert (abs((pos - targetValue)) < 0.001)
def run(self):
  """Run unit tests on this module."""
  self.verbosity = 0
  # Valid-position sweeps for float and int variables.
  self._testValidPositions(varClass=PermuteFloat, minValue=2.1,
                           maxValue=5.1, stepSize=0.5)
  self._testValidPositions(varClass=PermuteInt, minValue=2, maxValue=11,
                           stepSize=3)
  self._testValidPositions(varClass=PermuteInt, minValue=2, maxValue=11,
                           stepSize=1)
  # Convergence toward targets at the edges and middle of the range.
  self._testConvergence(varClass=PermuteFloat, minValue=2.1, maxValue=5.1,
                        targetValue=5.0)
  self._testConvergence(varClass=PermuteFloat, minValue=2.1, maxValue=5.1,
                        targetValue=2.2)
  self._testConvergence(varClass=PermuteFloat, minValue=2.1, maxValue=5.1,
                        targetValue=3.5)
  self._testConvergence(varClass=PermuteInt, minValue=1, maxValue=20,
                        targetValue=19)
  self._testConvergence(varClass=PermuteInt, minValue=1, maxValue=20,
                        targetValue=1)
  self._testChoices()
def __init__(self, hsObj, resultsDB, flattenedPermuteVars, swarmId=None,
             newFarFrom=None, evolveFromState=None, newFromClone=None,
             newParticleId=False):
  """Create a particle. There are 3 fundamentally different ways to
  instantiate a particle:

  1. From scratch, at generation index #0, with a new particleId.
     required: swarmId; optional: newFarFrom;
     must be None: evolveFromState, newFromClone
  2. From savedState: generation index is incremented from the saved value
     and the particleId is preserved.
     required: evolveFromState;
     must be None: swarmId, newFarFrom, newFromClone
  3. As a clone of another particle: same generationIdx, same position,
     different particleId (when newParticleId is True). Call newPosition()
     or agitate() afterwards to move it.
     required: newFromClone;
     must be None: swarmId, newFarFrom, evolveFromState

  hsObj: the HypersearchV2 instance
  resultsDB: the ResultsDB instance that holds all the model results
  flattenedPermuteVars: dict of (key, PermuteVariable) pairs of the
      flattened permutation variables as read from the permutations file
  swarmId: encoder-names string of the form 'encoder1.encoder2'
      (creation method #1)
  newFarFrom: optional list of other particleState dicts to stay far away
      from (creation method #1)
  evolveFromState: particle state dict to evolve (creation method #2)
  newFromClone: particle state dict to clone (creation method #3)
  newParticleId: only applicable with newFromClone; give the clone a new
      particle ID
  """
  self._hsObj = hsObj
  self.logger = hsObj.logger
  self._resultsDB = resultsDB
  # Deterministic RNG so runs are reproducible.
  self._rng = random.Random()
  self._rng.seed(42)

  def _setupVars(flattenedPermuteVars):
    # Keep only the variables that belong to this particle's encoders and
    # prime each PermuteChoices with its per-choice result history.
    allowedEncoderNames = self.swarmId.split('.')
    self.permuteVars = copy.deepcopy(flattenedPermuteVars)
    varNames = self.permuteVars.keys()
    for varName in varNames:
      if ':' in varName:
        if varName.split(':')[0] not in allowedEncoderNames:
          self.permuteVars.pop(varName)
          continue
      if isinstance(self.permuteVars[varName], PermuteChoices):
        if self._hsObj._speculativeParticles:
          maxGenIdx = None
        else:
          maxGenIdx = self.genIdx - 1
        resultsPerChoice = self._resultsDB.getResultsPerChoice(
            swarmId=self.swarmId, maxGenIdx=maxGenIdx, varName=varName)
        self.permuteVars[varName].setResultsPerChoice(
            resultsPerChoice.values())

  if swarmId is not None:
    # Creation method #1: brand-new particle.
    assert (evolveFromState is None)
    assert (newFromClone is None)
    self.swarmId = swarmId
    self.particleId = '%s.%s' % (str(self._hsObj._workerID),
                                 str(Particle._nextParticleID))
    Particle._nextParticleID += 1
    self.genIdx = 0
    _setupVars(flattenedPermuteVars)
    if newFarFrom is not None:
      # Push each variable away from where other particles sit.
      for varName in self.permuteVars.iterkeys():
        otherPositions = []
        for particleState in newFarFrom:
          otherPositions.append(
              particleState['varStates'][varName]['position'])
        self.permuteVars[varName].pushAwayFrom(otherPositions, self._rng)
        self._rng.seed(str(otherPositions))
  elif evolveFromState is not None:
    # Creation method #2: evolve from saved state.
    assert (swarmId is None)
    assert (newFarFrom is None)
    assert (newFromClone is None)
    self.particleId = evolveFromState['id']
    self.genIdx = evolveFromState['genIdx'] + 1
    self.swarmId = evolveFromState['swarmId']
    _setupVars(flattenedPermuteVars)
    self.initStateFrom(self.particleId, evolveFromState, newBest=True)
    self.newPosition()
  elif newFromClone is not None:
    # Creation method #3: clone another particle.
    assert (swarmId is None)
    assert (newFarFrom is None)
    assert (evolveFromState is None)
    self.particleId = newFromClone['id']
    if newParticleId:
      self.particleId = '%s.%s' % (str(self._hsObj._workerID),
                                   str(Particle._nextParticleID))
      Particle._nextParticleID += 1
    self.genIdx = newFromClone['genIdx']
    self.swarmId = newFromClone['swarmId']
    _setupVars(flattenedPermuteVars)
    self.initStateFrom(self.particleId, newFromClone, newBest=False)
  else:
    assert False, 'invalid creation parameters'
  self.logger.debug('Created particle: %s' % str(self))
def getState(self):
  """Get the particle state as a dict. This is enough information to
  instantiate this particle on another worker.
  """
  varStates = dict()
  for varName, var in self.permuteVars.iteritems():
    varStates[varName] = var.getState()
  return dict(id=self.particleId,
              genIdx=self.genIdx,
              swarmId=self.swarmId,
              varStates=varStates)
def initStateFrom(self, particleId, particleState, newBest):
  """Init all of our variable positions, velocities, and optionally the
  best result and best position from the given particle.

  If newBest is true, we get the best result and position for this new
  generation from the resultsDB. This is used when evolving a particle
  because the bestResult and position as stored was the best AT THE TIME
  THAT PARTICLE STARTED TO RUN and does not include the best since that
  particle completed.
  """
  if newBest:
    (bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
  else:
    bestResult = bestPosition = None
  varStates = particleState['varStates']
  for varName in varStates.keys():
    varState = copy.deepcopy(varStates[varName])
    # Overwrite the saved best with the up-to-date best when requested.
    if newBest:
      varState['bestResult'] = bestResult
    if bestPosition is not None:
      varState['bestPosition'] = bestPosition[varName]
    self.permuteVars[varName].setState(varState)
def copyEncoderStatesFrom(self, particleState):
  """Copy all encoder variables from particleState into this particle.

  particleState: dict produced by a particle's getState() method
  """
  allowedToMove = True
  for varName in particleState['varStates']:
    # Encoder variables are the ones with a ':' in their name.
    if ':' in varName:
      if varName not in self.permuteVars:
        continue
      state = copy.deepcopy(particleState['varStates'][varName])
      # Park the variable exactly at the copied position.
      state['_position'] = state['position']
      state['bestPosition'] = state['position']
      if not allowedToMove:
        state['velocity'] = 0
      self.permuteVars[varName].setState(state)
      if allowedToMove:
        # Give the variable a fresh random velocity.
        self.permuteVars[varName].resetVelocity(self._rng)
def copyVarStatesFrom(self, particleState, varNames):
  """Copy specific variables from particleState into this particle.

  particleState: dict produced by a particle's getState() method
  varNames: which variables to copy
  """
  allowedToMove = True
  for varName in particleState['varStates']:
    if varName in varNames:
      if varName not in self.permuteVars:
        continue
      state = copy.deepcopy(particleState['varStates'][varName])
      # Park the variable exactly at the copied position.
      state['_position'] = state['position']
      state['bestPosition'] = state['position']
      if not allowedToMove:
        state['velocity'] = 0
      self.permuteVars[varName].setState(state)
      if allowedToMove:
        # Give the variable a fresh random velocity.
        self.permuteVars[varName].resetVelocity(self._rng)
def getPosition(self):
  """Return the position of this particle. This returns a dict() of key
  value pairs where each key is the name of the flattened permutation
  variable and the value is its chosen value.

  retval: dict() of flattened permutation choices
  """
  result = dict()
  for varName, value in self.permuteVars.iteritems():
    result[varName] = value.getPosition()
  return result
@staticmethod
def getPositionFromState(pState):
  """Return the position of a particle given its state dict.

  retval: dict() of particle position; keys are the variable names,
      values are their positions
  """
  result = dict()
  for varName, value in pState['varStates'].iteritems():
    result[varName] = value['position']
  return result
def agitate(self):
  """Agitate this particle so that it is likely to go to a new position.
  Every time agitate is called, the particle is jiggled an even greater
  amount.

  retval: None
  """
  # Agitate every variable, then actually move.
  for varName, var in self.permuteVars.iteritems():
    var.agitate()
  self.newPosition()
def newPosition(self, whichVars=None):
  """Choose a new position based on results obtained so far from all other
  particles.

  whichVars: If not None, only move these variables
  retval: new position
  """
  # Find the global best position for the relevant generation, if any.
  globalBestPosition = None
  if self._hsObj._speculativeParticles:
    genIdx = self.genIdx
  else:
    genIdx = self.genIdx - 1
  if genIdx >= 0:
    (bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId,
                                                              genIdx)
    if bestModelId is not None:
      (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(
          bestModelId)
      globalBestPosition = Particle.getPositionFromState(particleState)
  # Move each (selected) variable.
  for varName, var in self.permuteVars.iteritems():
    if (whichVars is not None) and (varName not in whichVars):
      continue
    if globalBestPosition is None:
      var.newPosition(None, self._rng)
    else:
      var.newPosition(globalBestPosition[varName], self._rng)
  position = self.getPosition()
  # Only build the (expensive) debug message when it will be emitted.
  if self.logger.getEffectiveLevel() <= logging.DEBUG:
    msg = StringIO.StringIO()
    print >>msg, ('New particle position: \n%s' %
                  pprint.pformat(position, indent=4))
    print >>msg, 'Particle variables:'
    for varName, var in self.permuteVars.iteritems():
      print >>msg, (' %s: %s' % (varName, str(var)))
    self.logger.debug(msg.getvalue())
    msg.close()
  return position
def __init__(self, jobID, jobsDAO, logLevel=None):
  """Construct a ModelChooser for the given job.

  jobID: job identifier to select models for
  jobsDAO: client jobs database access object
  logLevel: optional logging level to apply to our logger
  """
  self._jobID = jobID
  self._cjDB = jobsDAO
  self._lastUpdateAttemptTime = 0
  initLogging(verbose=True)
  self.logger = logging.getLogger('.'.join(['com.numenta',
                                            self.__class__.__module__,
                                            self.__class__.__name__]))
  if logLevel is not None:
    self.logger.setLevel(logLevel)
  self.logger.info('Created new ModelChooser for job %s' % str(jobID))
def updateResultsForJob(self, forceUpdate=True):
  """Chooses the best model for a given job.

  forceUpdate: (True/False). If True, the update will ignore all the
      restrictions on the minimum time to update and the minimum number of
      records to update. This should typically only be set to true if the
      model has completed running.
  """
  # Throttle: don't re-evaluate too often unless forced.
  updateInterval = time.time() - self._lastUpdateAttemptTime
  if (updateInterval < self._MIN_UPDATE_INTERVAL) and not forceUpdate:
    return
  self.logger.info(
      'Attempting model selection for jobID=%d: time=%f lastUpdate=%f'
      % (self._jobID, time.time(), self._lastUpdateAttemptTime))
  # The DB-side timestamp acts as a cross-worker mutex for the sweep.
  timestampUpdated = self._cjDB.jobUpdateSelectionSweep(
      self._jobID, self._MIN_UPDATE_INTERVAL)
  if not timestampUpdated:
    self.logger.info(
        'Unable to update selection sweep timestamp: jobID=%d updateTime=%f'
        % (self._jobID, self._lastUpdateAttemptTime))
    if not forceUpdate:
      return
  self._lastUpdateAttemptTime = time.time()
  self.logger.info(
      'Succesfully updated selection sweep timestamp jobid=%d updateTime=%f'
      % (self._jobID, self._lastUpdateAttemptTime))
  minUpdateRecords = self._MIN_UPDATE_THRESHOLD
  jobResults = self._getJobResults()
  if forceUpdate or (jobResults is None):
    minUpdateRecords = 0
  (candidateIDs, bestMetric) = self._cjDB.modelsGetCandidates(
      self._jobID, minUpdateRecords)
  self.logger.info('Candidate models=%s, metric=%s, jobID=%s'
                   % (candidateIDs, bestMetric, self._jobID))
  if len(candidateIDs) == 0:
    return
  self._jobUpdateCandidate(candidateIDs[0], bestMetric, results=jobResults)
def __init__(self, options):
  """
  Parameters:
  ----------------------------------------------------------------------
  options: NupicRunPermutations options dict
  retval: nothing
  """
  self.__cjDAO = _clientJobsDB()
  self._options = options
  self.__searchJob = None
  # NOTE: name preserved from original (including 'Metrcs' spelling) --
  # other methods reference self.__foundMetrcsKeySet.
  self.__foundMetrcsKeySet = set()
  self._workers = None
  return
def runNewSearch(self):
  """Start a new hypersearch job and monitor it to completion.

  retval: nothing
  """
  self.__searchJob = self.__startSearch()
  self.monitorSearchJob()
def pickupSearch(self):
  """Pick up the latest search from a saved jobID and monitor it to
  completion.

  retval: nothing
  """
  self.__searchJob = self.loadSavedHyperSearchJob(
      permWorkDir=self._options['permWorkDir'],
      outputLabel=self._options['outputLabel'])
  self.monitorSearchJob()
'Parameters: retval: nothing'
def monitorSearchJob(self):
assert (self.__searchJob is not None) jobID = self.__searchJob.getJobID() startTime = time.time() lastUpdateTime = datetime.now() expectedNumModels = self.__searchJob.getExpectedNumModels(searchMethod=self._options['searchMethod']) lastNumFinished = 0 finishedModelIDs = set() finishedModelStats = _ModelStats() lastWorkerState = None lastJobResults = None lastModelMilestones = None lastEngStatus = None hyperSearchFinished = False while (not hyperSearchFinished): jobInfo = self.__searchJob.getJobStatus(self._workers) hyperSearchFinished = jobInfo.isFinished() modelIDs = self.__searchJob.queryModelIDs() _emit(Verbosity.DEBUG, ('Current number of models is %d (%d of them completed)' % (len(modelIDs), len(finishedModelIDs)))) if (len(modelIDs) > 0): checkModelIDs = [] for modelID in modelIDs: if (modelID not in finishedModelIDs): checkModelIDs.append(modelID) del modelIDs if checkModelIDs: _emit(Verbosity.DEBUG, ('Checking %d models...' % len(checkModelIDs))) errorCompletionMsg = None for (i, modelInfo) in enumerate(_iterModels(checkModelIDs)): _emit(Verbosity.DEBUG, ('[%s] Checking completion: %s' % (i, modelInfo))) if modelInfo.isFinished(): finishedModelIDs.add(modelInfo.getModelID()) finishedModelStats.update(modelInfo) if (modelInfo.getCompletionReason().isError() and (not errorCompletionMsg)): errorCompletionMsg = modelInfo.getCompletionMsg() metrics = modelInfo.getReportMetrics() self.__foundMetrcsKeySet.update(metrics.keys()) numFinished = len(finishedModelIDs) if (numFinished != lastNumFinished): lastNumFinished = numFinished if (expectedNumModels is None): expModelsStr = '' else: expModelsStr = ('of %s' % expectedNumModels) stats = finishedModelStats print ('<jobID: %s> %s %s models finished [success: %s; %s: %s; %s: %s; %s: %s; %s: %s; %s: %s; %s: %s]' % (jobID, numFinished, expModelsStr, (stats.numCompletedEOF + stats.numCompletedStopped), ('EOF' if stats.numCompletedEOF else 'eof'), stats.numCompletedEOF, ('STOPPED' if stats.numCompletedStopped else 
'stopped'), stats.numCompletedStopped, ('KILLED' if stats.numCompletedKilled else 'killed'), stats.numCompletedKilled, ('ERROR' if stats.numCompletedError else 'error'), stats.numCompletedError, ('ORPHANED' if stats.numCompletedError else 'orphaned'), stats.numCompletedOrphaned, ('UNKNOWN' if stats.numCompletedOther else 'unknown'), stats.numCompletedOther)) if errorCompletionMsg: print ('ERROR MESSAGE: %s' % errorCompletionMsg) workerState = jobInfo.getWorkerState() if (workerState != lastWorkerState): print ('##>> UPDATED WORKER STATE: \n%s' % pprint.pformat(workerState, indent=4)) lastWorkerState = workerState jobResults = jobInfo.getResults() if (jobResults != lastJobResults): print ('####>> UPDATED JOB RESULTS: \n%s (elapsed time: %g secs)' % (pprint.pformat(jobResults, indent=4), (time.time() - startTime))) lastJobResults = jobResults modelMilestones = jobInfo.getModelMilestones() if (modelMilestones != lastModelMilestones): print ('##>> UPDATED MODEL MILESTONES: \n%s' % pprint.pformat(modelMilestones, indent=4)) lastModelMilestones = modelMilestones engStatus = jobInfo.getEngStatus() if (engStatus != lastEngStatus): print ('##>> UPDATED STATUS: \n%s' % engStatus) lastEngStatus = engStatus if (not hyperSearchFinished): if (self._options['timeout'] != None): if ((datetime.now() - lastUpdateTime) > timedelta(minutes=self._options['timeout'])): print 'Timeout reached, exiting' self.__cjDAO.jobCancel(jobID) sys.exit(1) time.sleep(1) modelIDs = self.__searchJob.queryModelIDs() print ('Evaluated %s models' % len(modelIDs)) print 'HyperSearch finished!' jobInfo = self.__searchJob.getJobStatus(self._workers) print ('Worker completion message: %s' % jobInfo.getWorkerCompletionMsg())
def _launchWorkers(self, cmdLine, numWorkers):
  """Spawn worker subprocesses, each executing cmdLine via the shell.

  Popen handles are collected in self._workers; each worker's stdout/stderr
  is redirected to an anonymous temporary file.

  Parameters:
  ----------------------------------------------------------------------
  cmdLine:    command line string for each worker
  numWorkers: number of workers to launch
  """
  self._workers = []
  for _ in range(numWorkers):
    outFile = tempfile.TemporaryFile()
    errFile = tempfile.TemporaryFile()
    proc = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True,
                            stdin=None, stdout=outFile, stderr=errFile)
    self._workers.append(proc)
def __startSearch(self):
  """Start HyperSearch, either inline ("dryRun" action) or by submitting a
  job to the client-jobs database and launching local worker processes.

  Parameters:
  ----------------------------------------------------------------------
  retval: the new _HyperSearchJob instance representing the HyperSearch job
  """
  params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options, forRunning=True)
  if (self._options['action'] == 'dryRun'):
    # Run the hypersearch worker in-process.
    args = [sys.argv[0], ('--params=%s' % json.dumps(params))]
    print
    print '=================================================================='
    print 'RUNNING PERMUTATIONS INLINE as "DRY RUN"...'
    print '=================================================================='
    jobID = hypersearch_worker.main(args)
  else:
    # Submit the job record, then spawn local worker subprocesses for it.
    cmdLine = _setUpExports(self._options['exports'])
    # '$HYPERSEARCH' is a placeholder expanded by the job engine.
    cmdLine += '$HYPERSEARCH'
    maxWorkers = self._options['maxWorkers']
    jobID = self.__cjDAO.jobInsert(client='GRP', cmdLine=cmdLine, params=json.dumps(params), minimumWorkers=1, maximumWorkers=maxWorkers, jobType=self.__cjDAO.JOB_TYPE_HS)
    cmdLine = ('python -m nupic.swarming.hypersearch_worker --jobID=%d' % jobID)
    self._launchWorkers(cmdLine, maxWorkers)
  searchJob = _HyperSearchJob(jobID)
  # Persist the jobID so a later invocation can pick the search back up.
  self.__saveHyperSearchJobID(permWorkDir=self._options['permWorkDir'], outputLabel=self._options['outputLabel'], hyperSearchJob=searchJob)
  if (self._options['action'] == 'dryRun'):
    print ('Successfully executed "dry-run" hypersearch, jobID=%d' % jobID)
  else:
    print ('Successfully submitted new HyperSearch job, jobID=%d' % jobID)
  _emit(Verbosity.DEBUG, ('Each worker executing the command line: %s' % (cmdLine,)))
  return searchJob
def peekSearchJob(self):
  """Return this runner's _HyperSearchJob instance.

  NOTE: only available after run(); asserts if no search has been started.

  Parameters:
  ----------------------------------------------------------------------
  retval: _HyperSearchJob instance
  """
  job = self.__searchJob
  assert job is not None
  return job
def getDiscoveredMetricsKeys(self):
  """Return a tuple of all metrics keys discovered while running HyperSearch.

  NOTE: an optimization so the client can build the report csv without
  pre-scanning all modelInfos.

  Parameters:
  ----------------------------------------------------------------------
  retval: tuple of metrics key names
  """
  discovered = self.__foundMetrcsKeySet
  return tuple(discovered)
@classmethod
def printModels(cls, options):
  """Print a listing of the experiments that would take place, without
  actually executing them.

  Parameters:
  ----------------------------------------------------------------------
  options: NupicRunPermutations options dict
  retval:  nothing
  """
  print 'Generating experiment requests...'
  # NOTE(review): searchParams is computed but never used or printed in the
  # visible code — the actual listing logic appears to be missing or lives
  # elsewhere; confirm before relying on this method.
  searchParams = _ClientJobUtils.makeSearchJobParamsDict(options=options)
'Prints all available results in the given HyperSearch job and emits model information to the permutations report csv. The job may be completed or still in progress. Parameters: options: NupicRunPermutations options dict replaceReport: True to replace existing report csv, if any; False to append to existing report csv, if any hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved jobID, if any metricsKeys: sequence of report metrics key names to include in report; if None, will pre-scan all modelInfos to generate a complete list of metrics key names. retval: model parameters'
@classmethod def generateReport(cls, options, replaceReport, hyperSearchJob, metricsKeys):
if (hyperSearchJob is None): hyperSearchJob = cls.loadSavedHyperSearchJob(permWorkDir=options['permWorkDir'], outputLabel=options['outputLabel']) modelIDs = hyperSearchJob.queryModelIDs() bestModel = None metricstmp = set() searchVar = set() for modelInfo in _iterModels(modelIDs): if modelInfo.isFinished(): vars = modelInfo.getParamLabels().keys() searchVar.update(vars) metrics = modelInfo.getReportMetrics() metricstmp.update(metrics.keys()) if (metricsKeys is None): metricsKeys = metricstmp reportWriter = _ReportCSVWriter(hyperSearchJob=hyperSearchJob, metricsKeys=metricsKeys, searchVar=searchVar, outputDirAbsPath=options['permWorkDir'], outputLabel=options['outputLabel'], replaceReport=replaceReport) modelStats = _ModelStats() print '\nResults from all experiments:' print '----------------------------------------------------------------' searchParams = hyperSearchJob.getParams() (optimizationMetricKey, maximizeMetric) = _PermutationUtils.getOptimizationMetricInfo(searchParams) formatStr = None foundMetricsKeySet = set(metricsKeys) sortedMetricsKeys = [] jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID()) if (jobInfo.cancel == 1): raise Exception(jobInfo.workerCompletionMsg) try: results = json.loads(jobInfo.results) except Exception as e: print 'json.loads(jobInfo.results) raised an exception. 
Here is some info to help with debugging:' print 'jobInfo: ', jobInfo print 'jobInfo.results: ', jobInfo.results print 'EXCEPTION: ', e raise bestModelNum = results['bestModel'] bestModelIterIndex = None totalWallTime = 0 totalRecords = 0 scoreModelIDDescList = [] for (i, modelInfo) in enumerate(_iterModels(modelIDs)): reportWriter.emit(modelInfo) totalRecords += modelInfo.getNumRecords() format = '%Y-%m-%d %H:%M:%S' startTime = modelInfo.getStartTime() if modelInfo.isFinished(): endTime = modelInfo.getEndTime() st = datetime.strptime(startTime, format) et = datetime.strptime(endTime, format) totalWallTime += (et - st).seconds modelStats.update(modelInfo) expDesc = modelInfo.getModelDescription() reportMetrics = modelInfo.getReportMetrics() optimizationMetrics = modelInfo.getOptimizationMetrics() if (modelInfo.getModelID() == bestModelNum): bestModel = modelInfo bestModelIterIndex = i bestMetric = optimizationMetrics.values()[0] if optimizationMetrics: assert (len(optimizationMetrics) == 1), ('expected 1 opt key, but got %d (%s) in %s' % (len(optimizationMetrics), optimizationMetrics, modelInfo)) if modelInfo.getCompletionReason().isEOF(): scoreModelIDDescList.append((optimizationMetrics.values()[0], modelInfo.getModelID(), modelInfo.getGeneratedDescriptionFile(), modelInfo.getParamLabels())) print ('[%d] Experiment %s\n(%s):' % (i, modelInfo, expDesc)) if (modelInfo.isFinished() and (not (modelInfo.getCompletionReason().isStopped or modelInfo.getCompletionReason().isEOF()))): print ('>> COMPLETION MESSAGE: %s' % modelInfo.getCompletionMsg()) if reportMetrics: foundMetricsKeySet.update(reportMetrics.iterkeys()) if (len(sortedMetricsKeys) != len(foundMetricsKeySet)): sortedMetricsKeys = sorted(foundMetricsKeySet) maxKeyLen = max([len(k) for k in sortedMetricsKeys]) formatStr = (' %%-%ds' % (maxKeyLen + 2)) for key in sortedMetricsKeys: if (key in reportMetrics): if (key == optimizationMetricKey): m = ('%r (*)' % reportMetrics[key]) else: m = ('%r' % 
reportMetrics[key]) print (formatStr % (key + ':')), m print print '--------------------------------------------------------------' if (len(modelIDs) > 0): print ('%d experiments total (%s).\n' % (len(modelIDs), ('all completed successfully' if ((modelStats.numCompletedKilled + modelStats.numCompletedEOF) == len(modelIDs)) else ('WARNING: %d models have not completed or there were errors' % (len(modelIDs) - ((modelStats.numCompletedKilled + modelStats.numCompletedEOF) + modelStats.numCompletedStopped)))))) if (modelStats.numStatusOther > 0): print ('ERROR: models with unexpected status: %d' % modelStats.numStatusOther) print ('WaitingToStart: %d' % modelStats.numStatusWaitingToStart) print ('Running: %d' % modelStats.numStatusRunning) print ('Completed: %d' % modelStats.numStatusCompleted) if (modelStats.numCompletedOther > 0): print (' ERROR: models with unexpected completion reason: %d' % modelStats.numCompletedOther) print (' ran to EOF: %d' % modelStats.numCompletedEOF) print (' ran to stop signal: %d' % modelStats.numCompletedStopped) print (' were orphaned: %d' % modelStats.numCompletedOrphaned) print (' killed off: %d' % modelStats.numCompletedKilled) print (' failed: %d' % modelStats.numCompletedError) assert (modelStats.numStatusOther == 0), ('numStatusOther=%s' % modelStats.numStatusOther) assert (modelStats.numCompletedOther == 0), ('numCompletedOther=%s' % modelStats.numCompletedOther) else: print '0 experiments total.' 
print global gCurrentSearch jobStatus = hyperSearchJob.getJobStatus(gCurrentSearch._workers) jobResults = jobStatus.getResults() if ('fieldContributions' in jobResults): print 'Field Contributions:' pprint.pprint(jobResults['fieldContributions'], indent=4) else: print 'Field contributions info not available' if (bestModel is not None): maxKeyLen = max([len(k) for k in sortedMetricsKeys]) maxKeyLen = max(maxKeyLen, len(optimizationMetricKey)) formatStr = (' %%-%ds' % (maxKeyLen + 2)) bestMetricValue = bestModel.getOptimizationMetrics().values()[0] optimizationMetricName = bestModel.getOptimizationMetrics().keys()[0] print print ('Best results on the optimization metric %s (maximize=%s):' % (optimizationMetricName, maximizeMetric)) print ('[%d] Experiment %s (%s):' % (bestModelIterIndex, bestModel, bestModel.getModelDescription())) print (formatStr % (optimizationMetricName + ':')), bestMetricValue print print ('Total number of Records processed: %d' % totalRecords) print print ('Total wall time for all models: %d' % totalWallTime) hsJobParams = hyperSearchJob.getParams() if (options['genTopNDescriptions'] > 0): print ('\nGenerating description files for top %d models...' 
% options['genTopNDescriptions']) scoreModelIDDescList.sort() scoreModelIDDescList = scoreModelIDDescList[0:options['genTopNDescriptions']] i = (-1) for (score, modelID, description, paramLabels) in scoreModelIDDescList: i += 1 outDir = os.path.join(options['permWorkDir'], ('model_%d' % i)) print ('Generating description file for model %s at %s' % (modelID, outDir)) if (not os.path.exists(outDir)): os.makedirs(outDir) base_description_path = os.path.join(options['outDir'], 'description.py') base_description_relpath = os.path.relpath(base_description_path, start=outDir) description = description.replace("importBaseDescription('base.py', config)", ("importBaseDescription('%s', config)" % base_description_relpath)) fd = open(os.path.join(outDir, 'description.py'), 'wb') fd.write(description) fd.close() fd = open(os.path.join(outDir, 'params.csv'), 'wb') writer = csv.writer(fd) colNames = paramLabels.keys() colNames.sort() writer.writerow(colNames) row = [paramLabels[x] for x in colNames] writer.writerow(row) fd.close() print 'Generating model params file...' mod = imp.load_source('description', os.path.join(outDir, 'description.py')) model_description = mod.descriptionInterface.getModelDescription() fd = open(os.path.join(outDir, 'model_params.py'), 'wb') fd.write(('%s\nMODEL_PARAMS = %s' % (getCopyrightHead(), pprint.pformat(model_description)))) fd.close() print reportWriter.finalize() return model_description
@classmethod
def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel):
  """Instantiate a _HyperSearchJob from the jobID persisted on disk.

  Parameters:
  ----------------------------------------------------------------------
  permWorkDir: directory path of the saved jobID file
  outputLabel: label string incorporated into the saved-jobID file name
  retval:      _HyperSearchJob instance; raises if the file is not found
  """
  savedJobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir,
                                          outputLabel=outputLabel)
  return _HyperSearchJob(nupicJobID=savedJobID)
@classmethod
def __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob):
  """Persist the given _HyperSearchJob's jobID to a pickle file, backing up
  any existing file first.

  Parameters:
  ----------------------------------------------------------------------
  permWorkDir:    directory path for the saved jobID file
  outputLabel:    label string incorporated into the file name
  hyperSearchJob: _HyperSearchJob instance
  retval:         nothing
  """
  filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
                                               outputLabel=outputLabel)
  if os.path.exists(filePath):
    _backupFile(filePath)
  payload = {'hyperSearchJobID': hyperSearchJob.getJobID()}
  with open(filePath, 'wb') as jobIdPickleFile:
    pickle.dump(payload, jobIdPickleFile)
@classmethod
def __loadHyperSearchJobID(cls, permWorkDir, outputLabel):
  """Read the persisted HyperSearch jobID back from its pickle file.

  Parameters:
  ----------------------------------------------------------------------
  permWorkDir: directory path of the saved jobID file
  outputLabel: label string incorporated into the file name
  retval:      HyperSearch jobID; raises if the file is not found
  """
  filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
                                               outputLabel=outputLabel)
  with open(filePath, 'r') as jobIdPickleFile:
    jobInfo = pickle.load(jobIdPickleFile)
  return jobInfo['hyperSearchJobID']
@classmethod
def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
  """Compute the path of the file that stores the HyperSearch jobID.

  Parameters:
  ----------------------------------------------------------------------
  permWorkDir: directory for the saved jobID file
  outputLabel: label string incorporated into the file name
  retval:      filepath where the HyperSearch jobID is stored
  """
  fileName = ('%s_HyperSearchJobID.pkl' % (outputLabel,))
  return os.path.join(permWorkDir, fileName)
def __init__(self, hyperSearchJob, metricsKeys, searchVar, outputDirAbsPath, outputLabel, replaceReport):
  """Report CSV writer for a HyperSearch job.

  Parameters:
  ----------------------------------------------------------------------
  hyperSearchJob:   _HyperSearchJob instance
  metricsKeys:      sequence of report metrics key names to include
  searchVar:        set of searched variable names (CSV columns)
  outputDirAbsPath: directory for the report CSV file (absolute path)
  outputLabel:      label string incorporated into the CSV file name
  replaceReport:    True to replace an existing report csv; False to append
  retval:           nothing
  """
  self.__searchJob = hyperSearchJob
  self.__searchJobID = hyperSearchJob.getJobID()
  self.__sortedMetricsKeys = sorted(metricsKeys)
  self.__outputDirAbsPath = os.path.abspath(outputDirAbsPath)
  self.__outputLabel = outputLabel
  self.__replaceReport = replaceReport
  self.__sortedVariableNames = searchVar
  # Lazily initialized by __openAndInitCSVFile() on the first emit() call.
  self.__csvFileObj = None
  self.__reportCSVPath = None
  self.__backupCSVPath = None
def emit(self, modelInfo):
  """Emit one model's info as a row of the report csv file.

  Opens/initializes the csv file on first call. Column order must match the
  header written by __openAndInitCSVFile().

  Parameters:
  ----------------------------------------------------------------------
  modelInfo: _NupicModelInfo instance
  retval:    nothing
  """
  if (self.__csvFileObj is None):
    # Lazy init: open the csv and write the header based on this first model.
    self.__openAndInitCSVFile(modelInfo)
  # NOTE: local name shadows the 'csv' module within this method.
  csv = self.__csvFileObj
  # Trailing commas on the py2 print statements suppress newlines so all
  # fields land on one csv row.
  print >>csv, ('%s, ' % self.__searchJobID),
  print >>csv, ('%s, ' % modelInfo.getModelID()),
  print >>csv, ('%s, ' % modelInfo.statusAsString()),
  if modelInfo.isFinished():
    print >>csv, ('%s, ' % modelInfo.getCompletionReason()),
  else:
    print >>csv, 'NA, ',
  if (not modelInfo.isWaitingToStart()):
    print >>csv, ('%s, ' % modelInfo.getStartTime()),
  else:
    print >>csv, 'NA, ',
  if modelInfo.isFinished():
    dateFormat = '%Y-%m-%d %H:%M:%S'
    startTime = modelInfo.getStartTime()
    endTime = modelInfo.getEndTime()
    print >>csv, ('%s, ' % endTime),
    st = datetime.strptime(startTime, dateFormat)
    et = datetime.strptime(endTime, dateFormat)
    # Elapsed runtime in seconds.
    print >>csv, ('%s, ' % str((et - st).seconds)),
  else:
    # Unfinished: both endTime and runtime columns are NA.
    print >>csv, 'NA, ',
    print >>csv, 'NA, ',
  print >>csv, ('%s, ' % str(modelInfo.getModelDescription())),
  print >>csv, ('%s, ' % str(modelInfo.getNumRecords())),
  paramLabelsDict = modelInfo.getParamLabels()
  for key in self.__sortedVariableNames:
    if (key in paramLabelsDict):
      print >>csv, ('%s, ' % paramLabelsDict[key]),
    else:
      print >>csv, 'None, ',
  metrics = modelInfo.getReportMetrics()
  for key in self.__sortedMetricsKeys:
    value = metrics.get(key, 'NA')
    value = str(value)
    # Keep the row on one line even if a metric value contains newlines.
    value = value.replace('\n', ' ')
    print >>csv, ('%s, ' % value),
  # Terminate the row.
  print >>csv
'Close file and print report/backup csv file paths Parameters: retval: nothing'
def finalize(self):
if (self.__csvFileObj is not None): self.__csvFileObj.close() self.__csvFileObj = None print ('Report csv saved in %s' % (self.__reportCSVPath,)) if self.__backupCSVPath: print ('Previous report csv file was backed up to %s' % (self.__backupCSVPath,)) else: print 'Nothing was written to report csv file.'
def __openAndInitCSVFile(self, modelInfo):
  """Back up any old report csv, open the report csv (append or overwrite per
  self.__replaceReport), and emit the header row.

  Sets self.__csvFileObj, self.__backupCSVPath, and self.__reportCSVPath.

  Parameters:
  ----------------------------------------------------------------------
  modelInfo: first _NupicModelInfo instance passed to emit()
  retval:    nothing
  """
  basePath = self.__outputDirAbsPath
  reportCSVName = ('%s_Report.csv' % (self.__outputLabel,))
  reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)
  backupCSVPath = None
  if os.path.exists(reportCSVPath):
    backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)
  if self.__replaceReport:
    mode = 'w'
  else:
    mode = 'a'
  # NOTE: local name shadows the 'csv' module within this method.
  csv = self.__csvFileObj = open(reportCSVPath, mode)
  if ((not self.__replaceReport) and backupCSVPath):
    # Separate the appended rows from the pre-existing content.
    print >>csv
    print >>csv
  # Header row; trailing commas keep all column names on one line.
  print >>csv, 'jobID, ',
  print >>csv, 'modelID, ',
  print >>csv, 'status, ',
  print >>csv, 'completionReason, ',
  print >>csv, 'startTime, ',
  print >>csv, 'endTime, ',
  print >>csv, 'runtime(s), ',
  print >>csv, 'expDesc, ',
  print >>csv, 'numRecords, ',
  for key in self.__sortedVariableNames:
    print >>csv, ('%s, ' % key),
  for key in self.__sortedMetricsKeys:
    print >>csv, ('%s, ' % key),
  print >>csv
def __init__(self, nupicJobID):
  """_NupicJob constructor: look up the job record and cache its params.

  Parameters:
  ----------------------------------------------------------------------
  nupicJobID: Nupic Client JobID of the job
  """
  self.__nupicJobID = nupicJobID
  jobInfo = _clientJobsDB().jobInfo(nupicJobID)
  assert (jobInfo is not None), ('jobID=%s not found' % nupicJobID)
  assert (jobInfo.jobId == nupicJobID), ('%s != %s' % (jobInfo.jobId, nupicJobID))
  _emit(Verbosity.DEBUG, ('_NupicJob: \n%s' % pprint.pformat(jobInfo, indent=4)))
  # Decode the JSON params column, if present.
  if (jobInfo.params is not None):
    self.__params = json.loads(jobInfo.params)
  else:
    self.__params = None
def __repr__(self):
  """Return a debug representation like 'ClassName(jobID=1234)'."""
  clsName = self.__class__.__name__
  return ('%s(jobID=%s)' % (clsName, self.__nupicJobID))
def getJobStatus(self, workers):
  """Return a fresh _NupicJob.JobStatus snapshot for this job.

  Parameters:
  ----------------------------------------------------------------------
  workers: if this job was launched outside the nupic job engine, an array
           of subprocess Popen instances, one per worker; otherwise None
  retval:  _NupicJob.JobStatus instance
  """
  return self.JobStatus(self.__nupicJobID, workers)
def getJobID(self):
  """Semi-private accessor for the jobID.

  Parameters:
  ----------------------------------------------------------------------
  retval: Nupic Client JobID of this _NupicJob instance
  """
  jobID = self.__nupicJobID
  return jobID
def getParams(self):
  """Semi-private accessor for the job-specific params.

  Parameters:
  ----------------------------------------------------------------------
  retval: job params dict corresponding to the JSON params value returned
          by ClientJobsDAO.jobInfo(); may be None
  """
  params = self.__params
  return params
def __init__(self, nupicJobID, workers):
  """_NupicJob.JobStatus constructor: snapshot the job record, overriding the
  status from locally-launched worker processes when applicable.

  Parameters:
  ----------------------------------------------------------------------
  nupicJobID: Nupic ClientJob ID
  workers:    if this job was launched outside the Nupic job engine, an
              array of subprocess Popen instances, one per worker;
              otherwise None
  retval:     nothing
  """
  jobInfo = _clientJobsDB().jobInfo(nupicJobID)
  assert (jobInfo.jobId == nupicJobID), ('%s != %s' % (jobInfo.jobId, nupicJobID))
  if (workers is not None):
    # Derive status from the local workers: RUNNING while any subprocess is
    # still alive (poll() returns None), COMPLETED once all have exited.
    runningCount = 0
    for worker in workers:
      retCode = worker.poll()
      if (retCode is None):
        runningCount += 1
    if (runningCount > 0):
      status = cjdao.ClientJobsDAO.STATUS_RUNNING
    else:
      status = cjdao.ClientJobsDAO.STATUS_COMPLETED
    # jobInfo is a namedtuple; _replace returns an updated copy.
    jobInfo = jobInfo._replace(status=status)
  _emit(Verbosity.DEBUG, ('JobStatus: \n%s' % pprint.pformat(jobInfo, indent=4)))
  self.__jobInfo = jobInfo
def statusAsString(self):
  """Return the job status as a human-readable string."""
  status = self.__jobInfo.status
  return status
def isWaitingToStart(self):
  """Return True if the job has not been started yet."""
  return self.__jobInfo.status == self.__nupicJobStatus_NotStarted
def isStarting(self):
  """Return True if the job is in the 'starting' state."""
  return self.__jobInfo.status == self.__nupicJobStatus_Starting
def isRunning(self):
  """Return True if the job is currently running."""
  return self.__jobInfo.status == self.__nupicJobStatus_running
def isFinished(self):
  """Return True if the job has finished (with success or failure)."""
  return self.__jobInfo.status == self.__nupicJobStatus_completed
def getCompletionReason(self):
  """Return the _JobCompletionReason for this job.

  NOTE: it's an error to call this method if isFinished() would return False.

  Parameters:
  ----------------------------------------------------------------------
  retval: _JobCompletionReason instance
  """
  assert self.isFinished(), ('Too early to tell: %s' % self)
  reason = self.__jobInfo.completionReason
  return _JobCompletionReason(reason)
def getCompletionMsg(self):
  """Return the job completion message as a string.

  NOTE: it's an error to call this method if isFinished() would return False.
  """
  assert self.isFinished(), ('Too early to tell: %s' % self)
  msg = self.__jobInfo.completionMsg
  return ('%s' % msg)
def getWorkerCompletionMsg(self):
  """Return the worker-generated completion message as a string.

  NOTE: it's an error to call this method if isFinished() would return False.
  """
  assert self.isFinished(), ('Too early to tell: %s' % self)
  msg = self.__jobInfo.workerCompletionMsg
  return ('%s' % msg)
def getStartTime(self):
  """Return the job processing start time as a string.

  NOTE: it's an error to call this method if isWaitingToStart() would
  return True.
  """
  assert (not self.isWaitingToStart()), ('Too early to tell: %s' % self)
  startTime = self.__jobInfo.startTime
  return ('%s' % startTime)
def getEndTime(self):
  """Return the job processing end time as a string.

  NOTE: it's an error to call this method if isFinished() would return False.
  """
  assert self.isFinished(), ('Too early to tell: %s' % self)
  endTime = self.__jobInfo.endTime
  return ('%s' % endTime)
def getWorkerState(self):
  """Return the worker-state field decoded from JSON, or None if unset."""
  rawState = self.__jobInfo.engWorkerState
  if rawState is None:
    return None
  return json.loads(rawState)
def getResults(self):
  """Return the job results field decoded from JSON, or None if unset."""
  rawResults = self.__jobInfo.results
  if rawResults is None:
    return None
  return json.loads(rawResults)
def getModelMilestones(self):
  """Return the model-milestones field decoded from JSON, or None if unset."""
  rawMilestones = self.__jobInfo.engModelMilestones
  if rawMilestones is None:
    return None
  return json.loads(rawMilestones)
def getEngStatus(self):
  """Return the engine status field (string used for progress messages)."""
  engStatus = self.__jobInfo.engStatus
  return engStatus
def __init__(self, reason):
  """Wrap a job completion reason.

  Parameters:
  ----------------------------------------------------------------------
  reason: completion reason value from ClientJobsDAO.jobInfo()
  """
  self.__reason = reason
def __init__(self, nupicJobID):
  """HyperSearch job wrapper.

  Parameters:
  ----------------------------------------------------------------------
  nupicJobID: Nupic Client JobID of a HyperSearch job
  retval:     nothing
  """
  super(_HyperSearchJob, self).__init__(nupicJobID)
  # Cached expected-model count; None means not yet determined.
  self.__expectedNumModels = None
def queryModelIDs(self):
  """Query the DB for the model IDs of all currently instantiated models
  associated with this HyperSearch job. See also: _iterModels().

  Parameters:
  ----------------------------------------------------------------------
  retval: a sequence (tuple) of Nupic modelIDs
  """
  modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(self.getJobID())
  return tuple(pair[0] for pair in modelCounterPairs)
def getExpectedNumModels(self, searchMethod):
  """Return the total number of expected models if known, -1 otherwise.

  NOTE: determining this can take a LONG time for searches with a huge
  number of permutations; the cached value is returned as-is.

  Parameters:
  ----------------------------------------------------------------------
  searchMethod: 'v2' is the only method currently supported
  retval:       total number of expected models if known; -1 if unknown
  """
  expected = self.__expectedNumModels
  return expected
@classmethod
def makeSearchJobParamsDict(cls, options, forRunning=False):
  """Build the HyperSearch params dict suitable for json-encoding and passing
  as the params argument to ClientJobsDAO.jobInsert().

  Parameters:
  ----------------------------------------------------------------------
  options:    NupicRunPermutations options dict
  forRunning: True if the params are for running a Hypersearch job; False
              for introspection only
  retval:     dict of HyperSearch parameters
  """
  searchMethod = options['searchMethod']
  if searchMethod != 'v2':
    raise Exception(('Unsupported search method: %r' % searchMethod))
  hsVersion = 'v2'
  maxModels = options['maxPermutations']
  # A dry run without an explicit permutation cap evaluates a single model.
  if maxModels is None and options['action'] == 'dryRun':
    maxModels = 1
  params = {'hsVersion': hsVersion, 'maxModels': maxModels}
  useTerminators = options['useTerminators']
  if useTerminators is not None:
    params['useTerminators'] = useTerminators
  if forRunning:
    params['persistentJobGUID'] = str(uuid.uuid1())
  # Experiment description source, in priority order.
  if options['permutationsScriptPath']:
    params['permutationsPyFilename'] = options['permutationsScriptPath']
  elif options['expDescConfig']:
    params['description'] = options['expDescConfig']
  else:
    with open(options['expDescJsonPath'], mode='r') as fp:
      params['description'] = json.load(fp)
  return params
@classmethod
def getOptimizationMetricInfo(cls, searchJobParams):
  """Retrieve the optimization key name and optimization direction.

  Parameters:
  ----------------------------------------------------------------------
  searchJobParams: params dict as passed to the Hypersearch constructor
  retval:          (optimizationMetricKey, maximize) where maximize is True
                   if the metric should be maximized, False if minimized
  """
  if searchJobParams['hsVersion'] != 'v2':
    raise RuntimeError(('Unsupported hypersearch version "%s"' % searchJobParams['hsVersion']))
  search = HypersearchV2(searchParams=searchJobParams)
  return search.getOptimizationMetricInfo()