'Parameters:
rawInfo: A single model information element as returned by
ClientJobsDAO.modelsInfo()
retval: nothing.'
| def __init__(self, rawInfo):
| self.__rawInfo = rawInfo
self.__cachedResults = None
assert (self.__rawInfo.params is not None)
self.__cachedParams = None
|
'Parameters:
retval: Representation of this _NupicModelInfo instance.'
| def __repr__(self):
| return ('%s(jobID=%s, modelID=%s, status=%s, completionReason=%s, updateCounter=%s, numRecords=%s)' % ('_NupicModelInfo', self.__rawInfo.jobId, self.__rawInfo.modelId, self.__rawInfo.status, self.__rawInfo.completionReason, self.__rawInfo.updateCounter, self.__rawInfo.numRecords))
|
'Parameters:
retval: Nupic modelID associated with this model info.'
| def getModelID(self):
| return self.__rawInfo.modelId
|
'Parameters:
retval: Human-readable string representation of the model\'s status.'
| def statusAsString(self):
| return ('%s' % self.__rawInfo.status)
|
'Parameters:
retval: Printable description of the model.'
| def getModelDescription(self):
| params = self.__unwrapParams()
if ('experimentName' in params):
return params['experimentName']
else:
paramSettings = self.getParamLabels()
items = []
for (key, value) in paramSettings.items():
items.append(('%s_%s' % (key, value)))
return '.'.join(items)
|
'Parameters:
retval: Contents of the sub-experiment description file for
this model'
| def getGeneratedDescriptionFile(self):
| return self.__rawInfo.genDescription
|
'Parameters:
retval: The number of records processed by the model.'
| def getNumRecords(self):
| return self.__rawInfo.numRecords
|
'Parameters:
retval: a dictionary of model parameter labels. For each entry
the key is the name of the parameter and the value
is the value chosen for it.'
| def getParamLabels(self):
| params = self.__unwrapParams()
if ('particleState' in params):
retval = dict()
queue = [(pair, retval) for pair in params['particleState']['varStates'].iteritems()]
while (len(queue) > 0):
(pair, output) = queue.pop()
(k, v) = pair
if (('position' in v) and ('bestPosition' in v) and ('velocity' in v)):
output[k] = v['position']
else:
if (k not in output):
output[k] = dict()
queue.extend(((pair, output[k]) for pair in v.iteritems()))
return retval
|
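The queue-based flattening in getParamLabels() above can be hard to follow. Below is a minimal standalone sketch of the same traversal over a hypothetical varStates dict; leaf entries are the dicts carrying 'position', 'bestPosition', and 'velocity'. It uses items() in place of the original's Python 2 iteritems() so it runs standalone.

def flattenVarStates(varStates):
    # Mirrors getParamLabels(): walk the nested varStates dict with an
    # explicit queue, keeping only the chosen 'position' of each leaf.
    retval = dict()
    queue = [(pair, retval) for pair in varStates.items()]
    while queue:
        (k, v), output = queue.pop()
        if 'position' in v and 'bestPosition' in v and 'velocity' in v:
            output[k] = v['position']        # leaf: keep the chosen position
        else:
            output.setdefault(k, dict())     # interior node: descend
            queue.extend((pair, output[k]) for pair in v.items())
    return retval

# Hypothetical particle state fragment:
varStates = {
    'alpha': {'position': 0.1, 'bestPosition': 0.2, 'velocity': 0.01},
    'encoders': {
        'consumption': {'position': 21, 'bestPosition': 21, 'velocity': 0.5},
    },
}
print(flattenVarStates(varStates))
# {'alpha': 0.1, 'encoders': {'consumption': 21}}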
'Unwraps self.__rawInfo.params into the equivalent python dictionary
and caches it in self.__cachedParams. Returns the unwrapped params
Parameters:
retval: Model params dictionary corresponding to the JSON
as returned in ClientJobsDAO.modelsInfo()[x].params'
| def __unwrapParams(self):
| if (self.__cachedParams is None):
self.__cachedParams = json.loads(self.__rawInfo.params)
assert (self.__cachedParams is not None), ('%s resulted in None' % self.__rawInfo.params)
return self.__cachedParams
|
'Retrieves a dictionary of metrics designated for the report
Parameters:
retval: a dictionary of metrics that were collected for the model or
an empty dictionary if there aren\'t any.'
| def getReportMetrics(self):
| return self.__unwrapResults().reportMetrics
|
'Retrieves a dictionary of metrics designated for optimization
Parameters:
retval: a dictionary of optimization metrics that were collected
for the model or an empty dictionary if there aren\'t any.'
| def getOptimizationMetrics(self):
| return self.__unwrapResults().optimizationMetrics
|
'Retrieves a dictionary of metrics that combines all report and
optimization metrics
Parameters:
retval: a dictionary of optimization metrics that were collected
for the model; an empty dictionary if there aren\'t any.'
| def getAllMetrics(self):
| result = self.getReportMetrics()
result.update(self.getOptimizationMetrics())
return result
|
'Unwraps self.__rawInfo.results and caches it in self.__cachedResults;
Returns the unwrapped results
Parameters:
retval: ModelResults namedtuple instance'
| def __unwrapResults(self):
| if (self.__cachedResults is None):
if (self.__rawInfo.results is not None):
resultList = json.loads(self.__rawInfo.results)
assert (len(resultList) == 2), ('Expected 2 elements, but got %s (%s).' % (len(resultList), resultList))
self.__cachedResults = self.ModelResults(reportMetrics=resultList[0], optimizationMetrics=resultList[1])
else:
self.__cachedResults = self.ModelResults(reportMetrics={}, optimizationMetrics={})
return self.__cachedResults
|
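For reference, a hypothetical sketch of the two-element JSON layout that __unwrapResults() decodes; the metric names are made up, and ModelResults is assumed to be a namedtuple with exactly these two fields, as the method's retval suggests.

import json
from collections import namedtuple

ModelResults = namedtuple('ModelResults',
                          ['reportMetrics', 'optimizationMetrics'])

# Results are stored as a JSON list: [reportMetrics, optimizationMetrics]
rawResults = json.dumps([{'rmse': 0.42}, {'errorMetric': 0.17}])
resultList = json.loads(rawResults)
assert len(resultList) == 2
results = ModelResults(reportMetrics=resultList[0],
                       optimizationMetrics=resultList[1])
print(results.optimizationMetrics)   # {'errorMetric': 0.17}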
'Parameters:
retval: True if the model has not been started yet'
| def isWaitingToStart(self):
| waiting = (self.__rawInfo.status == self.__nupicModelStatus_notStarted)
return waiting
|
'Parameters:
retval: True if the model is currently running'
| def isRunning(self):
| running = (self.__rawInfo.status == self.__nupicModelStatus_running)
return running
|
'Parameters:
retval: True if the model\'s processing has completed (either with
success or failure).'
| def isFinished(self):
| finished = (self.__rawInfo.status == self.__nupicModelStatus_completed)
return finished
|
'Returns _ModelCompletionReason.
NOTE: it\'s an error to call this method if isFinished() would return False.
Parameters:
retval: _ModelCompletionReason instance'
| def getCompletionReason(self):
| assert self.isFinished(), ('Too early to tell: %s' % self)
return _ModelCompletionReason(self.__rawInfo.completionReason)
|
'Returns model completion message.
NOTE: it\'s an error to call this method if isFinished() would return False.
Parameters:
retval: completion message'
| def getCompletionMsg(self):
| assert self.isFinished(), ('Too early to tell: %s' % self)
return self.__rawInfo.completionMsg
|
'Returns model evaluation start time.
NOTE: it\'s an error to call this method if isWaitingToStart() would
return True.
Parameters:
retval: model evaluation start time'
| def getStartTime(self):
| assert (not self.isWaitingToStart()), ('Too early to tell: %s' % self)
return ('%s' % self.__rawInfo.startTime)
|
'Returns model evaluation end time.
NOTE: it\'s an error to call this method if isFinished() would return False.
Parameters:
retval: model evaluation end time'
| def getEndTime(self):
| assert self.isFinished(), ('Too early to tell: %s' % self)
return ('%s' % self.__rawInfo.endTime)
|
'Instantiate our results database
Parameters:
hsObj: Reference to the HypersearchV2 instance'
| def __init__(self, hsObj):
| self._hsObj = hsObj
self._allResults = []
self._errModels = set()
self._numErrModels = 0
self._completedModels = set()
self._numCompletedModels = 0
self._modelIDToIdx = dict()
self._bestResult = numpy.inf
self._bestModelID = None
self._swarmBestOverall = dict()
self._swarmNumParticlesPerGeneration = dict()
self._modifiedSwarmGens = set()
self._maturedSwarmGens = set()
self._particleBest = dict()
self._particleLatestGenIdx = dict()
self._swarmIdToIndexes = dict()
self._paramsHashToIndexes = dict()
|
'Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that was previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric'
| def update(self, modelID, modelParams, modelParamsHash, metricResult, completed, completionReason, matured, numRecords):
| assert (modelParamsHash is not None)
if completed:
matured = True
if ((metricResult is not None) and matured and (completionReason in [ClientJobsDAO.CMPL_REASON_EOF, ClientJobsDAO.CMPL_REASON_STOPPED])):
if self._hsObj._maximize:
errScore = ((-1) * metricResult)
else:
errScore = metricResult
if (errScore < self._bestResult):
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info(('New best model after %d evaluations: errScore %g on model %s' % (len(self._allResults), self._bestResult, self._bestModelID)))
else:
errScore = numpy.inf
if (completed and (completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN])):
errScore = numpy.inf
hidden = True
else:
hidden = False
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if (completionReason == ClientJobsDAO.CMPL_REASON_ERROR):
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
wasHidden = False
if (modelID not in self._modelIDToIdx):
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams, modelParamsHash=modelParamsHash, errScore=errScore, completed=completed, matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = (len(self._allResults) - 1)
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if (not hidden):
if (swarmId in self._swarmIdToIndexes):
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while (genIdx >= len(numPsEntry)):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
if (entry['modelParamsHash'] != modelParamsHash):
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
if (hidden and (not wasHidden)):
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if (matured and (not hidden)):
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if (errScore < oldResult):
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
prevGenIdx = self._particleLatestGenIdx.get(particleId, (-1))
if ((not hidden) and (genIdx > prevGenIdx)):
self._particleLatestGenIdx[particleId] = genIdx
elif (hidden and (not wasHidden) and (genIdx == prevGenIdx)):
self._particleLatestGenIdx[particleId] = (genIdx - 1)
if (not hidden):
swarmId = modelParams['particleState']['swarmId']
if (not (swarmId in self._swarmBestOverall)):
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while (genIdx >= len(bestScores)):
bestScores.append((None, numpy.inf))
if (errScore < bestScores[genIdx][1]):
bestScores[genIdx] = (modelID, errScore)
if (not hidden):
key = (swarmId, genIdx)
if (not (key in self._maturedSwarmGens)):
self._modifiedSwarmGens.add(key)
return errScore
|
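A minimal sketch of the score canonicalization applied at the top of update(): every metric result is mapped to an error score where lower is always better, negating when the search maximizes its metric and substituting numpy.inf when no result is available yet.

import numpy

def canonicalizeErrScore(metricResult, maximize):
    # Mirrors update(): normalize so that 'lower is better' holds uniformly.
    if metricResult is None:
        return numpy.inf
    return -metricResult if maximize else metricResult

print(canonicalizeErrScore(0.9, maximize=True))    # -0.9
print(canonicalizeErrScore(0.2, maximize=False))   # 0.2
print(canonicalizeErrScore(None, maximize=False))  # inf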
'Return the number of models that completed with errors.
Parameters:
retval: # of models'
| def getNumErrModels(self):
| return self._numErrModels
|
'Return the list of model IDs that completed with errors.
Parameters:
retval: list of model IDs'
| def getErrModelIds(self):
| return list(self._errModels)
|
'Return total number of models that completed.
Parameters:
retval: # of models that completed'
| def getNumCompletedModels(self):
| return self._numCompletedModels
|
'Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
paramsHash: paramsHash to look for
retval: modelId, or None if not found'
| def getModelIDFromParamsHash(self, paramsHash):
| entryIdx = self._paramsHashToIndexes.get(paramsHash, None)
if (entryIdx is not None):
return self._allResults[entryIdx]['modelID']
else:
return None
|
'Return the total # of models we have in our database (if swarmId is
None) or in a specific swarm.
Parameters:
swarmId: A string representation of the sorted list of encoders
in this swarm. For example \'__address_encoder.__gym_encoder\'
includeHidden: If False, this will only return the number of models
that are not hidden (i.e. orphaned, etc.)
retval: numModels'
| def numModels(self, swarmId=None, includeHidden=False):
| if includeHidden:
if (swarmId is None):
return len(self._allResults)
else:
return len(self._swarmIdToIndexes.get(swarmId, []))
else:
if (swarmId is None):
entries = self._allResults
else:
entries = [self._allResults[entryIdx] for entryIdx in self._swarmIdToIndexes.get(swarmId, [])]
return len([entry for entry in entries if (not entry['hidden'])])
|
'Return the model ID of the model with the best result so far and
its score on the optimize metric. If swarmId is None, then it returns
the global best, otherwise it returns the best for the given swarm
for all generations up to and including genIdx.
Parameters:
swarmId: A string representation of the sorted list of encoders in this
swarm. For example \'__address_encoder.__gym_encoder\'
genIdx: consider the best in all generations up to and including this
generation if not None.
retval: (modelID, result)'
| def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
| if (swarmId is None):
return (self._bestModelID, self._bestResult)
else:
if (swarmId not in self._swarmBestOverall):
return (None, numpy.inf)
genScores = self._swarmBestOverall[swarmId]
bestModelId = None
bestScore = numpy.inf
for (i, (modelId, errScore)) in enumerate(genScores):
if ((genIdx is not None) and (i > genIdx)):
break
if (errScore < bestScore):
bestScore = errScore
bestModelId = modelId
return (bestModelId, bestScore)
|
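The per-swarm branch above scans the per-generation (modelID, errScore) list kept in _swarmBestOverall. The standalone sketch below mirrors that scan with hypothetical scores.

import numpy

# Hypothetical per-generation bests, in the shape of _swarmBestOverall[swarmId]
genScores = [(101, 0.50), (104, 0.35), (109, 0.40)]

def bestUpTo(genScores, genIdx=None):
    # Best (modelId, errScore) over generations 0..genIdx (all if genIdx is None)
    bestModelId, bestScore = None, numpy.inf
    for i, (modelId, errScore) in enumerate(genScores):
        if genIdx is not None and i > genIdx:
            break
        if errScore < bestScore:
            bestModelId, bestScore = modelId, errScore
    return (bestModelId, bestScore)

print(bestUpTo(genScores))            # (104, 0.35)
print(bestUpTo(genScores, genIdx=0))  # (101, 0.5)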
'Return particle info for a specific modelId.
Parameters:
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured)'
| def getParticleInfo(self, modelId):
| entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'], entry['completed'], entry['matured'])
|
'Return a list of particleStates for all particles we know about in
the given swarm, their model Ids, and metric results.
Parameters:
swarmId: A string representation of the sorted list of encoders in this
swarm. For example \'__address_encoder.__gym_encoder\'
genIdx: If not None, only return particles at this specific generation
index.
completed: If not None, only return particles of the given state (either
completed if \'completed\' is True, or running if \'completed\'
is False).
matured: If not None, only return particles of the given state (either
matured if \'matured\' is True, or not matured if \'matured\'
is False). Note that any model which has completed is also
considered matured.
lastDescendent: If True, only return particles that are the last descendent,
that is, the highest generation index for a given particle Id
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don\'t have a result yet
completed: list of completed booleans
matured: list of matured booleans'
| def getParticleInfos(self, swarmId=None, genIdx=None, completed=None, matured=None, lastDescendent=False):
| if (swarmId is not None):
entryIdxs = self._swarmIdToIndexes.get(swarmId, [])
else:
entryIdxs = range(len(self._allResults))
if (len(entryIdxs) == 0):
return ([], [], [], [], [])
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
entry = self._allResults[idx]
if (swarmId is not None):
assert (not entry['hidden'])
modelParams = entry['modelParams']
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if ((genIdx is not None) and (particleGenIdx != genIdx)):
continue
if ((completed is not None) and (completed != isCompleted)):
continue
if ((matured is not None) and (matured != isMatured)):
continue
if (lastDescendent and (self._particleLatestGenIdx[particleId] != particleGenIdx)):
continue
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
|
'Return a list of particleStates for all particles in the given
swarm generation that have been orphaned.
Parameters:
swarmId: A string representation of the sorted list of encoders in this
swarm. For example \'__address_encoder.__gym_encoder\'
genIdx: If not None, only return particles at this specific generation
index.
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don\'t have a result yet
completed: list of completed booleans
matured: list of matured booleans'
| def getOrphanParticleInfos(self, swarmId, genIdx):
| entryIdxs = range(len(self._allResults))
if (len(entryIdxs) == 0):
return ([], [], [], [], [])
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
entry = self._allResults[idx]
if (not entry['hidden']):
continue
modelParams = entry['modelParams']
if (modelParams['particleState']['swarmId'] != swarmId):
continue
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if ((genIdx is not None) and (particleGenIdx != genIdx)):
continue
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
|
'Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)'
| def getMaturedSwarmGenerations(self):
| result = []
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
if (key in self._maturedSwarmGens):
self._modifiedSwarmGens.remove(key)
continue
if ((genIdx >= 1) and (not ((swarmId, (genIdx - 1)) in self._maturedSwarmGens))):
continue
(_, _, errScores, completedFlags, maturedFlags) = self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if ((numMatured >= self._hsObj._minParticlesPerSwarm) and (numMatured == len(maturedFlags))):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
return result
|
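The maturity test at the heart of getMaturedSwarmGenerations() reduces to two conditions on a generation's matured flags; a sketch with hypothetical flags and scores:

import numpy

maturedFlags = numpy.array([True, True, True, True, True])
errScores = numpy.array([0.42, 0.35, 0.58, 0.31, 0.47])
minParticlesPerSwarm = 5   # stands in for self._hsObj._minParticlesPerSwarm

numMatured = maturedFlags.sum()
# Matured only when the generation is full AND every particle has matured
if numMatured >= minParticlesPerSwarm and numMatured == len(maturedFlags):
    print('generation matured; best errScore = %g' % errScores.min())  # 0.31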
'Return the generation index of the first generation in the given
swarm that does not have minNumParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
swarmId: A string representation of the sorted list of encoders in this
swarm. For example \'__address_encoder.__gym_encoder\'
minNumParticles: minimum number of particles required for a full
generation.
retval: generation index, or None if no particles at all.'
| def firstNonFullGeneration(self, swarmId, minNumParticles):
| if (not (swarmId in self._swarmNumParticlesPerGeneration)):
return None
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
numPsPerGen = numpy.array(numPsPerGen)
firstNonFull = numpy.where((numPsPerGen < minNumParticles))[0]
if (len(firstNonFull) == 0):
return len(numPsPerGen)
else:
return firstNonFull[0]
|
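A standalone sketch of the numpy.where() scan above, with hypothetical particle counts per generation:

import numpy

numPsPerGen = numpy.array([5, 5, 3, 1])   # hypothetical counts
minNumParticles = 5

nonFull = numpy.where(numPsPerGen < minNumParticles)[0]
# If every generation is full, the next (empty) generation index is returned
firstNonFull = len(numPsPerGen) if len(nonFull) == 0 else int(nonFull[0])
print(firstNonFull)   # 2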
'Return the generation index of the highest generation in the given
swarm.
Parameters:
swarmId: A string representation of the sorted list of encoders in this
swarm. For example \'__address_encoder.__gym_encoder\'
retval: generation index'
| def highestGeneration(self, swarmId):
| numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
return (len(numPsPerGen) - 1)
|
'Return the best score and position for a given particle. The position
is given as a dict, with varName:varPosition items in it.
Parameters:
particleId: which particle
retval: (bestResult, bestPosition)'
| def getParticleBest(self, particleId):
| return self._particleBest.get(particleId, (None, None))
|
'Return a dict of the errors obtained on models that were run with
each value from a PermuteChoice variable.
For example, if a PermuteChoice variable has the following choices:
[\'a\', \'b\', \'c\']
The dict will have 3 elements. The keys are the stringified choiceVars,
and each value is a tuple containing (choiceVar, errors) where choiceVar is
the original form of the choiceVar (before stringification) and errors is
the list of errors received from models that used the specific choice:
retval:
{\'a\': (\'a\', [0.1, 0.2, 0.3]), \'b\': (\'b\', [0.5, 0.1, 0.6]), \'c\': (\'c\', [])}
Parameters:
swarmId: swarm Id of the swarm to retrieve info from
maxGenIdx: max generation index to consider from other models, ignored
if None
varName: which variable to retrieve
retval: dict of the errors obtained for each choice, as described above.'
| def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
| results = dict()
(allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId, genIdx=None, matured=True)
for (particleState, resultErr) in itertools.izip(allParticles, resultErrs):
if (maxGenIdx is not None):
if (particleState['genIdx'] > maxGenIdx):
continue
if (resultErr == numpy.inf):
continue
position = Particle.getPositionFromState(particleState)
varPosition = position[varName]
varPositionStr = str(varPosition)
if (varPositionStr in results):
results[varPositionStr][1].append(resultErr)
else:
results[varPositionStr] = (varPosition, [resultErr])
return results
|
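A hypothetical illustration of the structure getResultsPerChoice() returns for choices ['a', 'b', 'c'], matching the docstring above, with a small consumer that averages the errors per choice:

resultsPerChoice = {
    'a': ('a', [0.1, 0.2, 0.3]),
    'b': ('b', [0.5, 0.1, 0.6]),
    'c': ('c', []),   # no matured models used this choice yet
}
for label, (choice, errors) in sorted(resultsPerChoice.items()):
    avg = sum(errors) / len(errors) if errors else None
    print('%s -> %s (avg err %s)' % (label, choice, avg))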
'Instantiate the HypersearchV2 instance.
Parameters:
searchParams: a dict of the job\'s search parameters. The format is:
persistentJobGUID: REQUIRED.
Persistent, globally-unique identifier for this job
for use in constructing persistent model checkpoint
keys. MUST be compatible with S3 key-naming rules, but
MUST NOT contain forward slashes. This GUID is
expected to retain its global uniqueness across
clusters and cluster software updates (unlike the
record IDs in the Engine\'s jobs table, which recycle
upon table schema change and software update). In the
future, this may also be instrumental for checkpoint
garbage collection.
permutationsPyFilename:
OPTIONAL - path to permutations.py file
permutationsPyContents:
OPTIONAL - JSON encoded string with
contents of permutations.py file
descriptionPyContents:
OPTIONAL - JSON encoded string with
contents of base description.py file
description: OPTIONAL - JSON description of the search
createCheckpoints: OPTIONAL - Whether to create checkpoints
useTerminators: OPTIONAL - True or False (default from config.xml). When set
to False, the model and swarm terminators
are disabled
maxModels: OPTIONAL - max # of models to generate
NOTE: This is a deprecated location for this
setting. Now, it should be specified through
the maxModels variable within the permutations
file, or maxModels in the JSON description
dummyModel: OPTIONAL - Either (True/False) or a dict of parameters
for a dummy model. If this key is absent,
a real model is trained.
See utils.py/OPFDummyModel runner for the
schema of the dummy parameters
speculativeParticles: OPTIONAL - True or False (default obtained from
nupic.hypersearch.speculative.particles.default
configuration property). See note below.
NOTE: The caller must provide just ONE of the following to describe the
hypersearch:
1.) permutationsPyFilename
OR 2.) permutationsPyContents & descriptionPyContents
OR 3.) description
The schema for the description element can be found at:
"py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json"
NOTE about speculativeParticles: If true (not 0), hypersearch workers will
go ahead and create and run particles in subsequent sprints and
generations before the current generation or sprint has been completed. If
false, a worker will wait in a sleep loop until the current generation or
sprint has finished before choosing the next particle position or going
into the next sprint. When true, the best model can be found faster, but
results are less repeatable due to the randomness of when each worker
completes each particle. This property can be overridden via the
speculativeParticles element of the Hypersearch job params.
workerID: our unique Hypersearch worker ID
cjDAO: ClientJobsDB Data Access Object
jobID: job ID for this hypersearch job
logLevel: override logging level to this value, if not None'
| def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None, logLevel=None):
| self.logger = logging.getLogger('.'.join(['com.numenta', self.__class__.__module__, self.__class__.__name__]))
if (logLevel is not None):
self.logger.setLevel(logLevel)
random.seed(42)
self._searchParams = searchParams
self._workerID = workerID
self._cjDAO = cjDAO
self._jobID = jobID
self.logger.info(('searchParams: \n%s' % pprint.pformat(clippedObj(searchParams))))
self._createCheckpoints = self._searchParams.get('createCheckpoints', False)
self._maxModels = self._searchParams.get('maxModels', None)
if (self._maxModels == (-1)):
self._maxModels = None
self._predictionCacheMaxRecords = self._searchParams.get('predictionCacheMaxRecords', None)
self._speculativeParticles = self._searchParams.get('speculativeParticles', bool(int(Configuration.get('nupic.hypersearch.speculative.particles.default'))))
self._speculativeWaitSecondsMax = float(Configuration.get('nupic.hypersearch.speculative.particles.sleepSecondsMax'))
self._maxBranching = int(Configuration.get('nupic.hypersearch.max.field.branching'))
self._minFieldContribution = float(Configuration.get('nupic.hypersearch.min.field.contribution'))
self._jobCancelled = False
if ('useTerminators' in self._searchParams):
useTerminators = self._searchParams['useTerminators']
useTerminators = str(int(useTerminators))
Configuration.set('nupic.hypersearch.enableModelTermination', useTerminators)
Configuration.set('nupic.hypersearch.enableModelMaturity', useTerminators)
Configuration.set('nupic.hypersearch.enableSwarmTermination', useTerminators)
if ('NTA_TEST_exitAfterNModels' in os.environ):
self._maxModels = int(os.environ['NTA_TEST_exitAfterNModels'])
self._dummyModel = self._searchParams.get('dummyModel', None)
self._tempDir = None
try:
if ('description' in self._searchParams):
if (('permutationsPyFilename' in self._searchParams) or ('permutationsPyContents' in self._searchParams) or ('descriptionPyContents' in self._searchParams)):
raise RuntimeError("Either 'description', 'permutationsPyFilename' or'permutationsPyContents' & 'permutationsPyContents' should be specified, but not two or more of these at once.")
searchParamObj = self._searchParams
anomalyParams = searchParamObj['description'].get('anomalyParams', dict())
if (anomalyParams is None):
anomalyParams = dict()
if (('autoDetectWaitRecords' not in anomalyParams) or (anomalyParams['autoDetectWaitRecords'] is None)):
streamDef = self._getStreamDef(searchParamObj['description'])
from nupic.data.stream_reader import StreamReader
try:
streamReader = StreamReader(streamDef, isBlocking=False, maxTimeout=0, eofOnTimeout=True)
anomalyParams['autoDetectWaitRecords'] = streamReader.getDataRowCount()
except Exception:
anomalyParams['autoDetectWaitRecords'] = None
self._searchParams['description']['anomalyParams'] = anomalyParams
outDir = self._tempDir = tempfile.mkdtemp()
expGenerator([('--description=%s' % json.dumps(self._searchParams['description'])), '--version=v2', ('--outDir=%s' % outDir)])
permutationsScript = os.path.join(outDir, 'permutations.py')
elif ('permutationsPyFilename' in self._searchParams):
if (('description' in self._searchParams) or ('permutationsPyContents' in self._searchParams) or ('descriptionPyContents' in self._searchParams)):
raise RuntimeError("Either 'description', 'permutationsPyFilename' or 'permutationsPyContents' & 'permutationsPyContents' should be specified, but not two or more of these at once.")
permutationsScript = self._searchParams['permutationsPyFilename']
elif ('permutationsPyContents' in self._searchParams):
if (('description' in self._searchParams) or ('permutationsPyFilename' in self._searchParams)):
raise RuntimeError("Either 'description', 'permutationsPyFilename' or'permutationsPyContents' & 'permutationsPyContents' should be specified, but not two or more of these at once.")
assert ('descriptionPyContents' in self._searchParams)
outDir = self._tempDir = tempfile.mkdtemp()
permutationsScript = os.path.join(outDir, 'permutations.py')
fd = open(permutationsScript, 'w')
fd.write(self._searchParams['permutationsPyContents'])
fd.close()
fd = open(os.path.join(outDir, 'description.py'), 'w')
fd.write(self._searchParams['descriptionPyContents'])
fd.close()
else:
raise RuntimeError("Either 'description' or 'permutationsScript' must bespecified")
self._basePath = os.path.dirname(permutationsScript)
self._baseDescription = open(os.path.join(self._basePath, 'description.py')).read()
self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()
(modelDescription, _) = helpers.loadExperiment(self._basePath)
self._readPermutationsFile(permutationsScript, modelDescription)
if (self._cjDAO is not None):
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID, fieldName='genBaseDescription', curValue=None, newValue=self._baseDescription)
if updated:
permContents = open(permutationsScript).read()
self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID, fieldName='genPermutations', curValue=None, newValue=permContents)
if (self._dummyModelParamsFunc is not None):
if (self._dummyModel is None):
self._dummyModel = dict()
if (self.logger.getEffectiveLevel() <= logging.DEBUG):
msg = StringIO.StringIO()
print >>msg, 'Permutations file specifications: '
info = dict()
for key in ['_predictedField', '_permutations', '_flattenedPermutations', '_encoderNames', '_reportKeys', '_optimizeKey', '_maximize']:
info[key] = getattr(self, key)
print >>msg, pprint.pformat(info)
self.logger.debug(msg.getvalue())
msg.close()
self._resultsDB = ResultsDB(self)
self._swarmTerminator = SwarmTerminator()
self._hsState = None
self._maxUniqueModelAttempts = int(Configuration.get('nupic.hypersearch.maxUniqueModelAttempts'))
self._modelOrphanIntervalSecs = float(Configuration.get('nupic.hypersearch.modelOrphanIntervalSecs'))
self._maxPctErrModels = float(Configuration.get('nupic.hypersearch.maxPctErrModels'))
except:
if (self._tempDir is not None):
shutil.rmtree(self._tempDir)
self._tempDir = None
raise
return
|
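For orientation, a hypothetical minimal searchParams dict of the shape the constructor validates; exactly one of 'description', 'permutationsPyFilename', or the permutationsPyContents/descriptionPyContents pair may be supplied, and the DAO, worker ID, and job ID in the commented call are placeholders.

searchParams = {
    'persistentJobGUID': 'hypersearch-example-0001',   # no forward slashes
    'permutationsPyFilename': '/path/to/permutations.py',
    'createCheckpoints': False,
    'speculativeParticles': True,
}
# hs = HypersearchV2(searchParams, workerID='worker0', cjDAO=myDAO, jobID=42)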
'Generate stream definition based on the given model description.'
| def _getStreamDef(self, modelDescription):
| aggregationPeriod = {'days': 0, 'hours': 0, 'microseconds': 0, 'milliseconds': 0, 'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0}
aggFunctionsDict = {}
if ('aggregation' in modelDescription['streamDef']):
for key in aggregationPeriod.keys():
if (key in modelDescription['streamDef']['aggregation']):
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if ('fields' in modelDescription['streamDef']['aggregation']):
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
hasAggregation = False
for v in aggregationPeriod.values():
if (v != 0):
hasAggregation = True
break
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef
|
'Destructor; NOTE: this is not guaranteed to be called (bugs like
circular references could prevent it from being called).'
| def __del__(self):
| self.close()
return
|
'Deletes temporary system objects/files.'
| def close(self):
| if ((self._tempDir is not None) and os.path.isdir(self._tempDir)):
self.logger.debug('Removing temporary directory %r', self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return
|
'Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The \'report\' list from the permutations file.
This is a list of the items from each experiment\'s pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, \'key1:key2\'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate HTMPredictionModel results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
filename: Name of permutations file
retval: None'
| def _readPermutationsFile(self, filename, modelDescription):
| vars = {}
execfile(filename, globals(), vars)
self._reportKeys = vars.get('report', [])
self._filterFunc = vars.get('permutationFilter', None)
self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
self._predictedField = None
self._predictedFieldEncoder = None
self._fixedFields = None
self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
if (self._fastSwarmModelParams is not None):
encoders = self._fastSwarmModelParams['structuredParams']['modelParams']['sensorParams']['encoders']
self._fixedFields = []
for fieldName in encoders:
if (encoders[fieldName] is not None):
self._fixedFields.append(fieldName)
if ('fixedFields' in vars):
self._fixedFields = vars['fixedFields']
self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
if (self._minParticlesPerSwarm is None):
self._minParticlesPerSwarm = Configuration.get('nupic.hypersearch.minParticlesPerSwarm')
self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
self._killUselessSwarms = vars.get('killUselessSwarms', True)
self._inputPredictedField = vars.get('inputPredictedField', 'yes')
self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
self._tryAll3FieldCombinationsWTimestamps = vars.get('tryAll3FieldCombinationsWTimestamps', False)
minFieldContribution = vars.get('minFieldContribution', None)
if (minFieldContribution is not None):
self._minFieldContribution = minFieldContribution
maxBranching = vars.get('maxFieldBranching', None)
if (maxBranching is not None):
self._maxBranching = maxBranching
if ('maximize' in vars):
self._optimizeKey = vars['maximize']
self._maximize = True
elif ('minimize' in vars):
self._optimizeKey = vars['minimize']
self._maximize = False
else:
raise RuntimeError("Permutations file '%s' does not include a maximize or minimize metric.")
maxModels = vars.get('maxModels')
if (maxModels is not None):
if (self._maxModels is None):
self._maxModels = maxModels
else:
raise RuntimeError('It is an error to specify maxModels both in the job params AND in the permutations file.')
inferenceType = modelDescription['modelParams']['inferenceType']
if (not InferenceType.validate(inferenceType)):
raise ValueError(('Invalid inference type %s' % inferenceType))
if (inferenceType in [InferenceType.TemporalMultiStep, InferenceType.NontemporalMultiStep]):
classifierOnlyEncoder = None
for encoder in modelDescription['modelParams']['sensorParams']['encoders'].values():
if (encoder.get('classifierOnly', False) and (encoder['fieldname'] == vars.get('predictedField', None))):
classifierOnlyEncoder = encoder
break
if ((classifierOnlyEncoder is None) or (self._inputPredictedField == 'yes')):
self._searchType = HsSearchType.legacyTemporal
else:
self._searchType = HsSearchType.temporal
elif (inferenceType in [InferenceType.TemporalNextStep, InferenceType.TemporalAnomaly]):
self._searchType = HsSearchType.legacyTemporal
elif (inferenceType in (InferenceType.TemporalClassification, InferenceType.NontemporalClassification)):
self._searchType = HsSearchType.classification
else:
raise RuntimeError(('Unsupported inference type: %s' % inferenceType))
self._predictedField = vars.get('predictedField', None)
if (self._predictedField is None):
raise RuntimeError(("Permutations file '%s' does not have the required 'predictedField' variable" % filename))
if ('permutations' not in vars):
raise RuntimeError(("Permutations file '%s' does not define permutations" % filename))
if (not isinstance(vars['permutations'], dict)):
raise RuntimeError("Permutations file '%s' defines a permutations variable but it is not a dict")
self._encoderNames = []
self._permutations = vars['permutations']
self._flattenedPermutations = dict()
def _flattenPermutations(value, keys):
if (':' in keys[(-1)]):
raise RuntimeError("The permutation variable '%s' contains a ':' character, which is not allowed.")
flatKey = _flattenKeys(keys)
if isinstance(value, PermuteEncoder):
self._encoderNames.append(flatKey)
if (value.fieldName == self._predictedField):
self._predictedFieldEncoder = flatKey
for (encKey, encValue) in value.kwArgs.iteritems():
if isinstance(encValue, PermuteVariable):
self._flattenedPermutations[('%s:%s' % (flatKey, encKey))] = encValue
elif isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
rApply(self._permutations, _flattenPermutations)
|
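For orientation, a hypothetical skeleton of the permutations.py file this method executes: it must define 'predictedField', a 'permutations' dict, and exactly one of 'maximize' or 'minimize'. The PermuteEncoder entry is commented out (its exact signature belongs to the real library) so the skeleton runs standalone.

predictedField = 'consumption'

permutations = {
    'modelParams': {
        'sensorParams': {
            'encoders': {
                # 'consumption': PermuteEncoder(...),  # a PermuteVariable tree
            },
        },
    },
}

# Report keys are colon-separated paths into the experiment results dict
report = ['prediction:rmse:field=consumption']
minimize = 'prediction:rmse:field=consumption'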
'Computes the number of models that are expected to complete as part of
this instance\'s HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of permutations_runner.py for use in progress
reporting.
Parameters:
retval: The total number of expected models, if known; -1 if unknown'
| def getExpectedNumModels(self):
| return (-1)
|
'Generates a list of model names that are expected to complete as part of
this instance\'s HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of permutations_runner.py.
Parameters:
retval: List of model names for this HypersearchV2 instance, or
None if not applicable'
| def getModelNames(self):
| return None
|
'Returns a dictionary of permutation variables.
Parameters:
retval: A dictionary of permutation variables; keys are
flat permutation variable names and each value is
a sub-class of PermuteVariable.'
| def getPermutationVariables(self):
| return self._flattenedPermutations
|
'Generates a lookup dictionary of permutation variables whose values
are too complex for labels, so that artificial labels have to be generated
for them.
Parameters:
retval: A look-up dictionary of permutation
variables whose values are too complex for labels, so
artificial labels were generated instead (e.g., "Choice0",
"Choice1", etc.); the key is the name of the complex variable
and the value is:
dict(labels=<list_of_labels>, values=<list_of_values>).'
| def getComplexVariableLabelLookupDict(self):
| raise NotImplementedError
|
'Retrieves the optimization key name and optimization function.
Parameters:
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.'
| def getOptimizationMetricInfo(self):
| return (self._optimizeKey, self._maximize)
|
'If there are any models that haven\'t been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
retval: nothing'
| def _checkForOrphanedModels(self):
| self.logger.debug(('Checking for orphaned models older than %s' % self._modelOrphanIntervalSecs))
while True:
orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID, self._modelOrphanIntervalSecs)
if (orphanedModelId is None):
return
self.logger.info(('Removing orphaned model: %d' % orphanedModelId))
for attempt in range(100):
paramsHash = hashlib.md5(('OrphanParams.%d.%d' % (orphanedModelId, attempt))).digest()
particleHash = hashlib.md5(('OrphanParticle.%d.%d' % (orphanedModelId, attempt))).digest()
try:
self._cjDAO.modelSetFields(orphanedModelId, dict(engParamsHash=paramsHash, engParticleHash=particleHash))
success = True
except:
success = False
if success:
break
if (not success):
raise RuntimeError('Unexpected failure to change paramsHash and particleHash of orphaned model')
self._cjDAO.modelSetCompleted(modelID=orphanedModelId, completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN, completionMsg='Orphaned')
self._resultsDB.update(modelID=orphanedModelId, modelParams=None, modelParamsHash=paramsHash, metricResult=None, completed=True, completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN, matured=True, numRecords=0)
|
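The replacement hashes above are derived deterministically from the orphaned model ID and the attempt number, guaranteeing a fresh pair on each retry; a sketch (.encode() is added because hashlib requires bytes on Python 3):

import hashlib

orphanedModelId, attempt = 1234, 0   # hypothetical values
paramsHash = hashlib.md5(
    ('OrphanParams.%d.%d' % (orphanedModelId, attempt)).encode()).digest()
particleHash = hashlib.md5(
    ('OrphanParticle.%d.%d' % (orphanedModelId, attempt)).encode()).digest()
print(len(paramsHash), len(particleHash))   # 16 16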
'Periodically, check to see if we should remove a certain field combination
from evaluation (because it is doing so poorly) or move on to the next
sprint (add in more fields).
This method is called from _getCandidateParticleAndSwarm(), which is called
right before we try and create a new model to run.
Parameters:
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by removing this swarm. This is used in situations
where we can\'t find any new unique models to create in
this swarm. In these situations, we update the hypersearch
state regardless of the timestamp of the last time another
worker updated it.'
| def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
| if (self._hsState is None):
self._hsState = HsState(self)
self._hsState.readStateFromDB()
completedSwarms = set()
if (exhaustedSwarmId is not None):
self.logger.info(("Removing swarm %s from the active set because we can't find any new unique particle positions" % exhaustedSwarmId))
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(swarmId=exhaustedSwarmId, matured=False)
if (len(particles) > 0):
exhaustedSwarmStatus = 'completing'
else:
exhaustedSwarmStatus = 'completed'
if self._killUselessSwarms:
self._hsState.killUselessSwarms()
completingSwarms = self._hsState.getCompletingSwarms()
for swarmId in completingSwarms:
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(swarmId=swarmId, matured=False)
if (len(particles) == 0):
completedSwarms.add(swarmId)
completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
priorCompletedSwarms = self._hsState.getCompletedSwarms()
for (swarmId, genIdx, errScore) in completedSwarmGens:
if (swarmId in priorCompletedSwarms):
continue
completedList = self._swarmTerminator.recordDataPoint(swarmId=swarmId, generation=genIdx, errScore=errScore)
statusMsg = ("Completed generation #%d of swarm '%s' with a best errScore of %g" % (genIdx, swarmId, errScore))
if (len(completedList) > 0):
statusMsg = ('%s. Matured swarm(s): %s' % (statusMsg, completedList))
self.logger.info(statusMsg)
self._cjDAO.jobSetFields(jobID=self._jobID, fields=dict(engStatus=statusMsg), useConnectionID=False, ignoreUnchanged=True)
if ('NTA_TEST_recordSwarmTerminations' in os.environ):
while True:
resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if (resultsStr is None):
results = {}
else:
results = json.loads(resultsStr)
if (not ('terminatedSwarms' in results)):
results['terminatedSwarms'] = {}
for swarm in completedList:
if (swarm not in results['terminatedSwarms']):
results['terminatedSwarms'][swarm] = (genIdx, self._swarmTerminator.swarmScores[swarm])
newResultsStr = json.dumps(results)
if (newResultsStr == resultsStr):
break
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID, fieldName='results', curValue=resultsStr, newValue=json.dumps(results))
if updated:
break
if (len(completedList) > 0):
for name in completedList:
self.logger.info(('Swarm matured: %s. Score at generation %d: %s' % (name, genIdx, errScore)))
completedSwarms = completedSwarms.union(completedList)
if ((len(completedSwarms) == 0) and (exhaustedSwarmId is None)):
return
while True:
if (exhaustedSwarmId is not None):
self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)
for swarmId in completedSwarms:
self._hsState.setSwarmState(swarmId, 'completed')
if (not self._hsState.isDirty()):
return
success = self._hsState.writeStateToDB()
if success:
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if (jobResultsStr is not None):
jobResults = json.loads(jobResultsStr)
bestModelId = jobResults.get('bestModel', None)
else:
bestModelId = None
for swarmId in list(completedSwarms):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(swarmId=swarmId, completed=False)
if (bestModelId in modelIds):
modelIds.remove(bestModelId)
if (len(modelIds) == 0):
continue
self.logger.info(("Killing the following models in swarm '%s' becausethe swarm is being terminated: %s" % (swarmId, str(modelIds))))
for modelId in modelIds:
self._cjDAO.modelSetFields(modelId, dict(engStop=ClientJobsDAO.STOP_REASON_KILLED), ignoreUnchanged=True)
return
self._hsState.readStateFromDB()
self.logger.debug(('New hsState has been set by some other worker to: \n%s' % pprint.pformat(self._hsState._state, indent=4)))
|
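The NTA_TEST_recordSwarmTerminations branch above uses a read-modify-compare-and-swap loop on the job's results field so concurrent workers cannot clobber each other. The sketch below demonstrates that pattern against a hypothetical in-memory stand-in for ClientJobsDAO.

import json

class _FakeDAO(object):
    # Minimal stand-in exposing the two calls the loop relies on.
    def __init__(self):
        self._results = None
    def jobGetFields(self, jobID, fields):
        return [self._results]
    def jobSetFieldIfEqual(self, jobID, fieldName, curValue, newValue):
        if self._results == curValue:       # atomic compare-and-swap
            self._results = newValue
            return True
        return False

dao = _FakeDAO()
while True:
    resultsStr = dao.jobGetFields(42, ['results'])[0]
    results = {} if resultsStr is None else json.loads(resultsStr)
    results.setdefault('terminatedSwarms', {})['__gym_encoder'] = (3, [0.4])
    if dao.jobSetFieldIfEqual(42, 'results', resultsStr, json.dumps(results)):
        break   # our update won; otherwise re-read and retry
print(dao._results)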
'Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least # of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min # of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go onto the next swarm
If we couldn\'t find a swarm to allocate a particle in, go onto the next
sprint and start allocating particles there....
Parameters:
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either \'completing\' or
\'completed\'. If there are still models being evaluated in
it, mark it as \'completing\', else \'completed\'. This is
used in situations where we can\'t find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time trying to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.'
| def _getCandidateParticleAndSwarm(self, exhaustedSwarmId=None):
| jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._jobCancelled = True
(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID, ['workerCompletionReason', 'workerCompletionMsg'])
if (workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS):
self.logger.info('Exiting due to job being cancelled')
self._cjDAO.jobSetFields(self._jobID, dict(workerCompletionMsg='Job was cancelled'), useConnectionID=False, ignoreUnchanged=True)
else:
self.logger.error(('Exiting because some worker set the workerCompletionReason to %s. WorkerCompletionMsg: %s' % (workerCmpReason, workerCmpMsg)))
return (True, None, None)
if (self._hsState is not None):
priorActiveSwarms = self._hsState.getActiveSwarms()
else:
priorActiveSwarms = None
self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)
activeSwarms = self._hsState.getActiveSwarms()
if (activeSwarms != priorActiveSwarms):
self.logger.info(('Active swarms changed to %s (from %s)' % (activeSwarms, priorActiveSwarms)))
self.logger.debug(('Active swarms: %s' % activeSwarms))
totalCmpModels = self._resultsDB.getNumCompletedModels()
if (totalCmpModels > 5):
numErrs = self._resultsDB.getNumErrModels()
if ((float(numErrs) / totalCmpModels) > self._maxPctErrModels):
errModelIds = self._resultsDB.getErrModelIds()
resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
modelErrMsg = resInfo.completionMsg
cmpMsg = ('%s: Exiting due to receiving too many models failing from exceptions (%d out of %d). \nModel Exception: %s' % (ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels, modelErrMsg))
self.logger.error(cmpMsg)
workerCmpReason = self._cjDAO.jobGetFields(self._jobID, ['workerCompletionReason'])[0]
if (workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS):
self._cjDAO.jobSetFields(self._jobID, fields=dict(cancel=True, workerCompletionReason=ClientJobsDAO.CMPL_REASON_ERROR, workerCompletionMsg=cmpMsg), useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
if self._hsState.isSearchOver():
cmpMsg = 'Exiting because results did not improve in most recently completed sprint.'
self.logger.info(cmpMsg)
self._cjDAO.jobSetFields(self._jobID, dict(workerCompletionMsg=cmpMsg), useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
sprintIdx = (-1)
while True:
sprintIdx += 1
(active, eos) = self._hsState.isSprintActive(sprintIdx)
if eos:
if self._hsState.anyGoodSprintsActive():
self.logger.info('No more sprints to explore, waiting for prior sprints to complete')
return (False, None, None)
else:
cmpMsg = "Exiting because we've evaluated all possible field combinations"
self._cjDAO.jobSetFields(self._jobID, dict(workerCompletionMsg=cmpMsg), useConnectionID=False, ignoreUnchanged=True)
self.logger.info(cmpMsg)
return (True, None, None)
if (not active):
if (not self._speculativeParticles):
if (not self._hsState.isSprintCompleted(sprintIdx)):
self.logger.info(('Waiting for all particles in sprint %d to complete before evolving any more particles' % sprintIdx))
return (False, None, None)
continue
swarmIds = self._hsState.getActiveSwarms(sprintIdx)
for swarmId in swarmIds:
firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(swarmId=swarmId, minNumParticles=self._minParticlesPerSwarm)
if (firstNonFullGenIdx is None):
continue
if (firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId)):
self.logger.info(('Cloning an earlier model in generation %d of swarm %s (sprintIdx=%s) to replace an orphaned model' % (firstNonFullGenIdx, swarmId, sprintIdx)))
(allParticles, allModelIds, errScores, completed, matured) = self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)
if (len(allModelIds) > 0):
newParticleId = True
self.logger.info('Cloning an orphaned model')
else:
newParticleId = True
self.logger.info('No orphans found, so cloning a non-orphan')
(allParticles, allModelIds, errScores, completed, matured) = self._resultsDB.getParticleInfos(swarmId=swarmId, genIdx=firstNonFullGenIdx)
modelId = random.choice(allModelIds)
self.logger.info(('Cloning model %r' % modelId))
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
particle = Particle(hsObj=self, resultsDB=self._resultsDB, flattenedPermuteVars=self._flattenedPermutations, newFromClone=particleState, newParticleId=newParticleId)
return (False, particle, swarmId)
swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
swarmSizeAndIdList = zip(swarmSizes, swarmIds)
swarmSizeAndIdList.sort()
for (_, swarmId) in swarmSizeAndIdList:
(allParticles, allModelIds, errScores, completed, matured) = self._resultsDB.getParticleInfos(swarmId)
if (len(allParticles) < self._minParticlesPerSwarm):
particle = Particle(hsObj=self, resultsDB=self._resultsDB, flattenedPermuteVars=self._flattenedPermutations, swarmId=swarmId, newFarFrom=allParticles)
bestPriorModel = None
if (sprintIdx >= 1):
(bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)
if (bestPriorModel is not None):
self.logger.info(('Best model and errScore from previous sprint(%d): %s, %g' % (0, str(bestPriorModel), errScore)))
(baseState, modelId, errScore, completed, matured) = self._resultsDB.getParticleInfo(bestPriorModel)
particle.copyEncoderStatesFrom(baseState)
particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])
whichVars = []
for varName in baseState['varStates']:
if (':' in varName):
whichVars.append(varName)
particle.newPosition(whichVars)
self.logger.debug(('Particle after incorporating encoder vars from best model in previous sprint: \n%s' % str(particle)))
return (False, particle, swarmId)
(readyParticles, readyModelIds, readyErrScores, _, _) = self._resultsDB.getParticleInfos(swarmId, genIdx=None, matured=True, lastDescendent=True)
if (len(readyParticles) > 0):
readyGenIdxs = [x['genIdx'] for x in readyParticles]
sortedGenIdxs = sorted(set(readyGenIdxs))
genIdx = sortedGenIdxs[0]
useParticle = None
for particle in readyParticles:
if (particle['genIdx'] == genIdx):
useParticle = particle
break
if (not self._speculativeParticles):
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(swarmId, genIdx=genIdx, matured=False)
if (len(particles) > 0):
continue
particle = Particle(hsObj=self, resultsDB=self._resultsDB, flattenedPermuteVars=self._flattenedPermutations, evolveFromState=useParticle)
return (False, particle, swarmId)
if (not self._speculativeParticles):
self.logger.info(('Waiting for one or more of the %s swarms to complete a generation before evolving any more particles' % str(swarmIds)))
return (False, None, None)
|
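The swarm-balancing step above pairs each active swarm with its model count and visits them smallest-first; a sketch with hypothetical counts:

swarmIds = ['__gym_encoder', '__address_encoder', '__timestamp_timeOfDay']
swarmSizes = [7, 3, 5]   # hypothetical models evaluated per swarm so far

# Least-represented swarms get the first chance to spawn a new particle
for size, swarmId in sorted(zip(swarmSizes, swarmIds)):
    print('considering swarm %s (%d models so far)' % (swarmId, size))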
'Test if it\'s OK to exit this worker. This is only called when we run
out of prospective new models to evaluate. This method sees if all models
have matured yet. If not, it will sleep for a bit and return False. This
will indicate to the hypersearch worker that we should keep running, and
check again later. This gives this worker a chance to pick up and adopt any
model which may become orphaned by another worker before it matures.
If all models have matured, this method will send a STOP message to all
matured, running models (presumably, there will be just one - the model
which thinks it\'s the best) before returning True.'
| def _okToExit(self):
| print >>sys.stderr, 'reporter:status:In hypersearchV2: _okToExit'
if (not self._jobCancelled):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
if (len(modelIds) > 0):
self.logger.info('Ready to end hypersearch, but not all models have matured yet. Sleeping a bit to wait for all models to mature.')
time.sleep((5.0 * random.random()))
return False
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
for modelId in modelIds:
self.logger.info(('Stopping model %d because the search has ended' % modelId))
self._cjDAO.modelSetFields(modelId, dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED), ignoreUnchanged=True)
self._hsStatePeriodicUpdate()
(pctFieldContributions, absFieldContributions) = self._hsState.getFieldContributions()
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if (jobResultsStr is not None):
jobResults = json.loads(jobResultsStr)
else:
jobResults = {}
if (pctFieldContributions != jobResults.get('fieldContributions', None)):
jobResults['fieldContributions'] = pctFieldContributions
jobResults['absoluteFieldContributions'] = absFieldContributions
isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID, fieldName='results', curValue=jobResultsStr, newValue=json.dumps(jobResults))
if isUpdated:
self.logger.info('Successfully updated the field contributions:%s', pctFieldContributions)
else:
self.logger.info('Failed updating the field contributions, another hypersearch worker must have updated it')
return True
|
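The job-results update above relies on jobSetFieldIfEqual, a compare-and-swap primitive: the write only lands if the 'results' field still holds the value that was read. A minimal sketch of that optimistic retry pattern, using a hypothetical helper name and only the DAO methods shown in this code:

import json

def updateResultsAtomically(dao, jobID, mutate, maxRetries=10):
    # Illustrative only: re-read, re-apply, and retry until our write is
    # the one that lands, so concurrent workers never clobber each other.
    for _ in range(maxRetries):
        curValue = dao.jobGetFields(jobID, ['results'])[0]
        results = json.loads(curValue) if (curValue is not None) else {}
        mutate(results)  # caller edits the results dict in place
        if dao.jobSetFieldIfEqual(jobID, fieldName='results',
                                  curValue=curValue,
                                  newValue=json.dumps(results)):
            return True   # our update won the race
    return False          # another worker won every round; give up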
'Create one or more new models for evaluation. These should NOT be models
that we already know are in progress (i.e. those that have been sent to us
via recordModelProgress). We return a list of models to the caller
(HypersearchWorker) and if one can be successfully inserted into
the models table (i.e. it is not a duplicate) then HypersearchWorker will
turn around and call our runModel() method, passing in this model. If it
is a duplicate, HypersearchWorker will call this method again. A model
is a duplicate if either the modelParamsHash or particleHash is
identical to another entry in the model table.
The numModels is provided by HypersearchWorker as a suggestion as to how
many models to generate. This particular implementation only ever returns 1
model.
Before choosing some new models, we first do a sweep for any models that
may have been abandoned by failed workers. If/when we detect an abandoned
model, we mark it as complete and orphaned and hide it from any subsequent
queries to our ResultsDB. This effectively considers it as if it never
existed. We also change the paramsHash and particleHash in the model record
of the models table so that we can create another model with the same
params and particle status and run it (which we then do immediately).
The modelParamsHash returned for each model should be a hash (max allowed
size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
its params and the optional particleHash should be a hash of the particleId
and generation index. Every model that gets placed into the models database,
either by this worker or another worker, will have these hashes computed for
it. The recordModelProgress gets called for every model in the database and
the hash is used to tell which, if any, are the same as the ones this worker
generated.
NOTE: We check first ourselves for possible duplicates using the paramsHash
before we return a model. If HypersearchWorker failed to insert it (because
some other worker beat us to it), it will turn around and call our
recordModelProgress with that other model so that we now know about it. It
will then call createModels() again.
This method returns an exit boolean and the model to evaluate. If there is
no model to evaluate, we may return False for exit because we want to stay
alive for a while, waiting for all other models to finish. This gives us
a chance to detect and pick up any possibly orphaned model by another
worker.
Parameters:
numModels: number of models to generate
retval: (exit, models)
exit: true if this worker should exit.
models: list of tuples, one for each model. Each tuple contains:
(modelParams, modelParamsHash, particleHash)
modelParams is a dictionary containing the following elements:
structuredParams: dictionary containing all variables for
this model, with encoders represented as a dict within
this dict (or None if they are not included).
particleState: dictionary containing the state of this
particle. This includes the position and velocity of
each of its variables, the particleId, and the particle
generation index. It contains the following keys:
id: The particle Id of the particle we are using to
generate/track this model. This is a string of the
form <hypersearchWorkerId>.<particleIdx>
genIdx: the particle\'s generation index. This starts at 0
and increments every time we move the particle to a
new position.
swarmId: The swarmId, which is a string of the form
<encoder>.<encoder>... that describes this swarm
varStates: dict of the variable states. The key is the
variable name, the value is a dict of the variable\'s
position, velocity, bestPosition, bestResult, etc.'
| def createModels(self, numModels=1):
| self._checkForOrphanedModels()
modelResults = []
for _ in xrange(numModels):
candidateParticle = None
if ((self._maxModels is not None) and ((self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >= self._maxModels)):
return (self._okToExit(), [])
if (candidateParticle is None):
(exitNow, candidateParticle, candidateSwarm) = self._getCandidateParticleAndSwarm()
if (candidateParticle is None):
if exitNow:
return (self._okToExit(), [])
else:
print >>sys.stderr, 'reporter:status:In hypersearchV2: speculativeWait'
time.sleep((self._speculativeWaitSecondsMax * random.random()))
return (False, [])
useEncoders = candidateSwarm.split('.')
numAttempts = 0
while True:
if (numAttempts >= 1):
self.logger.debug(('Agitating particle to get unique position after %d failed attempts in a row' % numAttempts))
candidateParticle.agitate()
position = candidateParticle.getPosition()
structuredParams = dict()
def _buildStructuredParams(value, keys):
flatKey = _flattenKeys(keys)
if (flatKey in self._encoderNames):
if (flatKey in useEncoders):
return value.getDict(flatKey, position)
else:
return None
elif (flatKey in position):
return position[flatKey]
else:
return value
structuredParams = rCopy(self._permutations, _buildStructuredParams, discardNoneKeys=False)
modelParams = dict(structuredParams=structuredParams, particleState=candidateParticle.getState())
m = hashlib.md5()
m.update(sortedJSONDumpS(structuredParams))
m.update(self._baseDescriptionHash)
paramsHash = m.digest()
particleInst = ('%s.%s' % (modelParams['particleState']['id'], modelParams['particleState']['genIdx']))
particleHash = hashlib.md5(particleInst).digest()
numAttempts += 1
if (self._filterFunc and (not self._filterFunc(structuredParams))):
valid = False
else:
valid = True
if (valid and (self._resultsDB.getModelIDFromParamsHash(paramsHash) is None)):
break
if (numAttempts >= self._maxUniqueModelAttempts):
(exitNow, candidateParticle, candidateSwarm) = self._getCandidateParticleAndSwarm(exhaustedSwarmId=candidateSwarm)
if (candidateParticle is None):
if exitNow:
return (self._okToExit(), [])
else:
time.sleep((self._speculativeWaitSecondsMax * random.random()))
return (False, [])
numAttempts = 0
useEncoders = candidateSwarm.split('.')
if (self.logger.getEffectiveLevel() <= logging.DEBUG):
self.logger.debug(('Submitting new potential model to HypersearchWorker: \n%s' % pprint.pformat(modelParams, indent=4)))
modelResults.append((modelParams, paramsHash, particleHash))
return (False, modelResults)
|
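createModels() derives paramsHash from a deterministic serialization of the structured params plus the base description hash, and particleHash from the particle id and generation index, so identical positions are detected as duplicates no matter which worker generated them. A Python 2 sketch of the scheme, with json.dumps(sort_keys=True) standing in for sortedJSONDumpS:

import hashlib
import json

def computeModelHashes(structuredParams, particleState, baseDescriptionHash):
    # sort_keys=True makes the dump deterministic, so equal params always
    # hash to the same digest regardless of dict ordering.
    m = hashlib.md5()
    m.update(json.dumps(structuredParams, sort_keys=True))
    m.update(baseDescriptionHash)
    paramsHash = m.digest()
    # Particle id plus generation index: re-running the same particle
    # position collides in the models table and is rejected as a duplicate.
    particleInst = '%s.%s' % (particleState['id'], particleState['genIdx'])
    particleHash = hashlib.md5(particleInst).digest()
    return (paramsHash, particleHash)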
'Record or update the results for a model. This is called by the
HSW whenever it gets results info for another model, or updated results
on a model that is still running.
The first time this is called for a given modelID, the modelParams will
contain the params dict for that model and the modelParamsHash will contain
the hash of the params. Subsequent updates of the same modelID will
have params and paramsHash values of None (in order to save overhead).
The Hypersearch object should save these results into its own working
memory into some table, which it then uses to determine what kind of
new models to create next time createModels() is called.
Parameters:
modelID: ID of this model in models table
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for a
description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
results: tuple containing (allMetrics, optimizeMetric). Each is a
dict containing metricName:result pairs.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured. In most cases, once a
model matures, it will complete as well. The only time a
model matures and does not complete is if it\'s currently
the best model and we choose to keep it running to generate
predictions.
numRecords: Number of records that have been processed so far by this
model.'
| def recordModelProgress(self, modelID, modelParams, modelParamsHash, results, completed, completionReason, matured, numRecords):
| if (results is None):
metricResult = None
else:
metricResult = results[1].values()[0]
errScore = self._resultsDB.update(modelID=modelID, modelParams=modelParams, modelParamsHash=modelParamsHash, metricResult=metricResult, completed=completed, completionReason=completionReason, matured=matured, numRecords=numRecords)
self.logger.debug('Received progress on model %d: completed: %s, cmpReason: %s, numRecords: %d, errScore: %s', modelID, completed, completionReason, numRecords, errScore)
(bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
self.logger.debug(('Best err score seen so far: %s on model %s' % (bestResult, bestModelID)))
|
'Run the given model.
This runs the model described by \'modelParams\'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their name
structure as:
<encoderName>.<encoderVarName>
modelParamsHash: hash of modelParamValues
jobsDAO: jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key'
| def runModel(self, modelID, jobID, modelParams, modelParamsHash, jobsDAO, modelCheckpointGUID):
| if (not self._createCheckpoints):
modelCheckpointGUID = None
self._resultsDB.update(modelID=modelID, modelParams=modelParams, modelParamsHash=modelParamsHash, metricResult=None, completed=False, completionReason=None, matured=False, numRecords=0)
structuredParams = modelParams['structuredParams']
if (self.logger.getEffectiveLevel() <= logging.DEBUG):
self.logger.debug(('Running Model. \nmodelParams: %s, \nmodelID=%s, ' % (pprint.pformat(modelParams, indent=4), modelID)))
cpuTimeStart = time.clock()
logLevel = self.logger.getEffectiveLevel()
try:
if ((self._dummyModel is None) or (self._dummyModel is False)):
(cmpReason, cmpMsg) = runModelGivenBaseAndParams(modelID=modelID, jobID=jobID, baseDescription=self._baseDescription, params=structuredParams, predictedField=self._predictedField, reportKeys=self._reportKeys, optimizeKey=self._optimizeKey, jobsDAO=jobsDAO, modelCheckpointGUID=modelCheckpointGUID, logLevel=logLevel, predictionCacheMaxRecords=self._predictionCacheMaxRecords)
else:
dummyParams = dict(self._dummyModel)
dummyParams['permutationParams'] = structuredParams
if (self._dummyModelParamsFunc is not None):
permInfo = dict(structuredParams)
permInfo['generation'] = modelParams['particleState']['genIdx']
dummyParams.update(self._dummyModelParamsFunc(permInfo))
(cmpReason, cmpMsg) = runDummyModel(modelID=modelID, jobID=jobID, params=dummyParams, predictedField=self._predictedField, reportKeys=self._reportKeys, optimizeKey=self._optimizeKey, jobsDAO=jobsDAO, modelCheckpointGUID=modelCheckpointGUID, logLevel=logLevel, predictionCacheMaxRecords=self._predictionCacheMaxRecords)
jobsDAO.modelSetCompleted(modelID, completionReason=cmpReason, completionMsg=cmpMsg, cpuTime=(time.clock() - cpuTimeStart))
except InvalidConnectionException as e:
self.logger.warn('%s', e)
|
'Parameters:
modelID: ID for this model in the models table
jobID: ID for this hypersearch job in the jobs table
predictedField: Name of the input field for which this model is being
optimized
experimentDir: Directory path containing the experiment\'s
description.py script
reportKeyPatterns: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKeyPattern: Which report item, if any, we will be optimizing for.
This can also be a regular expression, but is an error
if it matches more than one key from the experiment\'s
results.
jobsDAO: Jobs data access object - the interface to the
jobs database which has the model\'s table.
modelCheckpointGUID:
A persistent, globally-unique identifier for
constructing the model checkpoint key. If None, then
don\'t bother creating a model checkpoint.
logLevel: override logging level to this value, if not None
predictionCacheMaxRecords:
Maximum number of records for the prediction output cache.
Pass None for default value.'
| def __init__(self, modelID, jobID, predictedField, experimentDir, reportKeyPatterns, optimizeKeyPattern, jobsDAO, modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None):
| self._MIN_RECORDS_TO_BE_BEST = int(Configuration.get('nupic.hypersearch.bestModelMinRecords'))
self._MATURITY_MAX_CHANGE = float(Configuration.get('nupic.hypersearch.maturityPctChange'))
self._MATURITY_NUM_POINTS = int(Configuration.get('nupic.hypersearch.maturityNumPoints'))
self._modelID = modelID
self._jobID = jobID
self._predictedField = predictedField
self._experimentDir = experimentDir
self._reportKeyPatterns = reportKeyPatterns
self._optimizeKeyPattern = optimizeKeyPattern
self._jobsDAO = jobsDAO
self._modelCheckpointGUID = modelCheckpointGUID
self._predictionCacheMaxRecords = predictionCacheMaxRecords
self._isMaturityEnabled = bool(int(Configuration.get('nupic.hypersearch.enableModelMaturity')))
self._logger = logging.getLogger('.'.join(['com.numenta', self.__class__.__module__, self.__class__.__name__]))
self._optimizedMetricLabel = None
self._reportMetricLabels = []
self._cmpReason = ClientJobsDAO.CMPL_REASON_EOF
if (logLevel is not None):
self._logger.setLevel(logLevel)
self.__metricMgr = None
self.__task = None
self._periodic = None
self._streamDef = None
self._model = None
self._inputSource = None
self._currentRecordIndex = None
self._predictionLogger = None
self.__predictionCache = deque()
self._isBestModel = False
self._isBestModelStored = False
self._isCanceled = False
self._isKilled = False
self._isMature = False
self._isInterrupted = threading.Event()
self._metricRegression = regression.AveragePctChange(windowSize=self._MATURITY_NUM_POINTS)
self.__loggedMetricPatterns = []
|
'Runs the OPF Model
Parameters:
retval: (completionReason, completionMsg)
where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX
equates.'
| def run(self):
| descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(self._experimentDir)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(descriptionPyModule)
expIface.normalizeStreamSources()
modelDescription = expIface.getModelDescription()
self._modelControl = expIface.getModelControl()
streamDef = self._modelControl['dataset']
from nupic.data.stream_reader import StreamReader
readTimeout = 0
self._inputSource = StreamReader(streamDef, isBlocking=False, maxTimeout=readTimeout)
fieldStats = self._getFieldStats()
self._model = ModelFactory.create(modelDescription)
self._model.setFieldStatistics(fieldStats)
self._model.enableLearning()
self._model.enableInference(self._modelControl.get('inferenceArgs', None))
self.__metricMgr = MetricsManager(self._modelControl.get('metrics', None), self._model.getFieldInfo(), self._model.getInferenceType())
self.__loggedMetricPatterns = self._modelControl.get('loggedMetrics', [])
self._optimizedMetricLabel = self.__getOptimizedMetricLabel()
self._reportMetricLabels = matchPatterns(self._reportKeyPatterns, self._getMetricLabels())
self._periodic = self._initPeriodicActivities()
numIters = self._modelControl.get('iterationCount', (-1))
learningOffAt = None
iterationCountInferOnly = self._modelControl.get('iterationCountInferOnly', 0)
if (iterationCountInferOnly == (-1)):
self._model.disableLearning()
elif (iterationCountInferOnly > 0):
assert (numIters > iterationCountInferOnly), 'when iterationCountInferOnly is specified, iterationCount must be greater than iterationCountInferOnly.'
learningOffAt = (numIters - iterationCountInferOnly)
self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)
self._finalize()
return (self._cmpReason, None)
|
'Main loop of the OPF Model Runner.
Parameters:
numIters: Maximum number of iterations to run, or -1 to run until the input stream is exhausted
learningOffAt: If not None, learning is turned off when we reach this
iteration number'
| def __runTaskMainLoop(self, numIters, learningOffAt=None):
| self._model.resetSequenceStates()
self._currentRecordIndex = (-1)
while True:
if self._isKilled:
break
if self._isCanceled:
break
if self._isInterrupted.isSet():
self.__setAsOrphaned()
break
if self._isMature:
if (not self._isBestModel):
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
if ((learningOffAt is not None) and (self._currentRecordIndex == learningOffAt)):
self._model.disableLearning()
try:
inputRecord = self._inputSource.getNextRecordDict()
if (self._currentRecordIndex < 0):
self._inputSource.setTimeout(10)
except Exception as e:
raise utils.JobFailException(ErrorCodes.streamReading, str(e.args), traceback.format_exc())
if (inputRecord is None):
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
break
if inputRecord:
self._currentRecordIndex += 1
result = self._model.run(inputRecord=inputRecord)
result.metrics = self.__metricMgr.update(result)
if (not result.metrics):
result.metrics = self.__metricMgr.getMetrics()
if (InferenceElement.encodings in result.inferences):
result.inferences.pop(InferenceElement.encodings)
result.sensorInput.dataEncodings = None
self._writePrediction(result)
self._periodic.tick()
if ((numIters >= 0) and (self._currentRecordIndex >= (numIters - 1))):
break
else:
raise ValueError(('Got an empty record from FileSource: %r' % inputRecord))
|
'Run final activities after a model has run. These include recording and
logging the final score'
| def _finalize(self):
| self._logger.info('Finished: modelID=%r; %r records processed. Performing final activities', self._modelID, (self._currentRecordIndex + 1))
self._updateModelDBResults()
if (not self._isKilled):
self.__updateJobResults()
else:
self.__deleteOutputCache(self._modelID)
if self._predictionLogger:
self._predictionLogger.close()
|
'Create a checkpoint from the current model, and store it in a dir named
after the checkpoint GUID, and finally store the GUID in the Models DB'
| def __createModelCheckpoint(self):
| if ((self._model is None) or (self._modelCheckpointGUID is None)):
return
if (self._predictionLogger is None):
self._createPredictionLogger()
predictions = StringIO.StringIO()
self._predictionLogger.checkpoint(checkpointSink=predictions, maxRows=int(Configuration.get('nupic.model.checkpoint.maxPredictionRows')))
self._model.save(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))
self._jobsDAO.modelSetFields(self._modelID, {'modelCheckpointId': str(self._modelCheckpointGUID)}, ignoreUnchanged=True)
self._logger.info('Checkpointed Hypersearch Model: modelID: %r, checkpointID: %r', self._modelID, self._modelCheckpointGUID)
return
|
'Delete the stored checkpoint for the specified modelID. This function is
called if the current model is now the best model, making the old model\'s
checkpoint obsolete
Parameters:
modelID: The modelID for the checkpoint to delete. This is NOT the
unique checkpointID'
| def __deleteModelCheckpoint(self, modelID):
| checkpointID = self._jobsDAO.modelsGetFields(modelID, ['modelCheckpointId'])[0]
if (checkpointID is None):
return
try:
shutil.rmtree(os.path.join(self._experimentDir, str(checkpointID)))
except Exception:
self._logger.warn('Failed to delete model checkpoint %s. Assuming that another worker has already deleted it', checkpointID)
return
self._jobsDAO.modelSetFields(modelID, {'modelCheckpointId': None}, ignoreUnchanged=True)
return
|
'Creates the model\'s PredictionLogger object, which is an interface to write
model results to a permanent storage location'
| def _createPredictionLogger(self):
| self._predictionLogger = BasicPredictionLogger(fields=self._model.getFieldInfo(), experimentDir=self._experimentDir, label='hypersearch-worker', inferenceType=self._model.getInferenceType())
if self.__loggedMetricPatterns:
metricLabels = self.__metricMgr.getMetricLabels()
loggedMetrics = matchPatterns(self.__loggedMetricPatterns, metricLabels)
self._predictionLogger.setLoggedMetrics(loggedMetrics)
|
'Get the label for the metric being optimized. This function also caches
the label in the instance variable self._optimizedMetricLabel
Returns: The label for the metric being optimized over'
| def __getOptimizedMetricLabel(self):
| matchingKeys = matchPatterns([self._optimizeKeyPattern], self._getMetricLabels())
if (len(matchingKeys) == 0):
raise Exception(('None of the generated metrics match the specified optimization pattern: %s. Available metrics are %s' % (self._optimizeKeyPattern, self._getMetricLabels())))
elif (len(matchingKeys) > 1):
raise Exception(("The specified optimization pattern '%s' matches more than one metric: %s" % (self._optimizeKeyPattern, matchingKeys)))
return matchingKeys[0]
|
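matchPatterns, used here and again when wiring up logged metrics, is assumed to return every label matched by at least one of the given regular expressions. A hedged stand-in:

import re

def matchPatternsSketch(patterns, labels):
    # Stand-in for the matchPatterns helper assumed by this code: a label
    # is kept if any of the patterns matches it.
    matched = []
    for label in labels:
        for pattern in patterns:
            if re.match(pattern, label):
                matched.append(label)
                break
    return matched

# e.g. matchPatternsSketch(['.*aae.*'],
#                          ['prediction:aae:window=1000:field=consumption'])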
'Returns: A list of labels that correspond to metrics being computed'
| def _getMetricLabels(self):
| return self.__metricMgr.getMetricLabels()
|
'Method which returns a dictionary of field statistics received from the
input source.
Returns:
fieldStats: dict of dicts where the first level is the field name and
the second level is the statistic, e.g. fieldStats[\'pounds\'][\'min\']'
| def _getFieldStats(self):
| fieldStats = dict()
fieldNames = self._inputSource.getFieldNames()
for field in fieldNames:
curStats = dict()
curStats['min'] = self._inputSource.getFieldMin(field)
curStats['max'] = self._inputSource.getFieldMax(field)
fieldStats[field] = curStats
return fieldStats
|
'Protected function that can be overridden by subclasses. Its main purpose
is to allow the OPFDummyModelRunner to override this with deterministic
values
Returns: All the metrics being computed for this model'
| def _getMetrics(self):
| return self.__metricMgr.getMetrics()
|
'Retrieves the current results and updates the model\'s record in
the Model database.'
| def _updateModelDBResults(self):
| metrics = self._getMetrics()
reportDict = dict([(k, metrics[k]) for k in self._reportMetricLabels])
optimizeDict = dict()
if (self._optimizeKeyPattern is not None):
optimizeDict[self._optimizedMetricLabel] = metrics[self._optimizedMetricLabel]
results = json.dumps((metrics, optimizeDict))
self._jobsDAO.modelUpdateResults(self._modelID, results=results, metricValue=optimizeDict.values()[0], numRecords=(self._currentRecordIndex + 1))
self._logger.debug(('Model Results: modelID=%s; numRecords=%s; results=%s' % (self._modelID, (self._currentRecordIndex + 1), results)))
return
|
'Periodic check to see if this is the best model. This should only have an
effect if this is the *first* model to report its progress'
| def __updateJobResultsPeriodic(self):
| if (self._isBestModelStored and (not self._isBestModel)):
return
while True:
jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
if (jobResultsStr is None):
jobResults = {}
else:
self._isBestModelStored = True
if (not self._isBestModel):
return
jobResults = json.loads(jobResultsStr)
bestModel = jobResults.get('bestModel', None)
bestMetric = jobResults.get('bestValue', None)
isSaved = jobResults.get('saved', False)
if ((bestModel is not None) and (self._modelID != bestModel)):
self._isBestModel = False
return
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = False
newResults = json.dumps(jobResults)
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID, fieldName='results', curValue=jobResultsStr, newValue=newResults)
if (isUpdated or ((not isUpdated) and (newResults == jobResultsStr))):
self._isBestModel = True
break
|
'Reads the current "best model" for the job and returns whether or not the
current model is better than the "best model" stored for the job
Returns: (isBetter, storedBest, origResultsStr)
isBetter:
True if the current model is better than the stored "best model"
storedResults:
A dict of the currently stored results in the jobs table record
origResultsStr:
The json-encoded string that currently resides in the "results" field
of the jobs record (used to create atomicity)'
| def __checkIfBestCompletedModel(self):
| jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
if (jobResultsStr is None):
jobResults = {}
else:
jobResults = json.loads(jobResultsStr)
isSaved = jobResults.get('saved', False)
bestMetric = jobResults.get('bestValue', None)
currentMetric = self._getMetrics()[self._optimizedMetricLabel]
self._isBestModel = ((not isSaved) or (currentMetric < bestMetric))
return (self._isBestModel, jobResults, jobResultsStr)
|
'Check if this is the best model
If so:
1) Write its checkpoint
2) Record this model as the best
3) Delete the previous best\'s output cache
Otherwise:
1) Delete our output cache'
| def __updateJobResults(self):
| isSaved = False
while True:
(self._isBestModel, jobResults, jobResultsStr) = self.__checkIfBestCompletedModel()
if self._isBestModel:
if (not isSaved):
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__createModelCheckpoint()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
isSaved = True
prevBest = jobResults.get('bestModel', None)
prevWasSaved = jobResults.get('saved', False)
if (prevBest == self._modelID):
assert (not prevWasSaved)
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = True
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID, fieldName='results', curValue=jobResultsStr, newValue=json.dumps(jobResults))
if isUpdated:
if prevWasSaved:
self.__deleteOutputCache(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self._logger.info('Model %d chosen as best model', self._modelID)
break
else:
self.__deleteOutputCache(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
break
|
'Writes the results of one iteration of a model. The results are written to
this ModelRunner\'s in-memory cache unless this model is the "best model" for
the job. If this model is the "best model", the predictions are written out
to a permanent store via a prediction output stream instance
Parameters:
result: A opf_utils.ModelResult object, which contains the input and
output for this iteration'
| def _writePrediction(self, result):
| self.__predictionCache.append(result)
if self._isBestModel:
self.__flushPredictionCache()
|
'This callback is called by self._predictionLogger.writeRecords()
between each batch of records it writes. It gives us a chance to say that
the model is \'still alive\' during long write operations.'
| def __writeRecordsCallback(self):
| self._jobsDAO.modelUpdateResults(self._modelID)
|
'Writes the contents of this model\'s in-memory prediction cache to a permanent
store via the prediction output stream instance'
| def __flushPredictionCache(self):
| if (not self.__predictionCache):
return
if (self._predictionLogger is None):
self._createPredictionLogger()
startTime = time.time()
self._predictionLogger.writeRecords(self.__predictionCache, progressCB=self.__writeRecordsCallback)
self._logger.info('Flushed prediction cache; numrows=%s; elapsed=%s sec.', len(self.__predictionCache), (time.time() - startTime))
self.__predictionCache.clear()
|
'Deletes the output cache associated with the given modelID. This actually
releases the resources associated with the cache, rather than deleting all
the records in the cache
Parameters:
modelID: The id of the model whose output cache is being deleted'
| def __deleteOutputCache(self, modelID):
| if ((modelID == self._modelID) and (self._predictionLogger is not None)):
self._predictionLogger.close()
del self.__predictionCache
self._predictionLogger = None
self.__predictionCache = None
|
'Creates and returns a PeriodicActivityMgr instance initialized with
our periodic activities
Parameters:
retval: a PeriodicActivityMgr instance'
| def _initPeriodicActivities(self):
| updateModelDBResults = PeriodicActivityRequest(repeating=True, period=100, cb=self._updateModelDBResults)
updateJobResults = PeriodicActivityRequest(repeating=True, period=100, cb=self.__updateJobResultsPeriodic)
checkCancelation = PeriodicActivityRequest(repeating=True, period=50, cb=self.__checkCancelation)
checkMaturity = PeriodicActivityRequest(repeating=True, period=10, cb=self.__checkMaturity)
updateJobResultsFirst = PeriodicActivityRequest(repeating=False, period=2, cb=self.__updateJobResultsPeriodic)
periodicActivities = [updateModelDBResults, updateJobResultsFirst, updateJobResults, checkCancelation]
if self._isMaturityEnabled:
periodicActivities.append(checkMaturity)
return PeriodicActivityMgr(requestedActivities=periodicActivities)
|
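PeriodicActivityRequest and PeriodicActivityMgr are defined elsewhere; from their use here, period appears to count calls to tick(), repeating activities re-arm after firing, and one-shot activities fire once and are discarded. A minimal sketch under those assumptions:

from collections import namedtuple

PeriodicActivityRequest = namedtuple('PeriodicActivityRequest',
                                     ['repeating', 'period', 'cb'])

class PeriodicActivityMgrSketch(object):
    def __init__(self, requestedActivities):
        # Pair each request with a countdown initialized to its period.
        self._activities = [[req.period, req] for req in requestedActivities]

    def tick(self):
        keep = []
        for entry in self._activities:
            entry[0] -= 1
            if entry[0] <= 0:
                entry[1].cb()
                if not entry[1].repeating:
                    continue                 # one-shot: drop after firing
                entry[0] = entry[1].period   # repeating: re-arm
            keep.append(entry)
        self._activities = keep
        return True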
'Check if the cancelation flag has been set for this model
in the Model DB'
| def __checkCancelation(self):
| print >>sys.stderr, 'reporter:counter:HypersearchWorker,numRecords,50'
jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
self._isCanceled = True
self._logger.info('Model %s canceled because Job %s was stopped.', self._modelID, self._jobID)
else:
stopReason = self._jobsDAO.modelsGetFields(self._modelID, ['engStop'])[0]
if (stopReason is None):
pass
elif (stopReason == ClientJobsDAO.STOP_REASON_KILLED):
self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
self._isKilled = True
self._logger.info('Model %s canceled because it was killed by hypersearch', self._modelID)
elif (stopReason == ClientJobsDAO.STOP_REASON_STOPPED):
self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
self._isCanceled = True
self._logger.info('Model %s stopped because hypersearch ended', self._modelID)
else:
raise RuntimeError(('Unexpected stop reason encountered: %s' % stopReason))
|
'Save the current metric value and see if the model\'s performance has
\'leveled off.\' We do this by looking at the percent change over the last
several metric recordings'
| def __checkMaturity(self):
| if ((self._currentRecordIndex + 1) < self._MIN_RECORDS_TO_BE_BEST):
return
if self._isMature:
return
metric = self._getMetrics()[self._optimizedMetricLabel]
self._metricRegression.addPoint(x=self._currentRecordIndex, y=metric)
(pctChange, absPctChange) = self._metricRegression.getPctChanges()
if ((pctChange is not None) and (absPctChange <= self._MATURITY_MAX_CHANGE)):
self._jobsDAO.modelSetFields(self._modelID, {'engMatured': True})
self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
self._isMature = True
self._logger.info('Model %d has matured (pctChange=%s, n=%d). \nScores = %s\nStopping execution', self._modelID, pctChange, self._MATURITY_NUM_POINTS, self._metricRegression._window)
|
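regression.AveragePctChange is also external to this excerpt. One plausible implementation consistent with its use above, tracking a fixed window of metric values and returning (None, None) until the window fills:

from collections import deque

class AveragePctChangeSketch(object):
    def __init__(self, windowSize):
        self._window = deque(maxlen=windowSize)

    def addPoint(self, x, y):
        self._window.append(y)  # x accepted only for interface parity

    def getPctChanges(self):
        if len(self._window) < self._window.maxlen:
            return (None, None)  # not enough history yet
        values = list(self._window)
        changes = [((b - a) / float(abs(a))) * 100.0
                   for (a, b) in zip(values, values[1:]) if a != 0]
        if not changes:
            return (None, None)
        avg = sum(changes) / len(changes)
        avgAbs = sum(abs(c) for c in changes) / len(changes)
        return (avg, avgAbs)

A small average absolute change over the window means the optimized score has leveled off, which is exactly the maturity condition tested above.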
'Handles a "warning signal" from the scheduler. This is received when the
scheduler is about to kill the current process so that the worker can be
allocated to another job.
Right now, this function just sets the current model to the "Orphaned" state
in the models table so that another worker can eventually re-run this model
Parameters:
signum, frame: standard Python signal-handler arguments (unused)'
| def handleWarningSignal(self, signum, frame):
| self._isInterrupted.set()
|
'Sets the current model as orphaned. This is called when the scheduler is
about to kill the process to reallocate the worker to a different process.'
| def __setAsOrphaned(self):
| cmplReason = ClientJobsDAO.CMPL_REASON_ORPHAN
cmplMessage = 'Killed by Scheduler'
self._jobsDAO.modelSetCompleted(self._modelID, cmplReason, cmplMessage)
|
'/models
returns:
[model1, model2, model3, ...] list of model names'
| def GET(self):
| global g_models
return json.dumps({'models': g_models.keys()})
|
'/models/{name}
schema:
"modelParams": dict containing model parameters
"predictedFieldName": str
returns:
{"success":name}'
| def POST(self, name):
| global g_models
data = json.loads(web.data())
modelParams = data['modelParams']
predictedFieldName = data['predictedFieldName']
if (name in g_models.keys()):
raise web.badrequest(('Model with name <%s> already exists' % name))
model = ModelFactory.create(modelParams)
model.enableInference({'predictedField': predictedFieldName})
g_models[name] = model
return json.dumps({'success': name})
|
'/models/{name}/run
schema:
predictedFieldName: value
timestamp: %m/%d/%y %H:%M
NOTE: predictedFieldName MUST be the same name specified when
creating the model.
returns:
"predictionNumber":<number of record>,
"anomalyScore":anomalyScore'
| def POST(self, name):
| global g_models
data = json.loads(web.data())
data['timestamp'] = datetime.datetime.strptime(data['timestamp'], '%m/%d/%y %H:%M')
if (name not in g_models.keys()):
raise web.notfound(('Model with name <%s> does not exist.' % name))
modelResult = g_models[name].run(data)
predictionNumber = modelResult.predictionNumber
anomalyScore = modelResult.inferences['anomalyScore']
return json.dumps({'predictionNumber': predictionNumber, 'anomalyScore': anomalyScore})
|
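Putting the three handlers together, a client session might look like this (host, port, and the model parameter contents are placeholders; the server is assumed to be a web.py app routing /models, /models/{name} and /models/{name}/run to the classes above):

import json
import urllib2

def post(url, payload):
    req = urllib2.Request(url, json.dumps(payload))
    return json.loads(urllib2.urlopen(req).read())

# Create a model; a real OPF modelParams dict must be supplied here.
post('http://localhost:8080/models/gym', {
    'modelParams': {},  # placeholder only; ModelFactory needs real params
    'predictedFieldName': 'consumption',
})

# Feed one record; the field name must match predictedFieldName above,
# and the timestamp must use the '%m/%d/%y %H:%M' format.
result = post('http://localhost:8080/models/gym/run', {
    'consumption': 5.3,
    'timestamp': '01/01/15 00:00',
})
print(result['anomalyScore'])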
'Exports a network as a networkx MultiDiGraph intermediate representation
suitable for visualization.
:return: networkx MultiDiGraph'
| def export(self):
| graph = nx.MultiDiGraph()
regions = self.network.getRegions()
for idx in xrange(regions.getCount()):
regionPair = regions.getByIndex(idx)
regionName = regionPair[0]
graph.add_node(regionName, label=regionName)
for (linkName, link) in self.network.getLinks():
graph.add_edge(link.getSrcRegionName(), link.getDestRegionName(), src=link.getSrcOutputName(), dest=link.getDestInputName())
return graph
|
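A possible way to consume the exported graph, where visualizer stands for an instance of the enclosing class constructed around a nupic Network:

graph = visualizer.export()
print(graph.nodes())  # region names
for (src, dst, attrs) in graph.edges(data=True):
    # attrs carries the output/input names recorded for each link above
    print('%s -> %s (%s -> %s)' % (src, dst, attrs['src'], attrs['dest']))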
'Render network. Default is
:class:`~nupic.frameworks.viz.dot_renderer.DotRenderer`.
:param renderer: Constructor of a "renderer" implementation. The
constructed object must have a "render" method that accepts a
single argument (a networkx graph instance).'
| def render(self, renderer=DEFAULT_RENDERER):
| renderer().render(self.export())
|
'filePath: path of file where SP __init__ args are to be saved'
| def __init__(self, filePath):
| self.__filePath = filePath
return
|
'filePath: path of file where TM __init__ args are to be saved'
| def __init__(self, filePath):
| self.__filePath = filePath
return
|
'The PVM does not learn, so this function has no effect.'
| def finishLearning(self):
| pass
|
'Since the PVM has no use for this information, this is a no-op'
| def setFieldStatistics(self, fieldStats):
| pass
|
'Serialize via capnp
:param proto: capnp PreviousValueModelProto message builder'
| def write(self, proto):
| super(PreviousValueModel, self).writeBaseToProto(proto.modelBase)
proto.fieldNames = self._fieldNames
proto.fieldTypes = self._fieldTypes
proto.predictedField = self._predictedField
proto.predictionSteps = self._predictionSteps
|
'Deserialize via capnp
:param proto: capnp PreviousValueModelProto message reader
:returns: new instance of PreviousValueModel deserialized from the given
proto'
| @classmethod
def read(cls, proto):
| instance = object.__new__(cls)
super(PreviousValueModel, instance).__init__(proto=proto.modelBase)
instance._logger = opf_utils.initLogger(instance)
instance._predictedField = proto.predictedField
instance._fieldNames = list(proto.fieldNames)
instance._fieldTypes = list(proto.fieldTypes)
instance._predictionSteps = list(proto.predictionSteps)
return instance
|
'Get the logger for this object.
:returns: (Logger) A Logger object.'
| @classmethod
def __getLogger(cls):
| if (cls.__logger is None):
cls.__logger = opf_utils.initLogger(cls)
return cls.__logger
|
'Create a new model instance, given a description dictionary.
:param modelConfig: (dict)
A dictionary describing the current model,
`described here <../../quick-start/example-model-params.html>`_.
:param logLevel: (int) The level of logging output that should be generated
:raises Exception: Unsupported model type
:returns: :class:`nupic.frameworks.opf.model.Model`'
| @staticmethod
def create(modelConfig, logLevel=logging.ERROR):
| logger = ModelFactory.__getLogger()
logger.setLevel(logLevel)
logger.debug('ModelFactory returning Model from dict: %s', modelConfig)
modelClass = None
if (modelConfig['model'] == 'HTMPrediction'):
modelClass = HTMPredictionModel
elif (modelConfig['model'] == 'TwoGram'):
modelClass = TwoGramModel
elif (modelConfig['model'] == 'PreviousValue'):
modelClass = PreviousValueModel
else:
raise Exception(('ModelFactory received unsupported Model type: %s' % modelConfig['model']))
return modelClass(**modelConfig['modelParams'])
|
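Typical usage of create(); the parameter dictionary below is a skeleton only, since real HTMPrediction configurations carry full sensorParams/spParams/tmParams/clParams sections as described in the linked example:

from nupic.frameworks.opf.model_factory import ModelFactory

MODEL_PARAMS = {
    'model': 'HTMPrediction',  # selects HTMPredictionModel above
    'modelParams': {
        # ... sensorParams, spParams, tmParams, clParams, etc. ...
    },
}

model = ModelFactory.create(MODEL_PARAMS)
model.enableInference({'predictedField': 'consumption'})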
'Load saved model.
:param savedModelDir: (string)
Directory of where the experiment is to be or was saved
:param newSerialization: (bool) if True, load via the newer
serialization format (HTMPredictionModel.readFromCheckpoint);
defaults to False
:returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded model
instance.'
| @staticmethod
def loadFromCheckpoint(savedModelDir, newSerialization=False):
| if newSerialization:
return HTMPredictionModel.readFromCheckpoint(savedModelDir)
else:
return Model.load(savedModelDir)
|
'Compute the new metrics values, given the next inference/ground-truth values
:param results: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
object that was computed during the last iteration of the model.
:returns: (dict) where each key is the metric name, and each value is
its scalar value.'
| def update(self, results):
| self._addResults(results)
if ((not self.__metricSpecs) or (self.__currentInference is None)):
return {}
metricResults = {}
for (metric, spec, label) in zip(self.__metrics, self.__metricSpecs, self.__metricLabels):
inferenceElement = spec.inferenceElement
field = spec.field
groundTruth = self._getGroundTruth(inferenceElement)
inference = self._getInference(inferenceElement)
rawRecord = self._getRawGroundTruth()
result = self.__currentResult
if field:
if (type(inference) in (list, tuple)):
if (field in self.__fieldNameIndexMap):
fieldIndex = self.__fieldNameIndexMap[field]
inference = inference[fieldIndex]
else:
inference = None
if (groundTruth is not None):
if (type(groundTruth) in (list, tuple)):
if (field in self.__fieldNameIndexMap):
fieldIndex = self.__fieldNameIndexMap[field]
groundTruth = groundTruth[fieldIndex]
else:
groundTruth = None
else:
groundTruth = groundTruth[field]
metric.addInstance(groundTruth=groundTruth, prediction=inference, record=rawRecord, result=result)
metricResults[label] = metric.getMetric()['value']
return metricResults
|
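A sketch of how update() is typically driven from a model's run loop; metricSpecs, model and records are assumed to exist, and the constructor arguments mirror the MetricsManager instantiation shown earlier in this code:

manager = MetricsManager(metricSpecs,
                         model.getFieldInfo(),
                         model.getInferenceType())
for record in records:
    result = model.run(record)
    metricValues = manager.update(result)  # {label: scalar value}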
'Gets the current metric values
:returns: (dict) where each key is the metric name, and each value is
its scalar value. Same as the output of
:meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update`'
| def getMetrics(self):
| result = {}
for (metricObj, label) in zip(self.__metrics, self.__metricLabels):
value = metricObj.getMetric()
result[label] = value['value']
return result
|
'Gets detailed info about a given metric, in addition to its value. This
may include any statistics or auxiliary data that are computed for a given
metric.
:param metricLabel: (string) label of the given metric (see
:class:`~nupic.frameworks.opf.metrics.MetricSpec`)
:returns: (dict) of metric information, as returned by
:meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`.'
| def getMetricDetails(self, metricLabel):
| try:
metricIndex = self.__metricLabels.index(metricLabel)
except ValueError:
return None
return self.__metrics[metricIndex].getMetric()
|
':returns: (tuple) of labels for the metrics that are being calculated'
| def getMetricLabels(self):
| return tuple(self.__metricLabels)
|
'Stores the current model results in the manager\'s internal store
Parameters:
results: A ModelResults object that contains the current timestep\'s
input/inferences'
| def _addResults(self, results):
| if self.__isTemporal:
shiftedInferences = self.__inferenceShifter.shift(results).inferences
self.__currentResult = copy.deepcopy(results)
self.__currentResult.inferences = shiftedInferences
self.__currentInference = shiftedInferences
else:
self.__currentResult = copy.deepcopy(results)
self.__currentInference = copy.deepcopy(results.inferences)
self.__currentGroundTruth = copy.deepcopy(results)
|
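The InferenceShifter used in the temporal branch exists because a prediction emitted at step t describes the input arriving at step t+1, so it must be delayed before being scored against ground truth. A toy one-step illustration (nupic's real shifter generalizes this to multi-step and multi-field inferences):

from collections import deque

class OneStepShifterSketch(object):
    def __init__(self):
        self._held = deque()

    def shift(self, prediction):
        # Hold each prediction back one step so it lines up with the
        # ground truth it actually refers to.
        self._held.append(prediction)
        if len(self._held) < 2:
            return None  # nothing to score on the very first step
        return self._held.popleft()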
'Get the actual value for this field
Parameters:
inferenceElement: The inference element (part of the inference) that
is being used for this metric'
| def _getGroundTruth(self, inferenceElement):
| sensorInputElement = InferenceElement.getInputElement(inferenceElement)
if (sensorInputElement is None):
return None
return getattr(self.__currentGroundTruth.sensorInput, sensorInputElement)
|
'Get what the inferred value for this field was
Parameters:
inferenceElement: The inference element (part of the inference) that
is being used for this metric'
| def _getInference(self, inferenceElement):
| if (self.__currentInference is not None):
return self.__currentInference.get(inferenceElement, None)
return None
|
'Get the raw input record for the current iteration, as received from
the input source'
| def _getRawGroundTruth(self):
| return self.__currentGroundTruth.rawInput
|
'Creates the required metrics modules
Parameters:
metricSpecs:
A sequence of MetricSpec objects that specify which metric modules to
instantiate'
| def __constructMetricsModules(self, metricSpecs):
| if (not metricSpecs):
return
self.__metricSpecs = metricSpecs
for spec in metricSpecs:
if (not InferenceElement.validate(spec.inferenceElement)):
raise ValueError(('Invalid inference element for metric spec: %r' % spec))
self.__metrics.append(metrics.getModule(spec))
self.__metricLabels.append(spec.getLabel())
|