desc | decl | bodies
---|---|---
'Change the values of 1 or more fields in a job. Here, \'fields\' is a
dict with the name/value pairs to change. The names are the public names of
the fields (camelBack, not the lower_case_only form as stored in the DB).
This method is for private use by the ClientJobManager only.
Parameters:
jobID: jobID of the job record
fields: dictionary of fields to change
useConnectionID: True if the connection id of the calling function
must be the same as the connection that created the job. Set
to False for hypersearch workers
ignoreUnchanged: The default behavior is to raise a
RuntimeError if no rows are affected. This could be because:
1) there was no matching jobID, or
2) the data to update matched the data in the DB exactly.
Set this parameter to True if you expect case 2 and wish to
suppress the error.'
| @logExceptions(_LOGGER)
@g_retrySQL
def jobSetFields(self, jobID, fields, useConnectionID=True, ignoreUnchanged=False):
| assignmentExpressions = ','.join([('%s=%%s' % (self._jobs.pubToDBNameDict[f],)) for f in fields.iterkeys()])
assignmentValues = fields.values()
query = ('UPDATE %s SET %s WHERE job_id=%%s' % (self.jobsTableName, assignmentExpressions))
sqlParams = (assignmentValues + [jobID])
if useConnectionID:
query += ' AND _eng_cjm_conn_id=%s'
sqlParams.append(self._connectionID)
with ConnectionFactory.get() as conn:
result = conn.cursor.execute(query, sqlParams)
if ((result != 1) and (not ignoreUnchanged)):
raise RuntimeError(('Tried to change fields (%r) of jobID=%s (conn_id=%r), but an error occurred. result=%r; query=%r' % (assignmentExpressions, jobID, self._connectionID, result, query)))
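# Hedged usage sketch (not part of the original body); the 'results' field and
# the ClientJobsDAO.get() accessor are assumptions for illustration:
#   cjDAO = ClientJobsDAO.get()
#   cjDAO.jobSetFields(jobID, fields={'results': newResultsJson},
#                      useConnectionID=False, ignoreUnchanged=True)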
|
'Change the value of 1 field in a job to \'newValue\', but only if the
current value matches \'curValue\'. The \'fieldName\' is the public name of
the field (camelBack, not the lower_case_only form as stored in the DB).
This method is used for example by HypersearchWorkers to update the
engWorkerState field periodically. By qualifying on curValue, it ensures
that only 1 worker at a time is elected to perform the next scheduled
periodic sweep of the models.
Parameters:
jobID: jobID of the job record to modify
fieldName: public field name of the field
newValue: new value of the field to set
curValue: current value to qualify against
retval: True if we successfully modified the field
False if curValue did not match'
| @logExceptions(_LOGGER)
@g_retrySQL
def jobSetFieldIfEqual(self, jobID, fieldName, newValue, curValue):
| dbFieldName = self._jobs.pubToDBNameDict[fieldName]
conditionValue = []
if isinstance(curValue, bool):
conditionExpression = ('%s IS %s' % (dbFieldName, {True: 'TRUE', False: 'FALSE'}[curValue]))
elif (curValue is None):
conditionExpression = ('%s is NULL' % (dbFieldName,))
else:
conditionExpression = ('%s=%%s' % (dbFieldName,))
conditionValue.append(curValue)
query = ('UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), %s=%%s WHERE job_id=%%s AND %s' % (self.jobsTableName, dbFieldName, conditionExpression))
sqlParams = ([newValue, jobID] + conditionValue)
with ConnectionFactory.get() as conn:
result = conn.cursor.execute(query, sqlParams)
return (result == 1)
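# Hedged usage sketch: a hypersearch worker tries to win the next periodic
# sweep via this compare-and-set; the state values shown are illustrative only:
#   claimed = cjDAO.jobSetFieldIfEqual(jobID, fieldName='engWorkerState',
#                                      newValue=newStateJson,
#                                      curValue=oldStateJson)
#   if claimed:
#       pass  # only this worker performs the sweep; others see False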
|
'Increment the value of 1 integer field in a job by \'increment\'. The
\'fieldName\' is the public name of the field (camelBack, not the
lower_case_only form as stored in the DB).
This method is used, for example, by HypersearchWorkers to periodically
update integer counter fields of a job.
Parameters:
jobID: jobID of the job record to modify
fieldName: public field name of the field
increment: amount added to the current value of the field
useConnectionID: True if the connection id of the calling function
must match the connection that created the job. Defaults to False.'
| @logExceptions(_LOGGER)
@g_retrySQL
def jobIncrementIntField(self, jobID, fieldName, increment=1, useConnectionID=False):
| dbFieldName = self._jobs.pubToDBNameDict[fieldName]
with ConnectionFactory.get() as conn:
query = ('UPDATE %s SET %s=%s+%%s WHERE job_id=%%s' % (self.jobsTableName, dbFieldName, dbFieldName))
sqlParams = [increment, jobID]
if useConnectionID:
query += ' AND _eng_cjm_conn_id=%s'
sqlParams.append(self._connectionID)
result = conn.cursor.execute(query, sqlParams)
if (result != 1):
raise RuntimeError(('Tried to increment the field (%r) of jobID=%s (conn_id=%r), but an error occurred. result=%r; query=%r' % (dbFieldName, jobID, self._connectionID, result, query)))
|
'Update the results string and last-update-time fields of a job.
Parameters:
jobID: job ID of the job to modify
results: new results (json dict string)'
| @logExceptions(_LOGGER)
@g_retrySQL
def jobUpdateResults(self, jobID, results):
| with ConnectionFactory.get() as conn:
query = ('UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), results=%%s WHERE job_id=%%s' % (self.jobsTableName,))
conn.cursor.execute(query, [results, jobID])
|
'Delete all models from the models table.'
| @logExceptions(_LOGGER)
@g_retrySQL
def modelsClearAll(self):
| self._logger.info('Deleting all rows from models table %r', self.modelsTableName)
with ConnectionFactory.get() as conn:
query = ('DELETE FROM %s' % self.modelsTableName)
conn.cursor.execute(query)
|
'Insert a new unique model (based on params) into the model table in the
"running" state. This will return two things: whether or not the model was
actually inserted (i.e. that set of params isn\'t already in the table) and
the modelID chosen for that set of params. Even if the model was not
inserted by this call (it was already there) the modelID of the one already
inserted is returned.
Parameters:
jobID: jobID of the job to add models for
params: params for this model
paramsHash: hash of the params, generated by the worker
particleHash: hash of the particle info (for PSO). If not provided,
then paramsHash will be used.
retval: (modelID, wasInserted)
modelID: the model ID for this set of params
wasInserted: True if this call ended up inserting the
new model. False if this set of params was already in
the model table.'
| @logExceptions(_LOGGER)
def modelInsertAndStart(self, jobID, params, paramsHash, particleHash=None):
| if (particleHash is None):
particleHash = paramsHash
paramsHash = self._normalizeHash(paramsHash)
particleHash = self._normalizeHash(particleHash)
def findExactMatchNoRetries(conn):
return self._getOneMatchingRowNoRetries(self._models, conn, {'job_id': jobID, '_eng_params_hash': paramsHash, '_eng_particle_hash': particleHash}, ['model_id', '_eng_worker_conn_id'])
@g_retrySQL
def findExactMatchWithRetries():
with ConnectionFactory.get() as conn:
return findExactMatchNoRetries(conn)
row = findExactMatchWithRetries()
if (row is not None):
return (row[0], False)
@g_retrySQL
def insertModelWithRetries():
" NOTE: it's possible that another process on some machine is attempting\n to insert the same model at the same time as the caller "
with ConnectionFactory.get() as conn:
query = ('INSERT INTO %s (job_id, params, status, _eng_params_hash, _eng_particle_hash, start_time, _eng_last_update_time, _eng_worker_conn_id) VALUES (%%s, %%s, %%s, %%s, %%s, UTC_TIMESTAMP(), UTC_TIMESTAMP(), %%s) ' % (self.modelsTableName,))
sqlParams = (jobID, params, self.STATUS_RUNNING, paramsHash, particleHash, self._connectionID)
try:
numRowsAffected = conn.cursor.execute(query, sqlParams)
except Exception as e:
if ('Duplicate entry' not in str(e)):
raise
self._logger.info('Model insert attempt failed with DUP_ENTRY: jobID=%s; paramsHash=%s OR particleHash=%s; %r', jobID, paramsHash.encode('hex'), particleHash.encode('hex'), e)
else:
if (numRowsAffected == 1):
conn.cursor.execute('SELECT LAST_INSERT_ID()')
modelID = conn.cursor.fetchall()[0][0]
if (modelID != 0):
return (modelID, True)
else:
self._logger.warn('SELECT LAST_INSERT_ID for model returned 0, implying loss of connection: jobID=%s; paramsHash=%r; particleHash=%r', jobID, paramsHash, particleHash)
else:
self._logger.error('Attempt to insert model resulted in unexpected numRowsAffected: expected 1, but got %r; jobID=%s; paramsHash=%r; particleHash=%r', numRowsAffected, jobID, paramsHash, particleHash)
row = findExactMatchNoRetries(conn)
if (row is not None):
(modelID, connectionID) = row
return (modelID, (connectionID == self._connectionID))
query = ('SELECT (model_id) FROM %s WHERE job_id=%%s AND (_eng_params_hash=%%s OR _eng_particle_hash=%%s) LIMIT 1 ' % (self.modelsTableName,))
sqlParams = [jobID, paramsHash, particleHash]
numRowsFound = conn.cursor.execute(query, sqlParams)
assert (numRowsFound == 1), ('Model not found: jobID=%s AND (paramsHash=%r OR particleHash=%r); numRowsFound=%r' % (jobID, paramsHash, particleHash, numRowsFound))
(modelID,) = conn.cursor.fetchall()[0]
return (modelID, False)
return insertModelWithRetries()
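# Hedged usage sketch of the insert-or-fetch contract; makeParamsHash() is a
# hypothetical helper standing in for whatever hash the worker computes:
#   (modelID, inserted) = cjDAO.modelInsertAndStart(jobID, paramsJson,
#                                                   makeParamsHash(paramsJson))
#   if inserted:
#       pass  # this worker created the model and should evaluate it
#   else:
#       pass  # identical params already exist; modelID refers to that row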
|
'Get ALL info for a set of models.
WARNING!!!: The order of the results is NOT necessarily the same as the
order of the model IDs passed in!!!
Parameters:
modelIDs: list of model IDs
retval: list of namedtuples containing all the fields stored for each
model.'
| @logExceptions(_LOGGER)
def modelsInfo(self, modelIDs):
| assert isinstance(modelIDs, self._SEQUENCE_TYPES), ('wrong modelIDs type: %s' % (type(modelIDs),))
assert modelIDs, 'modelIDs is empty'
rows = self._getMatchingRowsWithRetries(self._models, dict(model_id=modelIDs), [self._models.pubToDBNameDict[f] for f in self._models.modelInfoNamedTuple._fields])
results = [self._models.modelInfoNamedTuple._make(r) for r in rows]
assert (len(results) == len(modelIDs)), ('modelIDs not found: %s' % (set(modelIDs) - set((r.modelId for r in results))))
return results
|
'Fetch the values of 1 or more fields from a sequence of model records.
Here, \'fields\' is a list with the names of the fields to fetch. The names
are the public names of the fields (camelBack, not the lower_case_only form
as stored in the DB).
WARNING!!!: The order of the results is NOT necessarily the same as the
order of the model IDs passed in!!!
Parameters:
modelIDs: A single modelID or sequence of modelIDs
fields: A list of fields to return
Returns: If modelIDs is a sequence:
a list of tuples->(modelID, [field1, field2,...])
If modelIDs is a single modelID:
a list of field values->[field1, field2,...]'
| @logExceptions(_LOGGER)
def modelsGetFields(self, modelIDs, fields):
| assert (len(fields) >= 1), 'fields is empty'
isSequence = isinstance(modelIDs, self._SEQUENCE_TYPES)
if isSequence:
assert (len(modelIDs) >= 1), 'modelIDs is empty'
else:
modelIDs = [modelIDs]
rows = self._getMatchingRowsWithRetries(self._models, dict(model_id=modelIDs), (['model_id'] + [self._models.pubToDBNameDict[f] for f in fields]))
if (len(rows) < len(modelIDs)):
raise RuntimeError(('modelIDs not found within the models table: %s' % ((set(modelIDs) - set((r[0] for r in rows))),)))
if (not isSequence):
return list(rows[0][1:])
return [(r[0], list(r[1:])) for r in rows]
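# Hedged illustration of the two return shapes (IDs and values are made up):
#   modelsGetFields([12, 13], ['numRecords'])  ->  [(12, [250]), (13, [198])]
#   modelsGetFields(12, ['numRecords'])        ->  [250]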
|
'Gets the specified fields for all the models for a single job. This is
similar to modelsGetFields
Parameters:
jobID: jobID for the models to be searched
fields: A list of fields to return
ignoreKilled: (True/False). If True, this will ignore models that
have been killed
Returns: a (possibly empty) list of tuples as follows
(model_id1, [field1, ..., fieldn]),
(model_id2, [field1, ..., fieldn]),
(model_id3, [field1, ..., fieldn])
NOTE: since there is a window of time between a job getting inserted into
jobs table and the job\'s worker(s) starting up and creating models, an
empty-list result is one of the normal outcomes.'
| @logExceptions(_LOGGER)
@g_retrySQL
def modelsGetFieldsForJob(self, jobID, fields, ignoreKilled=False):
| assert (len(fields) >= 1), 'fields is empty'
dbFields = [self._models.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(dbFields)
query = ('SELECT model_id, %s FROM %s WHERE job_id=%%s ' % (dbFieldsStr, self.modelsTableName))
sqlParams = [jobID]
if ignoreKilled:
query += ' AND (completion_reason IS NULL OR completion_reason != %s)'
sqlParams.append(self.CMPL_REASON_KILLED)
with ConnectionFactory.get() as conn:
conn.cursor.execute(query, sqlParams)
rows = conn.cursor.fetchall()
if (rows is None):
self._logger.error('Unexpected None result from cursor.fetchall; query=%r; Traceback=%r', query, traceback.format_exc())
return [(r[0], list(r[1:])) for r in rows]
|
'Gets fields from all models in a job that have been checkpointed. This is
used to figure out whether or not a new model should be checkpointed.
Parameters:
jobID: The jobID for the models to be searched
fields: A list of fields to return
Returns: a (possibly-empty) list of tuples as follows
(model_id1, [field1, ..., fieldn]),
(model_id2, [field1, ..., fieldn]),
(model_id3, [field1, ..., fieldn])'
| @logExceptions(_LOGGER)
@g_retrySQL
def modelsGetFieldsForCheckpointed(self, jobID, fields):
| assert (len(fields) >= 1), 'fields is empty'
with ConnectionFactory.get() as conn:
dbFields = [self._models.pubToDBNameDict[f] for f in fields]
dbFieldStr = ', '.join(dbFields)
query = 'SELECT model_id, {fields} from {models} WHERE job_id=%s AND model_checkpoint_id IS NOT NULL'.format(fields=dbFieldStr, models=self.modelsTableName)
conn.cursor.execute(query, [jobID])
rows = conn.cursor.fetchall()
return [(r[0], list(r[1:])) for r in rows]
|
'Change the values of 1 or more fields in a model. Here, \'fields\' is a
dict with the name/value pairs to change. The names are the public names of
the fields (camelBack, not the lower_case_only form as stored in the DB).
Parameters:
modelID: modelID of the model record
fields: dictionary of fields to change
ignoreUnchanged: The default behavior is to raise a
RuntimeError if no rows are affected. This could be because:
1) there was no matching modelID, or
2) the data to update matched the data in the DB exactly.
Set this parameter to True if you expect case 2 and wish to
suppress the error.'
| @logExceptions(_LOGGER)
@g_retrySQL
def modelSetFields(self, modelID, fields, ignoreUnchanged=False):
| assignmentExpressions = ','.join((('%s=%%s' % (self._models.pubToDBNameDict[f],)) for f in fields.iterkeys()))
assignmentValues = fields.values()
query = ('UPDATE %s SET %s, update_counter = update_counter+1 WHERE model_id=%%s' % (self.modelsTableName, assignmentExpressions))
sqlParams = (assignmentValues + [modelID])
with ConnectionFactory.get() as conn:
numAffectedRows = conn.cursor.execute(query, sqlParams)
self._logger.debug('Executed: numAffectedRows=%r, query=%r, sqlParams=%r', numAffectedRows, query, sqlParams)
if ((numAffectedRows != 1) and (not ignoreUnchanged)):
raise RuntimeError(('Tried to change fields (%r) of model %r (conn_id=%r), but an error occurred. numAffectedRows=%r; query=%r; sqlParams=%r' % (fields, modelID, self._connectionID, numAffectedRows, query, sqlParams)))
|
'Get the params and paramsHash for a set of models.
WARNING!!!: The order of the results is NOT necessarily the same as the
order of the model IDs passed in!!!
Parameters:
modelIDs: list of model IDs
retval: list of result namedtuples defined in
ClientJobsDAO._models.getParamsNamedTuple. Each tuple
contains: (modelId, params, engParamsHash)'
| @logExceptions(_LOGGER)
def modelsGetParams(self, modelIDs):
| assert isinstance(modelIDs, self._SEQUENCE_TYPES), ('Wrong modelIDs type: %r' % (type(modelIDs),))
assert (len(modelIDs) >= 1), 'modelIDs is empty'
rows = self._getMatchingRowsWithRetries(self._models, {'model_id': modelIDs}, [self._models.pubToDBNameDict[f] for f in self._models.getParamsNamedTuple._fields])
assert (len(rows) == len(modelIDs)), ("Didn't find modelIDs: %r" % ((set(modelIDs) - set((r[0] for r in rows))),))
return [self._models.getParamsNamedTuple._make(r) for r in rows]
|
'Get the results string and other status fields for a set of models.
WARNING!!!: The order of the results is NOT necessarily the same as the
order of the model IDs passed in!!!
For each model, this returns a tuple containing:
(modelID, results, status, updateCounter, numRecords, completionReason,
completionMsg, engParamsHash)
Parameters:
modelIDs: list of model IDs
retval: list of result tuples. Each tuple contains:
(modelID, results, status, updateCounter, numRecords,
completionReason, completionMsg, engParamsHash)'
| @logExceptions(_LOGGER)
def modelsGetResultAndStatus(self, modelIDs):
| assert isinstance(modelIDs, self._SEQUENCE_TYPES), ('Wrong modelIDs type: %r' % type(modelIDs))
assert (len(modelIDs) >= 1), 'modelIDs is empty'
rows = self._getMatchingRowsWithRetries(self._models, {'model_id': modelIDs}, [self._models.pubToDBNameDict[f] for f in self._models.getResultAndStatusNamedTuple._fields])
assert (len(rows) == len(modelIDs)), ("Didn't find modelIDs: %r" % ((set(modelIDs) - set((r[0] for r in rows))),))
return [self._models.getResultAndStatusNamedTuple._make(r) for r in rows]
|
'Return info on all of the models that are already in the models
table for a given job. For each model, this returns a tuple
containing: (modelID, updateCounter).
Note that we don\'t return the results for all models, since the results
string could be quite large. The information we are returning is
just 2 integer fields.
Parameters:
jobID: jobID to query
retval: (possibly empty) list of tuples. Each tuple contains:
(modelID, updateCounter)'
| @logExceptions(_LOGGER)
def modelsGetUpdateCounters(self, jobID):
| rows = self._getMatchingRowsWithRetries(self._models, {'job_id': jobID}, [self._models.pubToDBNameDict[f] for f in self._models.getUpdateCountersNamedTuple._fields])
return [self._models.getUpdateCountersNamedTuple._make(r) for r in rows]
|
'Update the results string, optimized-metric value, and/or num_records
fields of a model. This will fail if the model does not currently belong to this
client (connection_id doesn\'t match).
Parameters:
modelID: model ID of model to modify
results: new results, or None to ignore
metricValue: the value of the metric being optimized, or None to ignore
numRecords: new numRecords, or None to ignore'
| @logExceptions(_LOGGER)
@g_retrySQL
def modelUpdateResults(self, modelID, results=None, metricValue=None, numRecords=None):
| assignmentExpressions = ['_eng_last_update_time=UTC_TIMESTAMP()', 'update_counter=update_counter+1']
assignmentValues = []
if (results is not None):
assignmentExpressions.append('results=%s')
assignmentValues.append(results)
if (numRecords is not None):
assignmentExpressions.append('num_records=%s')
assignmentValues.append(numRecords)
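# NOTE: the metricValue == metricValue check is False only for NaN, so this
# clause also skips NaN metric values.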
if ((metricValue is not None) and (metricValue == metricValue)):
assignmentExpressions.append('optimized_metric=%s')
assignmentValues.append(float(metricValue))
query = ('UPDATE %s SET %s WHERE model_id=%%s and _eng_worker_conn_id=%%s' % (self.modelsTableName, ','.join(assignmentExpressions)))
sqlParams = (assignmentValues + [modelID, self._connectionID])
with ConnectionFactory.get() as conn:
numRowsAffected = conn.cursor.execute(query, sqlParams)
if (numRowsAffected != 1):
raise InvalidConnectionException(('Tried to update the info of modelID=%r using connectionID=%r, but this model belongs to some other worker or modelID not found; numRowsAffected=%r' % (modelID, self._connectionID, numRowsAffected)))
|
'Mark a model as completed, with the given completionReason and
completionMsg. This will fail if the model does not currently belong to this
client (connection_id doesn\'t match).
Parameters:
modelID: model ID of model to modify
completionReason: completionReason string
completionMsg: completionMsg string
cpuTime: amount of CPU time spent on this model
useConnectionID: True if the connection id of the calling function
must be the same as the connection that created (or
adopted) the model. Set to True for hypersearch workers,
which use this mechanism for orphaned model detection.'
| @logExceptions(_LOGGER)
@g_retrySQL
def modelSetCompleted(self, modelID, completionReason, completionMsg, cpuTime=0, useConnectionID=True):
| if (completionMsg is None):
completionMsg = ''
query = ('UPDATE %s SET status=%%s, completion_reason=%%s, completion_msg=%%s, end_time=UTC_TIMESTAMP(), cpu_time=%%s, _eng_last_update_time=UTC_TIMESTAMP(), update_counter=update_counter+1 WHERE model_id=%%s' % (self.modelsTableName,))
sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg, cpuTime, modelID]
if useConnectionID:
query += ' AND _eng_worker_conn_id=%s'
sqlParams.append(self._connectionID)
with ConnectionFactory.get() as conn:
numRowsAffected = conn.cursor.execute(query, sqlParams)
if (numRowsAffected != 1):
raise InvalidConnectionException(('Tried to set modelID=%r using connectionID=%r, but this model belongs to some other worker or modelID not found; numRowsAffected=%r' % (modelID, self._connectionID, numRowsAffected)))
|
'Look through the models table for an orphaned model, which is a model
that is not completed yet, whose _eng_last_update_time is more than
maxUpdateInterval seconds ago.
If one is found, change its _eng_worker_conn_id to the current worker\'s
and return the model id.
Parameters:
jobId: jobID of the job whose models should be scanned
maxUpdateInterval: a running model is considered orphaned if its
_eng_last_update_time is more than this many seconds old
retval: modelId of the model we adopted, or None if none found'
| @logExceptions(_LOGGER)
def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):
| @g_retrySQL
def findCandidateModelWithRetries():
modelID = None
with ConnectionFactory.get() as conn:
query = ('SELECT model_id FROM %s WHERE status=%%s AND job_id=%%s AND TIMESTAMPDIFF(SECOND, _eng_last_update_time, UTC_TIMESTAMP()) > %%s LIMIT 1 ' % (self.modelsTableName,))
sqlParams = [self.STATUS_RUNNING, jobId, maxUpdateInterval]
numRows = conn.cursor.execute(query, sqlParams)
rows = conn.cursor.fetchall()
assert (numRows <= 1), ('Unexpected numRows: %r' % numRows)
if (numRows == 1):
(modelID,) = rows[0]
return modelID
@g_retrySQL
def adoptModelWithRetries(modelID):
adopted = False
with ConnectionFactory.get() as conn:
query = ('UPDATE %s SET _eng_worker_conn_id=%%s, _eng_last_update_time=UTC_TIMESTAMP() WHERE model_id=%%s AND status=%%s AND TIMESTAMPDIFF(SECOND, _eng_last_update_time, UTC_TIMESTAMP()) > %%s LIMIT 1 ' % (self.modelsTableName,))
sqlParams = [self._connectionID, modelID, self.STATUS_RUNNING, maxUpdateInterval]
numRowsAffected = conn.cursor.execute(query, sqlParams)
assert (numRowsAffected <= 1), ('Unexpected numRowsAffected=%r' % (numRowsAffected,))
if (numRowsAffected == 1):
adopted = True
else:
(status, connectionID) = self._getOneMatchingRowNoRetries(self._models, conn, {'model_id': modelID}, ['status', '_eng_worker_conn_id'])
adopted = ((status == self.STATUS_RUNNING) and (connectionID == self._connectionID))
return adopted
adoptedModelID = None
while True:
modelID = findCandidateModelWithRetries()
if (modelID is None):
break
if adoptModelWithRetries(modelID):
adoptedModelID = modelID
break
return adoptedModelID
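# Hedged usage sketch: a worker's main loop may try to adopt an orphan before
# creating new models (the 300-second threshold is illustrative):
#   orphanID = cjDAO.modelAdoptNextOrphan(jobID, maxUpdateInterval=300)
#   if orphanID is not None:
#       pass  # resume evaluating the adopted model under this connection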
|
'Acquire a ConnectionWrapper instance that represents a connection
to the SQL server per nupic.cluster.database.* configuration settings.
NOTE: caller is responsible for calling the ConnectionWrapper instance\'s
release() method after using the connection in order to release resources.
Better yet, use the returned ConnectionWrapper instance in a Context Manager
statement for automatic invocation of release():
Example:
# If using Jython 2.5.x, first import with_statement at the very top of
your script (don\'t need this import for Jython/Python 2.6.x and later):
from __future__ import with_statement
# Then:
from nupic.database.Connection import ConnectionFactory
# Then use it like this
with ConnectionFactory.get() as conn:
conn.cursor.execute("SELECT ...")
conn.cursor.fetchall()
conn.cursor.execute("INSERT ...")
WARNING: DO NOT close the underlying connection or cursor as it may be
shared by other modules in your process. ConnectionWrapper\'s release()
method will do the right thing.
Parameters:
retval: A ConnectionWrapper instance. NOTE: Caller is responsible
for releasing resources as described above.'
| @classmethod
def get(cls):
| if (cls._connectionPolicy is None):
logger = _getLogger(cls)
logger.info('Creating db connection policy via provider %r', cls._connectionPolicyInstanceProvider)
cls._connectionPolicy = cls._connectionPolicyInstanceProvider()
logger.debug('Created connection policy: %r', cls._connectionPolicy)
return cls._connectionPolicy.acquireConnection()
|
'Close ConnectionFactory\'s connection policy. Typically, there is no need
to call this method as the system will automatically close the connections
when the process exits.
NOTE: This method should be used with CAUTION. It is designed to be
called ONLY by the code responsible for startup and shutdown of the process
since it closes the connection(s) used by ALL clients in this process.'
| @classmethod
def close(cls):
| if (cls._connectionPolicy is not None):
cls._connectionPolicy.close()
cls._connectionPolicy = None
return
|
'Set the method for ConnectionFactory to use when it needs to
instantiate its database connection policy.
NOTE: This method should be used with CAUTION. ConnectionFactory\'s default
behavior should be adequate for all NuPIC code, and this method is provided
primarily for diagnostics. It is designed to only be called by the code
responsible for startup of the process since the provider method has no
impact after ConnectionFactory\'s connection policy instance is instantiated.
See ConnectionFactory._createDefaultPolicy
Parameters:
provider: The method that instantiates the singleton database
connection policy to be used by ConnectionFactory class.
The method must be compatible with the following signature:
<DatabaseConnectionPolicyIface subclass instance> provider()'
| @classmethod
def setConnectionPolicyProvider(cls, provider):
| cls._connectionPolicyInstanceProvider = provider
return
|
'[private] Create the default database connection policy instance
Parameters:
retval: The default database connection policy instance'
| @classmethod
def _createDefaultPolicy(cls):
| logger = _getLogger(cls)
logger.debug('Creating database connection policy: platform=%r; pymysql.VERSION=%r', platform.system(), pymysql.VERSION)
if (platform.system() == 'Java'):
policy = SingleSharedConnectionPolicy()
else:
policy = PooledConnectionPolicy()
return policy
|
'Parameters:
dbConn: the underlying database connection instance
cursor: database cursor
releaser: a method to call to release the connection and cursor;
method signature:
None dbConnReleaser(dbConn, cursor)'
| def __init__(self, dbConn, cursor, releaser, logger):
| global g_max_concurrency
try:
self._logger = logger
self.dbConn = dbConn
' database connection instance '
self.cursor = cursor
" Public cursor instance. Don't close it directly: Connection.release()\n will do the right thing.\n "
self._releaser = releaser
self._addedToInstanceSet = False
' True if we added self to _clsOutstandingInstances '
self._creationTracebackString = None
' Instance creation traceback string (if g_max_concurrency is enabled) '
if (g_max_concurrency is not None):
self._trackInstanceAndCheckForConcurrencyViolation()
logger.debug('Acquired: %r; numOutstanding=%s', self, self._clsNumOutstanding)
except:
logger.exception('Exception while instantiating %r;', self)
if self._addedToInstanceSet:
self._clsOutstandingInstances.remove(self)
releaser(dbConn=dbConn, cursor=cursor)
raise
else:
self.__class__._clsNumOutstanding += 1
return
|
'[Context Manager protocol method] Permit a ConnectionWrapper instance
to be used in a context manager expression (with ... as:) to facilitate
robust release of resources (instead of try:/finally:/release()). See
examples in ConnectionFactory docstring.'
| def __enter__(self):
| return self
|
'[Context Manager protocol method] Release resources.'
| def __exit__(self, exc_type, exc_val, exc_tb):
| self.release()
return False
|
'Release the database connection and cursor
The receiver of the Connection instance MUST call this method in order
to reclaim resources'
| def release(self):
| self._logger.debug('Releasing: %r', self)
if self._addedToInstanceSet:
try:
self._clsOutstandingInstances.remove(self)
except:
self._logger.exception('Failed to remove self from _clsOutstandingInstances: %r;', self)
raise
self._releaser(dbConn=self.dbConn, cursor=self.cursor)
self.__class__._clsNumOutstanding -= 1
assert (self._clsNumOutstanding >= 0), ('_clsNumOutstanding=%r' % (self._clsNumOutstanding,))
self._releaser = None
self.cursor = None
self.dbConn = None
self._creationTracebackString = None
self._addedToInstanceSet = False
self._logger = None
return
|
'Check for concurrency violation and add self to
_clsOutstandingInstances.
ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is
incremented'
| def _trackInstanceAndCheckForConcurrencyViolation(self):
| global g_max_concurrency, g_max_concurrency_raise_exception
assert (g_max_concurrency is not None)
assert (self not in self._clsOutstandingInstances), repr(self)
self._creationTracebackString = traceback.format_stack()
if (self._clsNumOutstanding >= g_max_concurrency):
errorMsg = ('With numOutstanding=%r, exceeded concurrency limit=%r when requesting %r. OTHER TRACKED UNRELEASED INSTANCES (%s): %r' % (self._clsNumOutstanding, g_max_concurrency, self, len(self._clsOutstandingInstances), self._clsOutstandingInstances))
self._logger.error(errorMsg)
if g_max_concurrency_raise_exception:
raise ConcurrencyExceededError(errorMsg)
self._clsOutstandingInstances.add(self)
self._addedToInstanceSet = True
return
|
'Close the policy instance and its shared database connection.'
| def close(self):
| raise NotImplementedError()
|
'Get a Connection instance.
Parameters:
retval: A ConnectionWrapper instance.
Caller is responsible for calling the ConnectionWrapper
instance\'s release() method to release resources.'
| def acquireConnection(self):
| raise NotImplementedError()
|
'Construct an instance. The instance\'s open() method must be
called to make it ready for acquireConnection() calls.'
| def __init__(self):
| self._logger = _getLogger(self.__class__)
self._conn = SteadyDB.connect(**_getCommonSteadyDBArgsDict())
self._logger.debug('Created %s', self.__class__.__name__)
return
|
'Close the policy instance and its shared database connection.'
| def close(self):
| self._logger.info('Closing')
if (self._conn is not None):
self._conn.close()
self._conn = None
else:
self._logger.warning('close() called, but connection policy was already closed')
return
|
'Get a Connection instance.
Parameters:
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance\'s release() method or use it in a context manager
expression (with ... as:) to release resources.'
| def acquireConnection(self):
| self._logger.debug('Acquiring connection')
self._conn._ping_check()
connWrap = ConnectionWrapper(dbConn=self._conn, cursor=self._conn.cursor(), releaser=self._releaseConnection, logger=self._logger)
return connWrap
|
'Release database connection and cursor; passed as a callback to
ConnectionWrapper'
| def _releaseConnection(self, dbConn, cursor):
| self._logger.debug('Releasing connection')
cursor.close()
return
|
'Construct an instance. The instance\'s open() method must be
called to make it ready for acquireConnection() calls.'
| def __init__(self):
| self._logger = _getLogger(self.__class__)
self._logger.debug('Opening')
self._pool = PooledDB(**_getCommonSteadyDBArgsDict())
self._logger.info('Created %s', self.__class__.__name__)
return
|
'Close the policy instance and its database connection pool.'
| def close(self):
| self._logger.info('Closing')
if (self._pool is not None):
self._pool.close()
self._pool = None
else:
self._logger.warning('close() called, but connection policy was already closed')
return
|
'Get a connection from the pool.
Parameters:
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance\'s release() method or use it in a context manager
expression (with ... as:) to release resources.'
| def acquireConnection(self):
| self._logger.debug('Acquiring connection')
dbConn = self._pool.connection(shareable=False)
connWrap = ConnectionWrapper(dbConn=dbConn, cursor=dbConn.cursor(), releaser=self._releaseConnection, logger=self._logger)
return connWrap
|
'Release database connection and cursor; passed as a callback to
ConnectionWrapper'
| def _releaseConnection(self, dbConn, cursor):
| self._logger.debug('Releasing connection')
cursor.close()
dbConn.close()
return
|
'Construct an instance. The instance\'s open() method must be
called to make it ready for acquireConnection() calls.'
| def __init__(self):
| self._logger = _getLogger(self.__class__)
self._opened = True
self._logger.info('Created %s', self.__class__.__name__)
return
|
'Close the policy instance.'
| def close(self):
| self._logger.info('Closing')
if self._opened:
self._opened = False
else:
self._logger.warning('close() called, but connection policy was already closed')
return
|
'Create a Connection instance.
Parameters:
retval: A ConnectionWrapper instance. NOTE: Caller
is responsible for calling the ConnectionWrapper
instance\'s release() method or use it in a context manager
expression (with ... as:) to release resources.'
| def acquireConnection(self):
| self._logger.debug('Acquiring connection')
dbConn = SteadyDB.connect(**_getCommonSteadyDBArgsDict())
connWrap = ConnectionWrapper(dbConn=dbConn, cursor=dbConn.cursor(), releaser=self._releaseConnection, logger=self._logger)
return connWrap
|
'Release database connection and cursor; passed as a callback to
ConnectionWrapper'
| def _releaseConnection(self, dbConn, cursor):
| self._logger.debug('Releasing connection')
cursor.close()
dbConn.close()
return
|
'n is the total number of bits in the input
w is the number of bits used to encode each input bit'
| def __init__(self, n, w=None, name='sparse_pass_through', forced=False, verbosity=0):
| super(SparsePassThroughEncoder, self).__init__(n, w, name, forced, verbosity)
|
'See method description in base.py'
| def encodeIntoArray(self, value, output):
| denseInput = numpy.zeros(output.shape)
try:
denseInput[value] = 1
except IndexError:
if isinstance(value, numpy.ndarray):
raise ValueError('Numpy array must have integer dtype but got {}'.format(value.dtype))
raise
super(SparsePassThroughEncoder, self).encodeIntoArray(denseInput, output)
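# Hedged usage sketch: the encoded value is a sequence of ON-bit indices that
# gets scattered into a dense vector before delegating to the parent encoder.
# Constructor arguments are assumptions for illustration:
#   enc = SparsePassThroughEncoder(10, w=2, forced=True)
#   sdr = enc.encode(numpy.array([2, 7]))   # bits 2 and 7 set, all others 0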
|
'[ScalarEncoder class method override]'
| def __init__(self, w, minval=None, maxval=None, periodic=False, n=0, radius=0, resolution=0, name=None, verbosity=0, clipInput=True, forced=False):
| self._learningEnabled = True
self._stateLock = False
self.width = 0
self.encoders = None
self.description = []
self.name = name
if periodic:
raise Exception('Delta encoder does not encode periodic inputs')
assert (n != 0)
self._adaptiveScalarEnc = AdaptiveScalarEncoder(w=w, n=n, minval=minval, maxval=maxval, clipInput=True, name=name, verbosity=verbosity, forced=forced)
self.width += self._adaptiveScalarEnc.getWidth()
self.n = self._adaptiveScalarEnc.n
self._prevAbsolute = None
self._prevDelta = None
|
'[ScalarEncoder class method override]'
| def topDownCompute(self, encoded):
| if ((self._prevAbsolute is None) or (self._prevDelta is None)):
return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))]
ret = self._adaptiveScalarEnc.topDownCompute(encoded)
if (self._prevAbsolute is not None):
ret = [EncoderResult(value=(ret[0].value + self._prevAbsolute), scalar=(ret[0].scalar + self._prevAbsolute), encoding=ret[0].encoding)]
return ret
|
'Encoder class virtual method override'
| def getDecoderOutputFieldTypes(self):
| return (FieldMetaType.float,)
|
'Convert the input, which is in normal space, into log space'
| def _getScaledValue(self, inpt):
| if (inpt == SENTINEL_VALUE_FOR_MISSING_DATA):
return None
else:
val = inpt
if (val < self.minval):
val = self.minval
elif (val > self.maxval):
val = self.maxval
scaledVal = math.log10(val)
return scaledVal
|
'See the function description in base.py'
| def getBucketIndices(self, inpt):
| scaledVal = self._getScaledValue(inpt)
if (scaledVal is None):
return [None]
else:
return self.encoder.getBucketIndices(scaledVal)
|
'See the function description in base.py'
| def encodeIntoArray(self, inpt, output):
| scaledVal = self._getScaledValue(inpt)
if (scaledVal is None):
output[0:] = 0
else:
self.encoder.encodeIntoArray(scaledVal, output)
if (self.verbosity >= 2):
print 'input:', inpt, 'scaledVal:', scaledVal, 'output:', output
print 'decoded:', self.decodedToStr(self.decode(output))
|
'See the function description in base.py'
| def decode(self, encoded, parentFieldName=''):
| (fieldsDict, fieldNames) = self.encoder.decode(encoded)
if (len(fieldsDict) == 0):
return (fieldsDict, fieldNames)
assert (len(fieldsDict) == 1)
(inRanges, inDesc) = fieldsDict.values()[0]
outRanges = []
for (minV, maxV) in inRanges:
outRanges.append((math.pow(10, minV), math.pow(10, maxV)))
desc = ''
numRanges = len(outRanges)
for i in xrange(numRanges):
if (outRanges[i][0] != outRanges[i][1]):
desc += ('%.2f-%.2f' % (outRanges[i][0], outRanges[i][1]))
else:
desc += ('%.2f' % outRanges[i][0])
if (i < (numRanges - 1)):
desc += ', '
if (parentFieldName != ''):
fieldName = ('%s.%s' % (parentFieldName, self.name))
else:
fieldName = self.name
return ({fieldName: (outRanges, desc)}, [fieldName])
|
'See the function description in base.py'
| def getBucketValues(self):
| if (self._bucketValues is None):
scaledValues = self.encoder.getBucketValues()
self._bucketValues = []
for scaledValue in scaledValues:
value = math.pow(10, scaledValue)
self._bucketValues.append(value)
return self._bucketValues
|
'See the function description in base.py'
| def getBucketInfo(self, buckets):
| scaledResult = self.encoder.getBucketInfo(buckets)[0]
scaledValue = scaledResult.value
value = math.pow(10, scaledValue)
return [EncoderResult(value=value, scalar=value, encoding=scaledResult.encoding)]
|
'See the function description in base.py'
| def topDownCompute(self, encoded):
| scaledResult = self.encoder.topDownCompute(encoded)[0]
scaledValue = scaledResult.value
value = math.pow(10, scaledValue)
return EncoderResult(value=value, scalar=value, encoding=scaledResult.encoding)
|
'See the function description in base.py'
| def closenessScores(self, expValues, actValues, fractional=True):
| if (expValues[0] > 0):
expValue = math.log10(expValues[0])
else:
expValue = self.minScaledValue
if (actValues[0] > 0):
actValue = math.log10(actValues[0])
else:
actValue = self.minScaledValue
if fractional:
err = abs((expValue - actValue))
pctErr = (err / (self.maxScaledValue - self.minScaledValue))
pctErr = min(1.0, pctErr)
closeness = (1.0 - pctErr)
else:
err = abs((expValue - actValue))
closeness = err
return numpy.array([closeness])
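# Worked example, assuming self.minScaledValue == 0.0 and
# self.maxScaledValue == 4.0 (these attributes are set elsewhere):
#   expValues=[100.0], actValues=[10.0] -> expValue=2.0, actValue=1.0
#   fractional=True: err=1.0, pctErr=0.25, closeness=0.75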
|
'See `nupic.encoders.base.Encoder` for more information.'
| def getDescription(self):
| return [('speed', 0), ('longitude', 1), ('latitude', 2), ('altitude', 3)]
|
'See `nupic.encoders.base.Encoder` for more information.'
| def getScalars(self, inputData):
| return numpy.array(([0] * len(self.getDescription())))
|
'See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array'
| def encodeIntoArray(self, inputData, output):
| altitude = None
if (len(inputData) == 4):
(speed, longitude, latitude, altitude) = inputData
else:
(speed, longitude, latitude) = inputData
coordinate = self.coordinateForPosition(longitude, latitude, altitude)
radius = self.radiusForSpeed(speed)
super(GeospatialCoordinateEncoder, self).encodeIntoArray((coordinate, radius), output)
|
'Returns coordinate for given GPS position.
:param: longitude (float) Longitude of position
:param: latitude (float) Latitude of position
:param: altitude (float) Altitude of position
:returns: (numpy.array) Coordinate that the given GPS position
maps to'
| def coordinateForPosition(self, longitude, latitude, altitude=None):
| coords = PROJ(longitude, latitude)
if (altitude is not None):
coords = transform(PROJ, geocentric, coords[0], coords[1], altitude)
coordinate = numpy.array(coords)
coordinate = (coordinate / self.scale)
return coordinate.astype(int)
|
'Returns radius for given speed.
Tries to get the encodings of consecutive readings to be
adjacent with some overlap.
:param: speed (float) Speed (in meters per second)
:returns: (int) Radius for given speed'
| def radiusForSpeed(self, speed):
| overlap = 1.5
coordinatesPerTimestep = ((speed * self.timestep) / self.scale)
radius = int(round(((float(coordinatesPerTimestep) / 2) * overlap)))
minRadius = int(math.ceil(((math.sqrt(self.w) - 1) / 2)))
return max(radius, minRadius)
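# Worked example, assuming self.scale == 30 (meters), self.timestep == 1
# (second) and self.w == 15:
#   coordinatesPerTimestep = 20.0 * 1 / 30            ~= 0.67
#   radius    = int(round(0.67 / 2 * 1.5))            == 1
#   minRadius = int(math.ceil((sqrt(15) - 1) / 2))    == 2
#   radiusForSpeed(20.0) -> max(1, 2) == 2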
|
'Should return the output width, in bits.
:return: (int) output width in bits'
| def getWidth(self):
| raise NotImplementedError()
|
'Encodes inputData and puts the encoded value into the numpy output array,
which is a 1-D array of length returned by :meth:`.getWidth`.
.. note:: The numpy output array is reused, so clear it before updating it.
:param inputData: Data to encode. This should be validated by the encoder.
:param output: numpy 1-D array of same length returned by
:meth:`.getWidth`.'
| def encodeIntoArray(self, inputData, output):
| raise NotImplementedError()
|
'Set whether learning is enabled.
:param learningEnabled: (bool) whether learning should be enabled'
| def setLearning(self, learningEnabled):
| if hasattr(self, '_learningEnabled'):
self._learningEnabled = learningEnabled
|
'This method is called by the model to set the statistics like min and
max for the underlying encoders if this information is available.
:param fieldName: name of the field this encoder is encoding, provided by
:class:`~.nupic.encoders.multi.MultiEncoder`.
:param fieldStatistics: dictionary of dictionaries with the first level being
the fieldname and the second index the statistic, e.g.
``fieldStatistics[\'pounds\'][\'min\']``'
| def setFieldStats(self, fieldName, fieldStatistics):
| pass
|
'Convenience wrapper for :meth:`.encodeIntoArray`.
This may be less efficient because it allocates a new numpy array every
call.
:param inputData: input data to be encoded
:return: a numpy array with the encoded representation of inputData'
| def encode(self, inputData):
| output = numpy.zeros((self.getWidth(),), dtype=defaultDtype)
self.encodeIntoArray(inputData, output)
return output
|
'Return the field names for each of the scalar values returned by
getScalars.
:param parentFieldName: The name of the encoder which is our parent. This
name is prefixed to each of the field names within this encoder to
form the keys of the dict() in the retval.
:return: array of field names'
| def getScalarNames(self, parentFieldName=''):
| names = []
if (self.encoders is not None):
for (name, encoder, offset) in self.encoders:
subNames = encoder.getScalarNames(parentFieldName=name)
if (parentFieldName != ''):
subNames = [('%s.%s' % (parentFieldName, name)) for name in subNames]
names.extend(subNames)
elif (parentFieldName != ''):
names.append(parentFieldName)
else:
names.append(self.name)
return names
|
'Returns a sequence of field types corresponding to the elements in the
decoded output field array. The types are defined by
:class:`~nupic.data.field_meta.FieldMetaType`.
:return: list of :class:`~nupic.data.field_meta.FieldMetaType` objects'
| def getDecoderOutputFieldTypes(self):
| if (hasattr(self, '_flattenedFieldTypeList') and (self._flattenedFieldTypeList is not None)):
return self._flattenedFieldTypeList
fieldTypes = []
for (name, encoder, offset) in self.encoders:
subTypes = encoder.getDecoderOutputFieldTypes()
fieldTypes.extend(subTypes)
self._flattenedFieldTypeList = fieldTypes
return fieldTypes
|
'Setting this to true freezes the state of the encoder
This is separate from the learning state which affects changing parameters.
Implemented in subclasses.'
| def setStateLock(self, lock):
| pass
|
'Gets the value of a given field from the input record'
| def _getInputValue(self, obj, fieldName):
| if isinstance(obj, dict):
if (not (fieldName in obj)):
knownFields = ', '.join((key for key in obj.keys() if (not key.startswith('_'))))
raise ValueError(("Unknown field name '%s' in input record. Known fields are '%s'.\nThis could be because input headers are mislabeled, or because input data rows do not contain a value for '%s'." % (fieldName, knownFields, fieldName)))
return obj[fieldName]
else:
return getattr(obj, fieldName)
|
':return: a reference to each sub-encoder in this encoder. They are
returned in the same order as they are for :meth:`.getScalarNames`
and :meth:`.getScalars`.'
| def getEncoderList(self):
| if (hasattr(self, '_flattenedEncoderList') and (self._flattenedEncoderList is not None)):
return self._flattenedEncoderList
encoders = []
if (self.encoders is not None):
for (name, encoder, offset) in self.encoders:
subEncoders = encoder.getEncoderList()
encoders.extend(subEncoders)
else:
encoders.append(self)
self._flattenedEncoderList = encoders
return encoders
|
'Returns a numpy array containing the sub-field scalar value(s) for
each sub-field of the ``inputData``. To get the associated field names for
each of the scalar values, call :meth:`.getScalarNames()`.
For a simple scalar encoder, the scalar value is simply the input unmodified.
For category encoders, it is the scalar representing the category string
that is passed in. For the datetime encoder, the scalar value is the
number of seconds since epoch.
The intent of the scalar representation of a sub-field is to provide a
baseline for measuring error differences. You can compare the scalar value
of the inputData with the scalar value returned from :meth:`.topDownCompute`
on a top-down representation to evaluate prediction accuracy, for example.
:param inputData: The data from the source. This is typically an object with
members
:return: array of scalar values'
| def getScalars(self, inputData):
| retVals = numpy.array([])
if (self.encoders is not None):
for (name, encoder, offset) in self.encoders:
values = encoder.getScalars(self._getInputValue(inputData, name))
retVals = numpy.hstack((retVals, values))
else:
retVals = numpy.hstack((retVals, inputData))
return retVals
|
'Returns the input in the same format as is returned by
:meth:`.topDownCompute`. For most encoder types, this is the same as the
input data. For instance, for scalar and category types, this corresponds to
the numeric and string values, respectively, from the inputs. For datetime
encoders, this returns the list of scalars for each of the sub-fields
(timeOfDay, dayOfWeek, etc.)
This method is essentially the same as :meth:`.getScalars` except that it
returns strings.
:param inputData: The input data in the format it is received from the data
source
:return: A list of values, in the same format and in the same order as they
are returned by :meth:`.topDownCompute`.'
| def getEncodedValues(self, inputData):
| retVals = []
if (self.encoders is not None):
for (name, encoders, offset) in self.encoders:
values = encoders.getEncodedValues(self._getInputValue(inputData, name))
if _isSequence(values):
retVals.extend(values)
else:
retVals.append(values)
elif _isSequence(inputData):
retVals.extend(inputData)
else:
retVals.append(inputData)
return tuple(retVals)
|
'Returns an array containing the sub-field bucket indices for each sub-field
of the inputData. To get the associated field names for each of the buckets,
call :meth:`.getScalarNames`.
:param inputData: The data from the source. This is typically an object with
members.
:return: array of bucket indices'
| def getBucketIndices(self, inputData):
| retVals = []
if (self.encoders is not None):
for (name, encoder, offset) in self.encoders:
values = encoder.getBucketIndices(self._getInputValue(inputData, name))
retVals.extend(values)
else:
assert False, 'Should be implemented in base classes that are not containers for other encoders'
return retVals
|
'Return a pretty print string representing the return values from
:meth:`.getScalars` and :meth:`.getScalarNames`.
:param scalarValues: input values to encode to string
:param scalarNames: optional input of scalar names to convert. If None, gets
scalar names from :meth:`.getScalarNames`
:return: string representation of scalar values'
| def scalarsToStr(self, scalarValues, scalarNames=None):
| if (scalarNames is None):
scalarNames = self.getScalarNames()
desc = ''
for (name, value) in zip(scalarNames, scalarValues):
if (len(desc) > 0):
desc += (', %s:%.2f' % (name, value))
else:
desc += ('%s:%.2f' % (name, value))
return desc
|
'**Must be overridden by subclasses.**
This returns a list of tuples, each containing (``name``, ``offset``).
The ``name`` is a string description of each sub-field, and ``offset`` is
the bit offset of the sub-field for that encoder.
For now, only the \'multi\' and \'date\' encoders have multiple (name, offset)
pairs. All other encoders have a single pair, where the offset is 0.
:return: list of tuples containing (name, offset)'
| def getDescription(self):
| raise Exception('getDescription must be implemented by all subclasses')
|
'Return the offset and length of a given field within the encoded output.
:param fieldName: Name of the field
:return: tuple(``offset``, ``width``) of the field within the encoded output'
| def getFieldDescription(self, fieldName):
| description = (self.getDescription() + [('end', self.getWidth())])
for i in xrange(len(description)):
(name, offset) = description[i]
if (name == fieldName):
break
if (i >= (len(description) - 1)):
raise RuntimeError(('Field name %s not found in this encoder' % fieldName))
return (offset, (description[(i + 1)][1] - offset))
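# Worked example (the description and width values are assumed for
# illustration):
#   getDescription() == [('dayOfWeek', 0), ('timeOfDay', 7)], getWidth() == 28
#   getFieldDescription('dayOfWeek') -> (0, 7)
#   getFieldDescription('timeOfDay') -> (7, 21)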
|
'Return a description of the given bit in the encoded output.
This will include the field name and the offset within the field.
:param bitOffset: Offset of the bit to get the description of
:param formatted: If True, the bitOffset is w.r.t. formatted output,
which includes separators
:return: tuple(``fieldName``, ``offsetWithinField``)'
| def encodedBitDescription(self, bitOffset, formatted=False):
| (prevFieldName, prevFieldOffset) = (None, None)
description = self.getDescription()
for i in xrange(len(description)):
(name, offset) = description[i]
if formatted:
offset = (offset + i)
if (bitOffset == (offset - 1)):
prevFieldName = 'separator'
prevFieldOffset = bitOffset
break
if (bitOffset < offset):
break
(prevFieldName, prevFieldOffset) = (name, offset)
width = (self.getDisplayWidth() if formatted else self.getWidth())
if ((prevFieldOffset is None) or (bitOffset > self.getWidth())):
raise IndexError(('Bit is outside of allowable range: [0 - %d]' % width))
return (prevFieldName, (bitOffset - prevFieldOffset))
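# Worked example, continuing the assumed layout above with formatted=False:
#   encodedBitDescription(3)  -> ('dayOfWeek', 3)
#   encodedBitDescription(10) -> ('timeOfDay', 3)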
|
'Pretty-print a header that labels the sub-fields of the encoded
output. This can be used in conjunction with :meth:`.pprint`.
:param prefix: printed before the header if specified'
| def pprintHeader(self, prefix=''):
| print prefix,
description = (self.getDescription() + [('end', self.getWidth())])
for i in xrange((len(description) - 1)):
name = description[i][0]
width = (description[(i + 1)][1] - description[i][1])
formatStr = ('%%-%ds |' % width)
if (len(name) > width):
pname = name[0:width]
else:
pname = name
print (formatStr % pname),
print
print prefix, ('-' * ((self.getWidth() + ((len(description) - 1) * 3)) - 1))
|
'Pretty-print the encoded output using ascii art.
:param output: encoded output array to print
:param prefix: printed before the header if specified'
| def pprint(self, output, prefix=''):
| print prefix,
description = (self.getDescription() + [('end', self.getWidth())])
for i in xrange((len(description) - 1)):
offset = description[i][1]
nextoffset = description[(i + 1)][1]
print ('%s |' % bitsToString(output[offset:nextoffset])),
print
|
'Takes an encoded output and does its best to work backwards and generate
the input that would have generated it.
In cases where the encoded output contains more ON bits than an input
would have generated, this routine will return one or more ranges of inputs
which, if their encoded outputs were ORed together, would produce the
target output. This behavior makes this method suitable for doing things
like generating a description of a learned coincidence in the SP, which
in many cases might be a union of one or more inputs.
If instead, you want to figure the *most likely* single input scalar value
that would have generated a specific encoded output, use the
:meth:`.topDownCompute` method.
If you want to pretty print the return value from this method, use the
:meth:`.decodedToStr` method.
:param encoded: The encoded output that you want decode
:param parentFieldName: The name of the encoder which is our parent. This name
is prefixed to each of the field names within this encoder to form the
keys of the dict() in the retval.
:return: tuple(``fieldsDict``, ``fieldOrder``)
``fieldsDict`` is a dict() where the keys represent field names
(only 1 if this is a simple encoder, > 1 if this is a multi
or date encoder) and the values are the result of decoding each
field. If there are no bits in encoded that would have been
generated by a field, it won\'t be present in the dict. The
key of each entry in the dict is formed by joining the passed in
parentFieldName with the child encoder name using a \'.\'.
Each \'value\' in ``fieldsDict`` consists of (ranges, desc), where
ranges is a list of one or more (minVal, maxVal) ranges of
input that would generate bits in the encoded output and \'desc\'
is a pretty print description of the ranges. For encoders like
the category encoder, the \'desc\' will contain the category
names that correspond to the scalar values included in the
ranges.
``fieldOrder`` is a list of the keys from ``fieldsDict``, in the
same order as the fields appear in the encoded output.
TODO: when we switch to Python 2.7 or 3.x, use OrderedDict
Example retvals for a scalar encoder:
.. code-block:: python
{\'amount\': ( [[1,3], [7,10]], \'1-3, 7-10\' )}
{\'amount\': ( [[2.5,2.5]], \'2.5\' )}
Example retval for a category encoder:
.. code-block:: python
{\'country\': ( [[1,1], [5,6]], \'US, GB, ES\' )}
Example retval for a multi encoder:
.. code-block:: python
{\'amount\': ( [[2.5,2.5]], \'2.5\' ),
\'country\': ( [[1,1], [5,6]], \'US, GB, ES\' )}'
| def decode(self, encoded, parentFieldName=''):
| fieldsDict = dict()
fieldsOrder = []
if (parentFieldName == ''):
parentName = self.name
else:
parentName = ('%s.%s' % (parentFieldName, self.name))
if (self.encoders is not None):
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
if (i < (len(self.encoders) - 1)):
nextOffset = self.encoders[(i + 1)][2]
else:
nextOffset = self.width
fieldOutput = encoded[offset:nextOffset]
(subFieldsDict, subFieldsOrder) = encoder.decode(fieldOutput, parentFieldName=parentName)
fieldsDict.update(subFieldsDict)
fieldsOrder.extend(subFieldsOrder)
return (fieldsDict, fieldsOrder)
|
'Return a pretty print string representing the return value from
:meth:`.decode`.'
| def decodedToStr(self, decodeResults):
| (fieldsDict, fieldsOrder) = decodeResults
desc = ''
for fieldName in fieldsOrder:
(ranges, rangesStr) = fieldsDict[fieldName]
if (len(desc) > 0):
desc += (', %s:' % fieldName)
else:
desc += ('%s:' % fieldName)
desc += ('[%s]' % rangesStr)
return desc
|
'**Must be overridden by subclasses.**
Returns a list of items, one for each bucket defined by this encoder.
Each item is the value assigned to that bucket, this is the same as the
:attr:`.EncoderResult.value` that would be returned by
:meth:`.getBucketInfo` for that bucket and is in the same format as the
input that would be passed to :meth:`.encode`.
This call is faster than calling :meth:`.getBucketInfo` on each bucket
individually if all you need are the bucket values.
:return: list of items, each item representing the bucket value for that
bucket.'
| def getBucketValues(self):
| raise Exception('getBucketValues must be implemented by all subclasses')
|
'Returns a list of :class:`.EncoderResult` namedtuples describing the inputs
for each sub-field that correspond to the bucket indices passed in
``buckets``. To get the associated field names for each of the values, call
:meth:`.getScalarNames`.
:param buckets: The list of bucket indices, one for each sub-field encoder.
These bucket indices for example may have been retrieved
from the :meth:`.getBucketIndices` call.
:return: A list of :class:`.EncoderResult`.'
| def getBucketInfo(self, buckets):
| if (self.encoders is None):
raise RuntimeError('Must be implemented in sub-class')
retVals = []
bucketOffset = 0
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
if (encoder.encoders is not None):
nextBucketOffset = (bucketOffset + len(encoder.encoders))
else:
nextBucketOffset = (bucketOffset + 1)
bucketIndices = buckets[bucketOffset:nextBucketOffset]
values = encoder.getBucketInfo(bucketIndices)
retVals.extend(values)
bucketOffset = nextBucketOffset
return retVals
|
'Returns a list of :class:`.EncoderResult` namedtuples describing the
top-down best guess inputs for each sub-field given the encoded output.
These are the values which are most likely to generate the given encoded
output. To get the associated field names for each of the values, call
:meth:`.getScalarNames`.
:param encoded: The encoded output. Typically received from the topDown
outputs from the spatial pooler just above us.
:return: A list of :class:`.EncoderResult`'
| def topDownCompute(self, encoded):
| if (self.encoders is None):
raise RuntimeError('Must be implemented in sub-class')
retVals = []
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
if (i < (len(self.encoders) - 1)):
nextOffset = self.encoders[(i + 1)][2]
else:
nextOffset = self.width
fieldOutput = encoded[offset:nextOffset]
values = encoder.topDownCompute(fieldOutput)
if _isSequence(values):
retVals.extend(values)
else:
retVals.append(values)
return retVals
|
'Compute closeness scores between the expected scalar value(s) and actual
scalar value(s). The expected scalar values are typically those obtained
from the :meth:`.getScalars` method. The actual scalar values are typically
those returned from :meth:`.topDownCompute`.
This method returns one closeness score for each value in expValues (or
actValues which must be the same length). The closeness score ranges from
0 to 1.0, 1.0 being a perfect match and 0 being the worst possible match.
If this encoder is a simple, single field encoder, then it will expect
just 1 item in each of the ``expValues`` and ``actValues`` arrays.
Multi-encoders will expect 1 item per sub-encoder.
Each encoder type can define its own metric for closeness. For example,
a category encoder may return either 1 or 0, if the scalar matches exactly
or not. A scalar encoder might return a percentage match, etc.
:param expValues: Array of expected scalar values, typically obtained from
:meth:`.getScalars`
:param actValues: Array of actual values, typically obtained from
:meth:`.topDownCompute`
:return: Array of closeness scores, one per item in expValues (or
actValues).'
| def closenessScores(self, expValues, actValues, fractional=True):
| if (self.encoders is None):
err = abs((expValues[0] - actValues[0]))
if fractional:
denom = max(expValues[0], actValues[0])
if (denom == 0):
denom = 1.0
closeness = (1.0 - (float(err) / denom))
if (closeness < 0):
closeness = 0
else:
closeness = err
return numpy.array([closeness])
scalarIdx = 0
retVals = numpy.array([])
for (name, encoder, offset) in self.encoders:
values = encoder.closenessScores(expValues[scalarIdx:], actValues[scalarIdx:], fractional=fractional)
scalarIdx += len(values)
retVals = numpy.hstack((retVals, values))
return retVals
|
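A worked numeric example of the fractional closeness formula in the single-field branch above (the values are made up).

import numpy
expValues = [10.0]
actValues = [8.0]
err = abs(expValues[0] - actValues[0])            # 2.0
denom = max(expValues[0], actValues[0]) or 1.0    # 10.0 (falls back to 1.0 if both are 0)
closeness = max(0.0, 1.0 - (float(err) / denom))  # 0.8
print(numpy.array([closeness]))                   # [ 0.8]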
'Calculate width of display for bits plus blanks between fields.
:return: (int) width of display for bits plus blanks between fields'
| def getDisplayWidth(self):
| width = ((self.getWidth() + len(self.getDescription())) - 1)
return width
|
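A quick arithmetic check of the display width, assuming a hypothetical encoder 100 bits wide with 3 sub-fields (so getDescription() returns 3 entries): the display is the bits plus one blank between each pair of fields.

nbits = 100
numFields = 3
displayWidth = (nbits + numFields) - 1
print(displayWidth)  # 102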
'[Encoder class virtual method override]'
| def getDecoderOutputFieldTypes(self):
| return (FieldMetaType.string,)
|
'See method description in base.py'
| def getScalars(self, input):
| return numpy.array([0])
|
'See method description in base.py'
| def getBucketIndices(self, input):
| return [0]
|
'See method description in base.py'
| def encodeIntoArray(self, inputVal, outputVal):
| if (len(inputVal) != len(outputVal)):
raise ValueError(('Different input (%i) and output (%i) sizes.' % (len(inputVal), len(outputVal))))
if ((self.w is not None) and (sum(inputVal) != self.w)):
raise ValueError(('Input has %i bits but w was set to %i.' % (sum(inputVal), self.w)))
outputVal[:] = inputVal[:]
if (self.verbosity >= 2):
print 'input:', inputVal, 'output:', outputVal
print 'decoded:', self.decodedToStr(self.decode(outputVal))
|
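A standalone sketch of the pass-through behaviour above: the input bit array is copied verbatim after validating its length and, when w is set, its on-bit count. The sizes and w are hypothetical.

import numpy
w = 3
inputVal = numpy.array([0, 1, 1, 0, 1], dtype='uint8')
outputVal = numpy.zeros(5, dtype='uint8')
assert len(inputVal) == len(outputVal)
assert (w is None) or (inputVal.sum() == w)
outputVal[:] = inputVal[:]
print(outputVal)  # [0 1 1 0 1]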
'See the function description in base.py'
| def decode(self, encoded, parentFieldName=''):
| if (parentFieldName != ''):
fieldName = ('%s.%s' % (parentFieldName, self.name))
else:
fieldName = self.name
return ({fieldName: ([[0, 0]], 'input')}, [fieldName])
|
'See the function description in base.py'
| def getBucketInfo(self, buckets):
| return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))]
|
'See the function description in base.py'
| def topDownCompute(self, encoded):
| return EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))
|
'Does a bitwise comparison of the two bitmaps and returns a fractional
value between 0 and 1 indicating how similar they are.
- ``1`` => identical
- ``0`` => no overlapping bits
``kwargs`` will have the keyword "fractional", which is assumed to be
True by this encoder (the score is always returned as a fraction).'
| def closenessScores(self, expValues, actValues, **kwargs):
| ratio = 1.0
esum = int(expValues.sum())
asum = int(actValues.sum())
if (asum > esum):
diff = (asum - esum)
if (diff < esum):
ratio = (1 - (diff / float(esum)))
else:
ratio = (1 / float(diff))
olap = (expValues & actValues)
osum = int(olap.sum())
if (esum == 0):
r = 0.0
else:
r = (osum / float(esum))
r = (r * ratio)
return numpy.array([r])
|
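A worked example of the bitwise closeness score above, using made-up bitmaps.

import numpy
expValues = numpy.array([1, 1, 0, 1, 0], dtype='uint8')
actValues = numpy.array([1, 1, 1, 0, 0], dtype='uint8')
esum = int(expValues.sum())                # 3
asum = int(actValues.sum())                # 3, so no extra-bit penalty
ratio = 1.0                                # would shrink if actValues had more on bits than expValues
osum = int((expValues & actValues).sum())  # 2 overlapping bits
score = (osum / float(esum)) * ratio if esum else 0.0
print(numpy.array([score]))                # [ 0.66666667]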
'[Encoder class virtual method override]'
| def getDecoderOutputFieldTypes(self):
| return (FieldMetaType.integer,)
|
'See method description in base.py'
| def getScalars(self, input):
| if (input == SENTINEL_VALUE_FOR_MISSING_DATA):
return numpy.array([None])
else:
return numpy.array([self.categoryToIndex.get(input, 0)])
|
'See method description in base.py'
| def getBucketIndices(self, input):
| if (input == SENTINEL_VALUE_FOR_MISSING_DATA):
return [None]
else:
return self.encoder.getBucketIndices(self.categoryToIndex.get(input, 0))
|
'See the function description in base.py'
| def decode(self, encoded, parentFieldName=''):
| (fieldsDict, fieldNames) = self.encoder.decode(encoded)
if (len(fieldsDict) == 0):
return (fieldsDict, fieldNames)
assert (len(fieldsDict) == 1)
(inRanges, inDesc) = fieldsDict.values()[0]
outRanges = []
desc = ''
for (minV, maxV) in inRanges:
minV = int(round(minV))
maxV = int(round(maxV))
outRanges.append((minV, maxV))
while (minV <= maxV):
if (len(desc) > 0):
desc += ', '
desc += self.indexToCategory[minV]
minV += 1
if (parentFieldName != ''):
fieldName = ('%s.%s' % (parentFieldName, self.name))
else:
fieldName = self.name
return ({fieldName: (outRanges, desc)}, [fieldName])
|
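A standalone sketch of the range-to-label expansion performed above; the category table and input ranges are hypothetical (index 0 is the fallback for unknown categories).

indexToCategory = ['<UNKNOWN>', 'US', 'GB', 'ES']
inRanges = [[1, 1], [2, 3]]
labels = []
for (minV, maxV) in inRanges:
    labels.extend(indexToCategory[int(round(minV)):(int(round(maxV)) + 1)])
print(', '.join(labels))  # US, GB, ES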
'See the function description in base.py
``fractional`` defaults to True; when False, the score is inverted so that
``0.0`` means an exact match and ``1.0`` means a mismatch.'
| def closenessScores(self, expValues, actValues, fractional=True):
| expValue = expValues[0]
actValue = actValues[0]
if (expValue == actValue):
closeness = 1.0
else:
closeness = 0.0
if (not fractional):
closeness = (1.0 - closeness)
return numpy.array([closeness])
|
'See the function description in base.py'
| def getBucketValues(self):
| if (self._bucketValues is None):
numBuckets = len(self.encoder.getBucketValues())
self._bucketValues = []
for bucketIndex in range(numBuckets):
self._bucketValues.append(self.getBucketInfo([bucketIndex])[0].value)
return self._bucketValues
|
'See the function description in base.py'
| def getBucketInfo(self, buckets):
| bucketInfo = self.encoder.getBucketInfo(buckets)[0]
categoryIndex = int(round(bucketInfo.value))
category = self.indexToCategory[categoryIndex]
return [EncoderResult(value=category, scalar=categoryIndex, encoding=bucketInfo.encoding)]
|
'See the function description in base.py'
| def topDownCompute(self, encoded):
| encoderResult = self.encoder.topDownCompute(encoded)[0]
value = encoderResult.value
categoryIndex = int(round(value))
category = self.indexToCategory[categoryIndex]
return EncoderResult(value=category, scalar=categoryIndex, encoding=encoderResult.encoding)
|
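A minimal sketch of the scalar-to-category step above: the top-down scalar is rounded to the nearest bucket index and looked up in the category table (the table and value are hypothetical).

indexToCategory = ['<UNKNOWN>', 'US', 'GB', 'ES']
topDownScalar = 2.2
category = indexToCategory[int(round(topDownScalar))]
print(category)  # GB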