'Set the radius, resolution and range. These values are updated when minval and/or maxval change.'
def _setEncoderParams(self):
self.rangeInternal = float(self.maxval - self.minval)
self.resolution = float(self.rangeInternal) / (self.n - self.w)
self.radius = self.w * self.resolution
self.range = self.rangeInternal + self.resolution
self.nInternal = self.n - (2 * self.padding)
self._bucketValues = None
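# NOTE: illustrative sketch, not part of the original source. It re-applies the
# arithmetic from _setEncoderParams() above with concrete numbers, assuming a
# non-periodic encoder (padding = 0) so the derived quantities are easy to follow.
n, w, padding = 100, 21, 0
minval, maxval = 0.0, 100.0

rangeInternal = float(maxval - minval)     # 100.0
resolution = rangeInternal / (n - w)       # 100 / 79 ~= 1.27 -- value covered per bucket
radius = w * resolution                    # ~26.6 -- span covered by one full encoding
totalRange = rangeInternal + resolution    # ~101.27
nInternal = n - 2 * padding                # 100

print(resolution, radius, totalRange, nInternal)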
'TODO: document'
def setFieldStats(self, fieldName, fieldStats):
if (fieldStats[fieldName]['min'] == None) or (fieldStats[fieldName]['max'] == None):
  return
self.minval = fieldStats[fieldName]['min']
self.maxval = fieldStats[fieldName]['max']
if self.minval == self.maxval:
  self.maxval += 1
self._setEncoderParams()
'Potentially change the minval and maxval using input. **The learn flag is currently not supported by cla regions.**'
def _setMinAndMax(self, input, learn):
self.slidingWindow.next(input)
if (self.minval is None) and (self.maxval is None):
  self.minval = input
  self.maxval = input + 1
  self._setEncoderParams()
elif learn:
  sorted = self.slidingWindow.getSlidingWindow()
  sorted.sort()
  minOverWindow = sorted[0]
  maxOverWindow = sorted[len(sorted) - 1]
  if minOverWindow < self.minval:
    if self.verbosity >= 2:
      print ('Input %s=%.2f smaller than minval %.2f. Adjusting minval to %.2f' %
             (self.name, input, self.minval, minOverWindow))
    self.minval = minOverWindow
    self._setEncoderParams()
  if maxOverWindow > self.maxval:
    if self.verbosity >= 2:
      print ('Input %s=%.2f greater than maxval %.2f. Adjusting maxval to %.2f' %
             (self.name, input, self.maxval, maxOverWindow))
    self.maxval = maxOverWindow
    self._setEncoderParams()
'[overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices]'
def getBucketIndices(self, input, learn=None):
self.recordNum += 1
if learn is None:
  learn = self._learningEnabled
if (type(input) is float) and math.isnan(input):
  input = SENTINEL_VALUE_FOR_MISSING_DATA
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  return [None]
else:
  self._setMinAndMax(input, learn)
  return super(AdaptiveScalarEncoder, self).getBucketIndices(input)
'[overrides nupic.encoders.scalar.ScalarEncoder.encodeIntoArray]'
def encodeIntoArray(self, input, output, learn=None):
self.recordNum += 1
if learn is None:
  learn = self._learningEnabled
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  output[0:self.n] = 0
elif not math.isnan(input):
  self._setMinAndMax(input, learn)
super(AdaptiveScalarEncoder, self).encodeIntoArray(input, output)
'[overrides nupic.encoders.scalar.ScalarEncoder.getBucketInfo]'
def getBucketInfo(self, buckets):
if (self.minval is None) or (self.maxval is None):
  return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).getBucketInfo(buckets)
'[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]'
def topDownCompute(self, encoded):
if (self.minval is None) or (self.maxval is None):
  return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).topDownCompute(encoded)
'Initialize the random seed'
def _seed(self, seed=(-1)):
if seed != -1:
  self.random = NupicRandom(seed)
else:
  self.random = NupicRandom()
'[Encoder class virtual method override]'
def getDecoderOutputFieldTypes(self):
return (FieldMetaType.string,)
'Generate a new and unique representation. Returns a numpy array of shape (n,).'
def _newRep(self):
maxAttempts = 1000
for _ in xrange(maxAttempts):
  foundUnique = True
  population = numpy.arange(self.n, dtype=numpy.uint32)
  choices = numpy.arange(self.w, dtype=numpy.uint32)
  oneBits = sorted(self.random.sample(population, choices))
  sdr = numpy.zeros(self.n, dtype='uint8')
  sdr[oneBits] = 1
  for i in xrange(self.ncategories):
    if (sdr == self.sdrs[i]).all():
      foundUnique = False
      break
  if foundUnique:
    break
if not foundUnique:
  raise RuntimeError('Error, could not find unique pattern %d after %d attempts' %
                     (self.ncategories, maxAttempts))
return sdr
'See method description in base.py'
def getScalars(self, input):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  return numpy.array([0])
index = self.categoryToIndex.get(input, None)
if index is None:
  if self._learningEnabled:
    self._addCategory(input)
    index = self.ncategories - 1
  else:
    index = 0
return numpy.array([index])
'See method description in base.py'
def getBucketIndices(self, input):
return self.getScalars(input)
'See the function description in base.py'
def decode(self, encoded, parentFieldName=''):
assert (encoded[0:self.n] <= 1.0).all()
resultString = ''
resultRanges = []
overlaps = (self.sdrs * encoded[0:self.n]).sum(axis=1)
if self.verbosity >= 2:
  print 'Overlaps for decoding:'
  for i in xrange(0, self.ncategories):
    print '%d %s' % (overlaps[i], self.categories[i])
matchingCategories = (overlaps > self.thresholdOverlap).nonzero()[0]
for index in matchingCategories:
  if resultString != '':
    resultString += ' '
  resultString += str(self.categories[index])
  resultRanges.append([int(index), int(index)])
if parentFieldName != '':
  fieldName = '%s.%s' % (parentFieldName, self.name)
else:
  fieldName = self.name
return ({fieldName: (resultRanges, resultString)}, [fieldName])
'Return the internal _topDownMappingM matrix used for handling the getBucketInfo() and topDownCompute() methods. This is a matrix, one row per category (bucket) where each row contains the encoded output for that category.'
def _getTopDownMapping(self):
if self._topDownMappingM is None:
  self._topDownMappingM = SM32(self.ncategories, self.n)
  outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
  for i in xrange(self.ncategories):
    self.encodeIntoArray(self.categories[i], outputSpace)
    self._topDownMappingM.setRowFromDense(i, outputSpace)
return self._topDownMappingM
'See the function description in base.py'
def getBucketValues(self):
return self.categories
'See the function description in base.py'
def getBucketInfo(self, buckets):
if self.ncategories == 0:
  return 0
topDownMappingM = self._getTopDownMapping()
categoryIndex = buckets[0]
category = self.categories[categoryIndex]
encoding = topDownMappingM.getRow(categoryIndex)
return [EncoderResult(value=category, scalar=categoryIndex, encoding=encoding)]
'See the function description in base.py'
def topDownCompute(self, encoded):
if self.ncategories == 0:
  return 0
topDownMappingM = self._getTopDownMapping()
categoryIndex = topDownMappingM.rightVecProd(encoded).argmax()
category = self.categories[categoryIndex]
encoding = topDownMappingM.getRow(categoryIndex)
return EncoderResult(value=category, scalar=categoryIndex, encoding=encoding)
'See the function description in base.py kwargs will have the keyword "fractional", which is ignored by this encoder'
def closenessScores(self, expValues, actValues, fractional=True):
expValue = expValues[0]
actValue = actValues[0]
if expValue == actValue:
  closeness = 1.0
else:
  closeness = 0.0
if not fractional:
  closeness = 1.0 - closeness
return numpy.array([closeness])
'Initialize the random seed'
def _seed(self, seed=(-1)):
if seed != -1:
  self.random = NupicRandom(seed)
else:
  self.random = NupicRandom()
'See method description in base.py'
def getDecoderOutputFieldTypes(self):
return (FieldMetaType.float,)
'See method description in base.py'
def getWidth(self):
return self.n
'See method description in base.py'
def getBucketIndices(self, x):
if (isinstance(x, float) and math.isnan(x)) or (x == SENTINEL_VALUE_FOR_MISSING_DATA):
  return [None]
if self._offset is None:
  self._offset = x
bucketIdx = ((self._maxBuckets / 2) +
             int(round((x - self._offset) / self.resolution)))
if bucketIdx < 0:
  bucketIdx = 0
elif bucketIdx >= self._maxBuckets:
  bucketIdx = self._maxBuckets - 1
return [bucketIdx]
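# NOTE: illustrative sketch, not part of the original source. It mirrors the
# bucket-index arithmetic in getBucketIndices() above with made-up numbers; the
# resolution, maxBuckets and the first input that fixes the offset are assumptions.
resolution = 0.5
maxBuckets = 1000
offset = 10.0                   # fixed by the first non-missing input seen

def bucket_index(x):
    idx = maxBuckets // 2 + int(round((x - offset) / resolution))
    return min(max(idx, 0), maxBuckets - 1)

print(bucket_index(10.0))   # 500 -- the first value lands in the middle bucket
print(bucket_index(11.0))   # 502 -- two resolutions above the offset
print(bucket_index(9.25))   # 498 -- below the offset, still within range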
'Given a bucket index, return the list of non-zero bits. If the bucket index does not exist, it is created. If the index falls outside our range we clip it. :param index: The bucket index to get non-zero bits for. :returns: numpy array of indices of non-zero bits for the specified index.'
def mapBucketIndexToNonZeroBits(self, index):
if index < 0:
  index = 0
if index >= self._maxBuckets:
  index = self._maxBuckets - 1
if not self.bucketMap.has_key(index):
  if self.verbosity >= 2:
    print 'Adding additional buckets to handle index=', index
  self._createBucket(index)
return self.bucketMap[index]
'See method description in base.py'
def encodeIntoArray(self, x, output):
if (x is not None) and (not isinstance(x, numbers.Number)):
  raise TypeError('Expected a scalar input but got input of type %s' % type(x))
bucketIdx = self.getBucketIndices(x)[0]
output[0:self.n] = 0
if bucketIdx is not None:
  output[self.mapBucketIndexToNonZeroBits(bucketIdx)] = 1
'Create the given bucket index. Recursively create as many in-between bucket indices as necessary.'
def _createBucket(self, index):
if index < self.minIndex:
  if index == self.minIndex - 1:
    self.bucketMap[index] = self._newRepresentation(self.minIndex, index)
    self.minIndex = index
  else:
    self._createBucket(index + 1)
    self._createBucket(index)
elif index == self.maxIndex + 1:
  self.bucketMap[index] = self._newRepresentation(self.maxIndex, index)
  self.maxIndex = index
else:
  self._createBucket(index - 1)
  self._createBucket(index)
'Return a new representation for newIndex that overlaps with the representation at index by exactly w-1 bits'
def _newRepresentation(self, index, newIndex):
newRepresentation = self.bucketMap[index].copy()
ri = newIndex % self.w
newBit = self.random.getUInt32(self.n)
newRepresentation[ri] = newBit
while ((newBit in self.bucketMap[index]) or
       (not self._newRepresentationOK(newRepresentation, newIndex))):
  self.numTries += 1
  newBit = self.random.getUInt32(self.n)
  newRepresentation[ri] = newBit
return newRepresentation
'Return True if this new candidate representation satisfies all our overlap rules. Since we know that neighboring representations differ by at most one bit, we compute running overlaps.'
def _newRepresentationOK(self, newRep, newIndex):
if newRep.size != self.w:
  return False
if (newIndex < self.minIndex - 1) or (newIndex > self.maxIndex + 1):
  raise ValueError('newIndex must be within one of existing indices')
newRepBinary = numpy.array([False] * self.n)
newRepBinary[newRep] = True
midIdx = self._maxBuckets / 2
runningOverlap = self._countOverlap(self.bucketMap[self.minIndex], newRep)
if not self._overlapOK(self.minIndex, newIndex, overlap=runningOverlap):
  return False
for i in range(self.minIndex + 1, midIdx + 1):
  newBit = (i - 1) % self.w
  if newRepBinary[self.bucketMap[i - 1][newBit]]:
    runningOverlap -= 1
  if newRepBinary[self.bucketMap[i][newBit]]:
    runningOverlap += 1
  if not self._overlapOK(i, newIndex, overlap=runningOverlap):
    return False
for i in range(midIdx + 1, self.maxIndex + 1):
  newBit = i % self.w
  if newRepBinary[self.bucketMap[i - 1][newBit]]:
    runningOverlap -= 1
  if newRepBinary[self.bucketMap[i][newBit]]:
    runningOverlap += 1
  if not self._overlapOK(i, newIndex, overlap=runningOverlap):
    return False
return True
'Return the overlap between bucket indices i and j'
def _countOverlapIndices(self, i, j):
if self.bucketMap.has_key(i) and self.bucketMap.has_key(j):
  iRep = self.bucketMap[i]
  jRep = self.bucketMap[j]
  return self._countOverlap(iRep, jRep)
else:
  raise ValueError("Either i or j don't exist")
'Return the overlap between two representations. rep1 and rep2 are lists of non-zero indices.'
@staticmethod def _countOverlap(rep1, rep2):
overlap = 0
for e in rep1:
  if e in rep2:
    overlap += 1
return overlap
'Return True if the given overlap between bucket indices i and j is acceptable. If overlap is not specified, calculate it from the bucketMap'
def _overlapOK(self, i, j, overlap=None):
if overlap is None:
  overlap = self._countOverlapIndices(i, j)
if abs(i - j) < self.w:
  if overlap == (self.w - abs(i - j)):
    return True
  else:
    return False
elif overlap <= self._maxOverlap:
  return True
else:
  return False
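# NOTE: illustrative sketch, not part of the original source. It restates the rule
# enforced by _overlapOK() above: representations of nearby buckets must overlap by
# exactly w - |i - j| bits, while distant buckets may share at most _maxOverlap bits.
# The values of w and maxOverlap here are assumptions.
w = 21
maxOverlap = 2

def overlap_ok(i, j, overlap):
    if abs(i - j) < w:
        return overlap == w - abs(i - j)
    return overlap <= maxOverlap

print(overlap_ok(500, 501, 20))   # True  -- adjacent buckets share w - 1 = 20 bits
print(overlap_ok(500, 505, 16))   # True  -- 5 apart -> exactly 16 shared bits
print(overlap_ok(500, 600, 3))    # False -- distant buckets may share at most 2 bits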
'Initialize the bucket map assuming the given number of maxBuckets.'
def _initializeBucketMap(self, maxBuckets, offset):
self._maxBuckets = maxBuckets
self.minIndex = self._maxBuckets / 2
self.maxIndex = self._maxBuckets / 2
self._offset = offset
self.bucketMap = {}

def _permutation(n):
  r = numpy.arange(n, dtype=numpy.uint32)
  self.random.shuffle(r)
  return r

self.bucketMap[self.minIndex] = _permutation(self.n)[0:self.w]
self.numTries = 0
'(helper function) There are three different ways of thinking about the representation. Handle each case here.'
def _initEncoder(self, w, minval, maxval, n, radius, resolution):
if n != 0:
  if (radius != 0) or (resolution != 0):
    raise ValueError('Only one of n/radius/resolution can be specified for a ScalarEncoder')
  assert n > w
  self.n = n
  if (minval is not None) and (maxval is not None):
    if not self.periodic:
      self.resolution = float(self.rangeInternal) / (self.n - self.w)
    else:
      self.resolution = float(self.rangeInternal) / self.n
    self.radius = self.w * self.resolution
    if self.periodic:
      self.range = self.rangeInternal
    else:
      self.range = self.rangeInternal + self.resolution
else:
  if radius != 0:
    if resolution != 0:
      raise ValueError('Only one of radius/resolution can be specified for a ScalarEncoder')
    self.radius = radius
    self.resolution = float(self.radius) / w
  elif resolution != 0:
    self.resolution = float(resolution)
    self.radius = self.resolution * self.w
  else:
    raise Exception('One of n, radius, resolution must be specified for a ScalarEncoder')
  if (minval is not None) and (maxval is not None):
    if self.periodic:
      self.range = self.rangeInternal
    else:
      self.range = self.rangeInternal + self.resolution
    nfloat = (self.w * (self.range / self.radius)) + (2 * self.padding)
    self.n = int(math.ceil(nfloat))
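# NOTE: illustrative sketch, not part of the original source. It walks through the
# three mutually exclusive ways of sizing a ScalarEncoder handled by _initEncoder()
# above, assuming a non-periodic encoder with w = 21, a 0..100 value range and
# padding = 0.
import math

w, padding = 21, 0
rangeInternal = 100.0

# 1) specify n: resolution and radius are derived
n = 400
resolution = rangeInternal / (n - w)                  # ~0.264
radius = w * resolution                               # ~5.54

# 2) specify radius: resolution is derived and n is computed
radius = 10.0
resolution = radius / w                               # ~0.476
totalRange = rangeInternal + resolution
n = int(math.ceil(w * (totalRange / radius) + 2 * padding))   # 211

# 3) specify resolution: radius is derived and n is computed
resolution = 1.0
radius = w * resolution                               # 21.0
totalRange = rangeInternal + resolution
n = int(math.ceil(w * (totalRange / radius) + 2 * padding))   # 101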
'(helper function) check if the settings are reasonable for SP to work'
def _checkReasonableSettings(self):
if (self.w < 21): raise ValueError(('Number of bits in the SDR (%d) must be >= 21 (use forced=True to override).' % self.w))
'[Encoder class virtual method override]'
def getDecoderOutputFieldTypes(self):
return (FieldMetaType.float,)
'Return the bit offset of the first bit to be set in the encoder output. For periodic encoders, this can be a negative number when the encoded output wraps around.'
def _getFirstOnBit(self, input):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  return [None]
else:
  if input < self.minval:
    if self.clipInput and (not self.periodic):
      if self.verbosity > 0:
        print 'Clipped input %s=%.2f to minval %.2f' % (self.name, input, self.minval)
      input = self.minval
    else:
      raise Exception('input (%s) less than range (%s - %s)' %
                      (str(input), str(self.minval), str(self.maxval)))
  if self.periodic:
    if input >= self.maxval:
      raise Exception('input (%s) greater than periodic range (%s - %s)' %
                      (str(input), str(self.minval), str(self.maxval)))
  elif input > self.maxval:
    if self.clipInput:
      if self.verbosity > 0:
        print 'Clipped input %s=%.2f to maxval %.2f' % (self.name, input, self.maxval)
      input = self.maxval
    else:
      raise Exception('input (%s) greater than range (%s - %s)' %
                      (str(input), str(self.minval), str(self.maxval)))
  if self.periodic:
    centerbin = int(((input - self.minval) * self.nInternal) / self.range) + self.padding
  else:
    centerbin = int(((input - self.minval) + (self.resolution / 2)) / self.resolution) + self.padding
  minbin = centerbin - self.halfwidth
  return [minbin]
'See method description in base.py'
def getBucketIndices(self, input):
if (type(input) is float) and math.isnan(input):
  input = SENTINEL_VALUE_FOR_MISSING_DATA
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  return [None]
minbin = self._getFirstOnBit(input)[0]
if self.periodic:
  bucketIdx = minbin + self.halfwidth
  if bucketIdx < 0:
    bucketIdx += self.n
else:
  bucketIdx = minbin
return [bucketIdx]
'See method description in base.py'
def encodeIntoArray(self, input, output, learn=True):
if (input is not None) and (not isinstance(input, numbers.Number)):
  raise TypeError('Expected a scalar input but got input of type %s' % type(input))
if (type(input) is float) and math.isnan(input):
  input = SENTINEL_VALUE_FOR_MISSING_DATA
bucketIdx = self._getFirstOnBit(input)[0]
if bucketIdx is None:
  output[0:self.n] = 0
else:
  output[:self.n] = 0
  minbin = bucketIdx
  maxbin = minbin + (2 * self.halfwidth)
  if self.periodic:
    if maxbin >= self.n:
      bottombins = (maxbin - self.n) + 1
      output[:bottombins] = 1
      maxbin = self.n - 1
    if minbin < 0:
      topbins = -minbin
      output[(self.n - topbins):self.n] = 1
      minbin = 0
  assert minbin >= 0
  assert maxbin < self.n
  output[minbin:(maxbin + 1)] = 1
if self.verbosity >= 2:
  print
  print 'input:', input
  print 'range:', self.minval, '-', self.maxval
  print 'n:', self.n, 'w:', self.w, 'resolution:', self.resolution, 'radius', self.radius, 'periodic:', self.periodic
  print 'output:',
  self.pprint(output)
  print 'input desc:', self.decodedToStr(self.decode(output))
'See the function description in base.py'
def decode(self, encoded, parentFieldName=''):
tmpOutput = numpy.array(encoded[:self.n] > 0).astype(encoded.dtype)
if not tmpOutput.any():
  return (dict(), [])
maxZerosInARow = self.halfwidth
for i in xrange(maxZerosInARow):
  searchStr = numpy.ones(i + 3, dtype=encoded.dtype)
  searchStr[1:-1] = 0
  subLen = len(searchStr)
  if self.periodic:
    for j in xrange(self.n):
      outputIndices = numpy.arange(j, j + subLen)
      outputIndices %= self.n
      if numpy.array_equal(searchStr, tmpOutput[outputIndices]):
        tmpOutput[outputIndices] = 1
  else:
    for j in xrange((self.n - subLen) + 1):
      if numpy.array_equal(searchStr, tmpOutput[j:(j + subLen)]):
        tmpOutput[j:(j + subLen)] = 1
if self.verbosity >= 2:
  print 'raw output:', encoded[:self.n]
  print 'filtered output:', tmpOutput
nz = tmpOutput.nonzero()[0]
runs = []
run = [nz[0], 1]
i = 1
while i < len(nz):
  if nz[i] == (run[0] + run[1]):
    run[1] += 1
  else:
    runs.append(run)
    run = [nz[i], 1]
  i += 1
runs.append(run)
if self.periodic and (len(runs) > 1):
  if (runs[0][0] == 0) and ((runs[-1][0] + runs[-1][1]) == self.n):
    runs[-1][1] += runs[0][1]
    runs = runs[1:]
ranges = []
for run in runs:
  (start, runLen) = run
  if runLen <= self.w:
    left = right = start + (runLen / 2)
  else:
    left = start + self.halfwidth
    right = (start + runLen) - 1 - self.halfwidth
  if not self.periodic:
    inMin = ((left - self.padding) * self.resolution) + self.minval
    inMax = ((right - self.padding) * self.resolution) + self.minval
  else:
    inMin = (((left - self.padding) * self.range) / self.nInternal) + self.minval
    inMax = (((right - self.padding) * self.range) / self.nInternal) + self.minval
  if self.periodic:
    if inMin >= self.maxval:
      inMin -= self.range
      inMax -= self.range
  if inMin < self.minval:
    inMin = self.minval
  if inMax < self.minval:
    inMax = self.minval
  if self.periodic and (inMax >= self.maxval):
    ranges.append([inMin, self.maxval])
    ranges.append([self.minval, inMax - self.range])
  else:
    if inMax > self.maxval:
      inMax = self.maxval
    if inMin > self.maxval:
      inMin = self.maxval
    ranges.append([inMin, inMax])
desc = self._generateRangeDescription(ranges)
if parentFieldName != '':
  fieldName = '%s.%s' % (parentFieldName, self.name)
else:
  fieldName = self.name
return ({fieldName: (ranges, desc)}, [fieldName])
'Generate a text description from a list of numeric ranges.'
def _generateRangeDescription(self, ranges):
desc = ''
numRanges = len(ranges)
for i in xrange(numRanges):
  if ranges[i][0] != ranges[i][1]:
    desc += '%.2f-%.2f' % (ranges[i][0], ranges[i][1])
  else:
    desc += '%.2f' % ranges[i][0]
  if i < (numRanges - 1):
    desc += ', '
return desc
'Return the internal _topDownMappingM matrix used for handling the getBucketInfo() and topDownCompute() methods. This is a matrix, one row per category (bucket) where each row contains the encoded output for that category.'
def _getTopDownMapping(self):
if self._topDownMappingM is None:
  if self.periodic:
    self._topDownValues = numpy.arange(self.minval + (self.resolution / 2.0),
                                       self.maxval, self.resolution)
  else:
    self._topDownValues = numpy.arange(self.minval,
                                       self.maxval + (self.resolution / 2.0),
                                       self.resolution)
  numCategories = len(self._topDownValues)
  self._topDownMappingM = SM32(numCategories, self.n)
  outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
  for i in xrange(numCategories):
    value = self._topDownValues[i]
    value = max(value, self.minval)
    value = min(value, self.maxval)
    self.encodeIntoArray(value, outputSpace, learn=False)
    self._topDownMappingM.setRowFromDense(i, outputSpace)
return self._topDownMappingM
'See the function description in base.py'
def getBucketValues(self):
if self._bucketValues is None:
  topDownMappingM = self._getTopDownMapping()
  numBuckets = topDownMappingM.nRows()
  self._bucketValues = []
  for bucketIdx in range(numBuckets):
    self._bucketValues.append(self.getBucketInfo([bucketIdx])[0].value)
return self._bucketValues
'See the function description in base.py'
def getBucketInfo(self, buckets):
topDownMappingM = self._getTopDownMapping()
category = buckets[0]
encoding = self._topDownMappingM.getRow(category)
if self.periodic:
  inputVal = (self.minval + (self.resolution / 2.0)) + (category * self.resolution)
else:
  inputVal = self.minval + (category * self.resolution)
return [EncoderResult(value=inputVal, scalar=inputVal, encoding=encoding)]
'See the function description in base.py'
def topDownCompute(self, encoded):
topDownMappingM = self._getTopDownMapping()
category = topDownMappingM.rightVecProd(encoded).argmax()
return self.getBucketInfo([category])
'See the function description in base.py'
def closenessScores(self, expValues, actValues, fractional=True):
expValue = expValues[0]
actValue = actValues[0]
if self.periodic:
  expValue = expValue % self.maxval
  actValue = actValue % self.maxval
err = abs(expValue - actValue)
if self.periodic:
  err = min(err, self.maxval - err)
if fractional:
  pctErr = float(err) / (self.maxval - self.minval)
  pctErr = min(1.0, pctErr)
  closeness = 1.0 - pctErr
else:
  closeness = err
return numpy.array([closeness])
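# NOTE: illustrative sketch, not part of the original source. It reproduces the
# closeness computation above for a periodic "day of week" style range with
# minval = 0 and maxval = 7 (values wrap at 7); the specific range is an assumption.
minval, maxval, periodic = 0.0, 7.0, True

def closeness(expValue, actValue, fractional=True):
    if periodic:
        expValue %= maxval
        actValue %= maxval
    err = abs(expValue - actValue)
    if periodic:
        err = min(err, maxval - err)        # wrap-around distance
    if fractional:
        return 1.0 - min(1.0, err / (maxval - minval))
    return err

print(closeness(6.5, 0.5))   # ~0.857 -- only 1.0 apart across the wrap, not 6.0
print(closeness(2.0, 2.0))   # 1.0 -- identical values are maximally close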
'See method description in base.py'
def getScalarNames(self, parentFieldName=''):
names = []

def _formFieldName(encoder):
  if parentFieldName == '':
    return encoder.name
  else:
    return '%s.%s' % (parentFieldName, encoder.name)

if self.seasonEncoder is not None:
  names.append(_formFieldName(self.seasonEncoder))
if self.dayOfWeekEncoder is not None:
  names.append(_formFieldName(self.dayOfWeekEncoder))
if self.customDaysEncoder is not None:
  names.append(_formFieldName(self.customDaysEncoder))
if self.weekendEncoder is not None:
  names.append(_formFieldName(self.weekendEncoder))
if self.holidayEncoder is not None:
  names.append(_formFieldName(self.holidayEncoder))
if self.timeOfDayEncoder is not None:
  names.append(_formFieldName(self.timeOfDayEncoder))
return names
'See method description in base.py'
def getEncodedValues(self, input):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  return numpy.array([None])
assert isinstance(input, datetime.datetime)
values = []
timetuple = input.timetuple()
timeOfDay = timetuple.tm_hour + (float(timetuple.tm_min) / 60.0)
if self.seasonEncoder is not None:
  dayOfYear = timetuple.tm_yday
  values.append(dayOfYear - 1)
if self.dayOfWeekEncoder is not None:
  dayOfWeek = timetuple.tm_wday + (timeOfDay / 24.0)
  values.append(dayOfWeek)
if self.weekendEncoder is not None:
  if ((timetuple.tm_wday == 6) or (timetuple.tm_wday == 5) or
      ((timetuple.tm_wday == 4) and (timeOfDay > 18))):
    weekend = 1
  else:
    weekend = 0
  values.append(weekend)
if self.customDaysEncoder is not None:
  if timetuple.tm_wday in self.customDays:
    customDay = 1
  else:
    customDay = 0
  values.append(customDay)
if self.holidayEncoder is not None:
  holidays = [(12, 25)]
  val = 0
  for h in holidays:
    hdate = datetime.datetime(timetuple.tm_year, h[0], h[1], 0, 0, 0)
    if input > hdate:
      diff = input - hdate
      if diff.days == 0:
        val = 1
        break
      elif diff.days == 1:
        val = 1.0 - (float(diff.seconds) / 86400)
        break
    else:
      diff = hdate - input
      if diff.days == 0:
        val = 1.0 - (float(diff.seconds) / 86400)
  values.append(val)
if self.timeOfDayEncoder is not None:
  values.append(timeOfDay)
return values
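# NOTE: illustrative sketch, not part of the original source. It traces the scalar
# values produced by getEncodedValues() above for one timestamp, assuming only the
# season, dayOfWeek, weekend and timeOfDay sub-encoders are enabled (which encoders
# exist depends on how the DateEncoder was configured).
import datetime

ts = datetime.datetime(2016, 7, 22, 14, 30)   # a Friday afternoon
tt = ts.timetuple()

timeOfDay = tt.tm_hour + tt.tm_min / 60.0     # 14.5
season = tt.tm_yday - 1                       # 203 (204th day of a leap year)
dayOfWeek = tt.tm_wday + timeOfDay / 24.0     # 4 + ~0.60 ~= 4.60
weekend = 1 if tt.tm_wday in (5, 6) or (tt.tm_wday == 4 and timeOfDay > 18) else 0  # 0

print([season, dayOfWeek, weekend, timeOfDay])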
'See method description in :meth:`~.nupic.encoders.base.Encoder.getScalars`. :param input: (datetime) representing the time being encoded :returns: A numpy array of the corresponding scalar values in the following order: season, dayOfWeek, weekend, holiday, timeOfDay. Some of these fields might be omitted if they were not specified in the encoder.'
def getScalars(self, input):
return numpy.array(self.getEncodedValues(input))
'See method description in base.py'
def getBucketIndices(self, input):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  return [None] * len(self.encoders)
else:
  assert isinstance(input, datetime.datetime)
  scalars = self.getScalars(input)
  result = []
  for i in xrange(len(self.encoders)):
    (name, encoder, offset) = self.encoders[i]
    result.extend(encoder.getBucketIndices(scalars[i]))
  return result
'See method description in base.py'
def encodeIntoArray(self, input, output):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
  output[0:] = 0
else:
  if not isinstance(input, datetime.datetime):
    raise ValueError('Input is type %s, expected datetime. Value: %s' %
                     (type(input), str(input)))
  scalars = self.getScalars(input)
  for i in xrange(len(self.encoders)):
    (name, encoder, offset) = self.encoders[i]
    encoder.encodeIntoArray(scalars[i], output[offset:])
'Adds one encoder. :param name: (string) name of encoder, should be unique :param encoder: (:class:`.Encoder`) the encoder to add'
def addEncoder(self, name, encoder):
self.encoders.append((name, encoder, self.width))
for d in encoder.getDescription():
  self.description.append((d[0], d[1] + self.width))
self.width += encoder.getWidth()
self._flattenedEncoderList = None
self._flattenedFieldTypeList = None
'Represents the sum of the widths of each fields encoding.'
def getWidth(self):
return self.width
':param fieldEncodings: dict of dicts, mapping field names to the field params dict. Each field params dict has the following keys: 1. ``fieldname``: data field name 2. ``type``: an encoder type 3. All other keys are encoder parameters For example, .. code-block:: python fieldEncodings={ \'dateTime\': dict(fieldname=\'dateTime\', type=\'DateEncoder\', timeOfDay=(5,5)), \'attendeeCount\': dict(fieldname=\'attendeeCount\', type=\'ScalarEncoder\', name=\'attendeeCount\', minval=0, maxval=250, clipInput=True, w=5, resolution=10), \'consumption\': dict(fieldname=\'consumption\', type=\'ScalarEncoder\', name=\'consumption\', minval=0, maxval=110, clipInput=True, w=5, resolution=5), } would yield a vector with a part encoded by the :class:`.DateEncoder`, and two parts separately taken care of by the :class:`.ScalarEncoder` with the specified parameters. The three separate encodings are then merged together into the final vector, in such a way that they are always at the same location within the vector.'
def addMultipleEncoders(self, fieldEncodings):
encoderList = sorted(fieldEncodings.items())
for (key, fieldParams) in encoderList:
  if (':' not in key) and (fieldParams is not None):
    fieldParams = fieldParams.copy()
    fieldName = fieldParams.pop('fieldname')
    encoderName = fieldParams.pop('type')
    try:
      self.addEncoder(fieldName, eval(encoderName)(**fieldParams))
    except TypeError as e:
      print ('#### Error in constructing %s encoder. Possibly missing some required '
             'constructor parameters. Parameters that were provided are: %s' %
             (encoderName, fieldParams))
      raise
'See `nupic.encoders.base.Encoder` for more information.'
def getWidth(self):
return self.n
'See `nupic.encoders.base.Encoder` for more information.'
def getDescription(self):
return [('coordinate', 0), ('radius', 1)]
'See `nupic.encoders.base.Encoder` for more information.'
def getScalars(self, inputData):
return numpy.array(([0] * len(inputData)))
'See `nupic.encoders.base.Encoder` for more information. @param inputData (tuple) Contains coordinate (numpy.array, N-dimensional integer coordinate) and radius (int) @param output (numpy.array) Stores encoded SDR in this numpy array'
def encodeIntoArray(self, inputData, output):
(coordinate, radius) = inputData
assert isinstance(radius, int), 'Expected integer radius, got: {} ({})'.format(radius, type(radius))
neighbors = self._neighbors(coordinate, radius)
winners = self._topWCoordinates(neighbors, self.w)
bitFn = lambda coordinate: self._bitForCoordinate(coordinate, self.n)
indices = numpy.array([bitFn(w) for w in winners])
output[:] = 0
output[indices] = 1
'Returns coordinates around given coordinate, within given radius. Includes given coordinate. @param coordinate (numpy.array) N-dimensional integer coordinate @param radius (int) Radius around `coordinate` @return (numpy.array) List of coordinates'
@staticmethod def _neighbors(coordinate, radius):
ranges = (xrange(n - radius, (n + radius) + 1) for n in coordinate.tolist())
return numpy.array(list(itertools.product(*ranges)))
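# NOTE: illustrative sketch, not part of the original source. It shows what the
# _neighbors() helper above produces for a small 2-D coordinate and radius 1,
# using the same itertools.product construction.
import itertools
import numpy

coordinate = numpy.array([10, 20])
radius = 1

ranges = (range(n - radius, n + radius + 1) for n in coordinate.tolist())
neighbors = numpy.array(list(itertools.product(*ranges)))

print(len(neighbors))   # 9 -- the 3x3 block of coordinates centred on (10, 20)
print(neighbors[0])     # [ 9 19]
print(neighbors[4])     # [10 20] -- the centre coordinate itself is included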
'Returns the top W coordinates by order. @param coordinates (numpy.array) A 2D numpy array, where each element is a coordinate @param w (int) Number of top coordinates to return @return (numpy.array) A subset of `coordinates`, containing only the top ones by order'
@classmethod def _topWCoordinates(cls, coordinates, w):
orders = numpy.array([cls._orderForCoordinate(c) for c in coordinates.tolist()])
indices = numpy.argsort(orders)[-w:]
return coordinates[indices]
'Hash a coordinate to a 64 bit integer.'
@staticmethod def _hashCoordinate(coordinate):
coordinateStr = ','.join(str(v) for v in coordinate)
hash = int(int(hashlib.md5(coordinateStr).hexdigest(), 16) % (2 ** 64))
return hash
'Returns the order for a coordinate. @param coordinate (numpy.array) Coordinate @return (float) A value in the interval [0, 1), representing the order of the coordinate'
@classmethod def _orderForCoordinate(cls, coordinate):
seed = cls._hashCoordinate(coordinate)
rng = Random(seed)
return rng.getReal64()
'Maps the coordinate to a bit in the SDR. @param coordinate (numpy.array) Coordinate @param n (int) The number of available bits in the SDR @return (int) The index to a bit in the SDR'
@classmethod def _bitForCoordinate(cls, coordinate, n):
seed = cls._hashCoordinate(coordinate)
rng = Random(seed)
return rng.getUInt32(n)
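# NOTE: illustrative sketch, not part of the original source. It mimics the idea
# behind _hashCoordinate/_orderForCoordinate/_bitForCoordinate above -- every
# coordinate deterministically maps to an "order" and to one SDR bit -- but uses
# only the standard library instead of nupic's Random, so the exact values differ.
import hashlib

def hash_coordinate(coordinate):
    coordinate_str = ','.join(str(v) for v in coordinate)
    return int(hashlib.md5(coordinate_str.encode()).hexdigest(), 16) % (2 ** 64)

def bit_for_coordinate(coordinate, n):
    return hash_coordinate(coordinate) % n            # stand-in for Random(seed).getUInt32(n)

def order_for_coordinate(coordinate):
    return hash_coordinate(coordinate) / float(2 ** 64)   # stand-in for getReal64()

# The same coordinate always maps to the same bit and order, whenever it is seen.
print(bit_for_coordinate([10, 20], 1024) == bit_for_coordinate([10, 20], 1024))  # True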
''
def __init__(cls, name, bases, dict):
def custom_setattr(self, name, value):
  """A custom replacement for __setattr__

  Allows setting only existing attributes. It is designed to work
  with the _allow_new_attributes decorator.

  It works by checking if the requested attribute is already in the
  __dict__ or if the _canAddAttributes counter > 0. Otherwise it raises an
  exception.
  If all is well it calls the original __setattr__. This means it can work
  also with classes that already have a custom __setattr__.
  """
  if ((name == '_canAddAttributes') or
      (hasattr(self, '_canAddAttributes') and (self._canAddAttributes > 0)) or
      hasattr(self, name)):
    return self._original_setattr(name, value)
  else:
    raise Exception('Attempting to set a new attribute: ' + name)

if deactivation_key in os.environ:
  return super(LockAttributesMetaclass, cls).__init__(name, bases, dict)
if not hasattr(cls, '_original_setattr'):
  cls._original_setattr = cls.__setattr__
  cls.__setattr__ = custom_setattr
if '__init__' in dict:
  setattr(cls, '_original_init', dict['__init__'])
methods = [('__init__', dict.get('__init__', _simple_init)),
           ('__setstate__', dict.get('__setstate__', None))]
for (name, method) in methods:
  if method is not None:
    setattr(cls, name, _allow_new_attributes(method))
'Print out what test we are running'
def printTestHeader(self):
print
print '###############################################################'
print 'Running %s...' % (self,)
print '[%s UTC]' % datetime.utcnow()
print '###############################################################'
sys.stdout.flush()
return
'Print out a banner'
def printBanner(self, msg, *args):
print
print '==============================================================='
print msg % args
print >> sys.stdout, '[%s UTC; %s]' % (datetime.utcnow(), self)
print '==============================================================='
sys.stdout.flush()
return
'Add an item to the log items list for the currently running session. Our self.myAssertXXXXXX wrappers add the current items to the msg that is passed to the unittest\'s assertXXXXX methods. The extra info will show up in test results if the test fails.'
def addExtraLogItem(self, item):
self.__logItems.append(item)
return
'Called by our unittest.TestCase.assertXXXXXX overrides to construct a message from the given message plus self.__logItems, if any. If self.__logItems is non-empty, returns a dictionary containing the given message value as the "msg" property and self.__logItems as the "extra" property. If self.__logItems is empty, returns the given msg arg.'
def __wrapMsg(self, msg):
msg = msg if (not self.__logItems) else {'msg': msg, 'extra': copy.copy(self.__logItems)}
msg = str(msg)
msg = msg.replace('\\n', '\n')
return msg
'unittest.TestCase.assertEqual override; adds extra log items to msg'
def assertEqual(self, first, second, msg=None):
unittest.TestCase.assertEqual(self, first, second, self.__wrapMsg(msg))
return
'unittest.TestCase.assertNotEqual override; adds extra log items to msg'
def assertNotEqual(self, first, second, msg=None):
unittest.TestCase.assertNotEqual(self, first, second, self.__wrapMsg(msg))
return
'unittest.TestCase.assertTrue override; adds extra log items to msg'
def assertTrue(self, expr, msg=None):
unittest.TestCase.assertTrue(self, expr, self.__wrapMsg(msg))
return
'unittest.TestCase.assertFalse override; adds extra log items to msg'
def assertFalse(self, expr, msg=None):
unittest.TestCase.assertFalse(self, expr, self.__wrapMsg(msg))
return
'Override this method to set the default TM params for `self.tm`.'
def getDefaultTMParams(self):
return {}
'Initialize Temporal Memory, and other member variables. :param overrides: overrides for default Temporal Memory parameters'
def init(self, overrides=None):
params = self._computeTMParams(overrides)

class MonitoredTemporalMemory(TemporalMemoryMonitorMixin, self.getTMClass()):
  pass

self.tm = MonitoredTemporalMemory(**params)
'returns: (dict) containing all custom configuration properties.'
@classmethod def getCustomDict(cls):
return _CustomConfigurationFileWrapper.getCustomDict()
'Set a single custom setting and persist it to the custom configuration store. :param propertyName: (string) containing the name of the property to get :param value: (object) value to set the property to'
@classmethod def setCustomProperty(cls, propertyName, value):
cls.setCustomProperties({propertyName: value})
'Set multiple custom properties and persist them to the custom configuration store. :param properties: (dict) of property name/value pairs to set'
@classmethod def setCustomProperties(cls, properties):
_getLogger().info('Setting custom configuration properties=%r; caller=%r',
                  properties, traceback.format_stack())
_CustomConfigurationFileWrapper.edit(properties)
for (propertyName, value) in properties.iteritems():
  cls.set(propertyName, value)
'Clear all configuration properties from in-memory cache, but do NOT alter the custom configuration file. Used in unit-testing.'
@classmethod def clear(cls):
super(Configuration, cls).clear()
_CustomConfigurationFileWrapper.clear(persistent=False)
'Clear all custom configuration settings and delete the persistent custom configuration store.'
@classmethod def resetCustomConfig(cls):
_getLogger().info('Resetting all custom configuration properties; caller=%r',
                  traceback.format_stack())
super(Configuration, cls).clear()
_CustomConfigurationFileWrapper.clear(persistent=True)
'Loads custom configuration settings from their persistent storage. .. warning :: DO NOT CALL THIS: It\'s typically not necessary to call this method directly. This method exists *solely* for the benefit of ``prepare_conf.py``, which needs to load configuration files selectively.'
@classmethod def loadCustomConfig(cls):
cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)
'Intercept the _readStdConfigFiles call from our base config class to read in base and custom configuration settings.'
@classmethod def _readStdConfigFiles(cls):
super(Configuration, cls)._readStdConfigFiles()
cls.loadCustomConfig()
'If persistent is True, delete the temporary file Parameters: persistent: if True, custom configuration file is deleted'
@classmethod def clear(cls, persistent=False):
if persistent:
  try:
    os.unlink(cls.getPath())
  except OSError as e:
    if e.errno != errno.ENOENT:
      _getLogger().exception('Error %s while trying to remove dynamic configuration file: %s',
                             e.errno, cls.getPath())
      raise
cls._path = None
'Returns a dict of all temporary values in custom configuration file'
@classmethod def getCustomDict(cls):
if not os.path.exists(cls.getPath()):
  return dict()
properties = Configuration._readConfigFile(os.path.basename(cls.getPath()),
                                           os.path.dirname(cls.getPath()))
values = dict()
for propName in properties:
  if 'value' in properties[propName]:
    values[propName] = properties[propName]['value']
return values
'Edits the XML configuration file with the parameters specified by properties Parameters: properties: dict of settings to be applied to the custom configuration store (key is property name, value is value)'
@classmethod def edit(cls, properties):
copyOfProperties = copy(properties)
configFilePath = cls.getPath()
try:
  with open(configFilePath, 'r') as fp:
    contents = fp.read()
except IOError as e:
  if e.errno != errno.ENOENT:
    _getLogger().exception('Error %s reading custom configuration store from %s, '
                           'while editing properties %s.',
                           e.errno, configFilePath, properties)
    raise
  contents = '<configuration/>'
try:
  elements = ElementTree.XML(contents)
  ElementTree.tostring(elements)
except Exception as e:
  msg = ("File contents of custom configuration is corrupt. File location: %s; "
         "Contents: '%s'. Original Error (%s): %s." %
         (configFilePath, contents, type(e), e))
  _getLogger().exception(msg)
  raise RuntimeError(msg), None, sys.exc_info()[2]
if elements.tag != 'configuration':
  e = ("Expected top-level element to be 'configuration' but got '%s'" % elements.tag)
  _getLogger().error(e)
  raise RuntimeError(e)
for propertyItem in elements.findall('./property'):
  propInfo = dict((attr.tag, attr.text) for attr in propertyItem)
  name = propInfo['name']
  if name in copyOfProperties:
    foundValues = propertyItem.findall('./value')
    if len(foundValues) > 0:
      foundValues[0].text = str(copyOfProperties.pop(name))
      if not copyOfProperties:
        break
    else:
      e = 'Property %s missing value tag.' % (name,)
      _getLogger().error(e)
      raise RuntimeError(e)
for (propertyName, value) in copyOfProperties.iteritems():
  newProp = ElementTree.Element('property')
  nameTag = ElementTree.Element('name')
  nameTag.text = propertyName
  newProp.append(nameTag)
  valueTag = ElementTree.Element('value')
  valueTag.text = str(value)
  newProp.append(valueTag)
  elements.append(newProp)
try:
  makeDirectoryFromAbsolutePath(os.path.dirname(configFilePath))
  with open(configFilePath, 'w') as fp:
    fp.write(ElementTree.tostring(elements))
except Exception as e:
  _getLogger().exception('Error while saving custom configuration properties %s in %s.',
                         properties, configFilePath)
  raise
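# NOTE: illustrative sketch, not part of the original source. It builds the same
# <configuration><property><name/><value/></property></configuration> layout that
# edit() above reads and writes, so the file format being manipulated is easy to see.
# The property name used here is made up.
from xml.etree import ElementTree

root = ElementTree.Element('configuration')
prop = ElementTree.SubElement(root, 'property')
ElementTree.SubElement(prop, 'name').text = 'nupic.example.someProperty'
ElementTree.SubElement(prop, 'value').text = '42'

print(ElementTree.tostring(root))
# <configuration><property><name>nupic.example.someProperty</name><value>42</value></property></configuration>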
'Sets the path of the custom configuration file'
@classmethod def _setPath(cls):
cls._path = os.path.join(os.environ['NTA_DYNAMIC_CONF_DIR'], cls.customFileName)
'Get the path of the custom configuration file'
@classmethod def getPath(cls):
if cls._path is None:
  cls._setPath()
return cls._path
'Retrieve the requested property as a string. If property does not exist, then KeyError will be raised. :param prop: (string) name of the property :raises: KeyError :returns: (string) property value'
@classmethod def getString(cls, prop):
if cls._properties is None:
  cls._readStdConfigFiles()
envValue = os.environ.get('%s%s' % (cls.envPropPrefix, prop.replace('.', '_')), None)
if envValue is not None:
  return envValue
return cls._properties[prop]
'Retrieve the requested property and return it as a bool. If property does not exist, then KeyError will be raised. If the property value is neither 0 nor 1, then ValueError will be raised :param prop: (string) name of the property :raises: KeyError, ValueError :returns: (bool) property value'
@classmethod def getBool(cls, prop):
value = cls.getInt(prop)
if value not in (0, 1):
  raise ValueError('Expected 0 or 1, but got %r in config property %s' % (value, prop))
return bool(value)
'Retrieve the requested property and return it as an int. If property does not exist, then KeyError will be raised. :param prop: (string) name of the property :returns: (int) property value'
@classmethod def getInt(cls, prop):
return int(cls.getString(prop))
'Retrieve the requested property and return it as a float. If property does not exist, then KeyError will be raised. :param prop: (string) name of the property :returns: (float) property value'
@classmethod def getFloat(cls, prop):
return float(cls.getString(prop))
'Get the value of the given configuration property as string. This returns a string which is the property value, or the value of "default" arg. If the property is not found, use :meth:`getString` instead. .. note:: it\'s atypical for our configuration properties to be missing - a missing configuration property is usually a very serious error. Because of this, it\'s preferable to use one of the :meth:`getString`, :meth:`getInt`, :meth:`getFloat`, etc. variants instead of :meth:`get`. Those variants will raise KeyError when an expected property is missing. :param prop: (string) name of the property :param default: default value to return if property does not exist :returns: (string) property value, or default if the property does not exist'
@classmethod def get(cls, prop, default=None):
try:
  return cls.getString(prop)
except KeyError:
  return default
'Set the value of the given configuration property. :param prop: (string) name of the property :param value: (object) value to set'
@classmethod def set(cls, prop, value):
if cls._properties is None:
  cls._readStdConfigFiles()
cls._properties[prop] = str(value)
'Return a dict containing all of the configuration properties :returns: (dict) containing all configuration properties.'
@classmethod def dict(cls):
if cls._properties is None:
  cls._readStdConfigFiles()
result = dict(cls._properties)
keys = os.environ.keys()
replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix), keys)
for envKey in replaceKeys:
  key = envKey[len(cls.envPropPrefix):]
  key = key.replace('_', '.')
  result[key] = os.environ[envKey]
return result
'Parse the given XML file and store all properties it describes. :param filename: (string) name of XML file to parse (no path) :param path: (string) path of the XML file. If None, then use the standard configuration search path.'
@classmethod def readConfigFile(cls, filename, path=None):
properties = cls._readConfigFile(filename, path)
if cls._properties is None:
  cls._properties = dict()
for name in properties:
  if 'value' in properties[name]:
    cls._properties[name] = properties[name]['value']
'Parse the given XML file and return a dict describing the file. :param filename: (string) name of XML file to parse (no path) :param path: (string) path of the XML file. If None, then use the standard configuration search path. :returns: (dict) with each property as a key and a dict of all the property\'s attributes as value'
@classmethod def _readConfigFile(cls, filename, path=None):
outputProperties = dict()
if path is None:
  filePath = cls.findConfigFile(filename)
else:
  filePath = os.path.join(path, filename)
try:
  if filePath is not None:
    try:
      _getLogger().debug('Loading config file: %s', filePath)
      with open(filePath, 'r') as inp:
        contents = inp.read()
    except Exception:
      raise RuntimeError('Expected configuration file at %s' % filePath)
  else:
    try:
      contents = resource_string('nupic.support', filename)
    except Exception as resourceException:
      if filename in [USER_CONFIG, CUSTOM_CONFIG]:
        contents = '<configuration/>'
      else:
        raise resourceException
  elements = ElementTree.XML(contents)
  if elements.tag != 'configuration':
    raise RuntimeError("Expected top-level element to be 'configuration' "
                       "but got '%s'" % elements.tag)
  propertyElements = elements.findall('./property')
  for propertyItem in propertyElements:
    propInfo = dict()
    propertyAttributes = list(propertyItem)
    for propertyAttribute in propertyAttributes:
      propInfo[propertyAttribute.tag] = propertyAttribute.text
    name = propInfo.get('name', None)
    if ('value' in propInfo) and (propInfo['value'] is None):
      value = ''
    else:
      value = propInfo.get('value', None)
      if value is None:
        if 'novalue' in propInfo:
          continue
        else:
          raise RuntimeError("Missing 'value' element within the property "
                             "element: => %s " % str(propInfo))
    restOfValue = value
    value = ''
    while True:
      pos = restOfValue.find('${env.')
      if pos == -1:
        value += restOfValue
        break
      value += restOfValue[0:pos]
      varTailPos = restOfValue.find('}', pos)
      if varTailPos == -1:
        raise RuntimeError("Trailing environment variable tag delimiter '}' "
                           "not found in %r" % restOfValue)
      varname = restOfValue[(pos + 6):varTailPos]
      if varname not in os.environ:
        raise RuntimeError('Attempting to use the value of the environment variable %r, '
                           'which is not defined' % varname)
      envVarValue = os.environ[varname]
      value += envVarValue
      restOfValue = restOfValue[(varTailPos + 1):]
    if name is None:
      raise RuntimeError("Missing 'name' element within following property "
                         "element:\n => %s " % str(propInfo))
    propInfo['value'] = value
    outputProperties[name] = propInfo
  return outputProperties
except Exception:
  _getLogger().exception('Error while parsing configuration file: %s.', filePath)
  raise
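# NOTE: illustrative sketch, not part of the original source. It isolates the
# '${env.VAR}' substitution loop from _readConfigFile() above so the behaviour is
# easier to see in isolation; the environment variable name used here is made up.
import os

def expand_env(value):
    out, rest = '', value
    while True:
        pos = rest.find('${env.')
        if pos == -1:
            return out + rest
        out += rest[:pos]
        tail = rest.find('}', pos)
        if tail == -1:
            raise RuntimeError("Trailing '}' not found in %r" % rest)
        varname = rest[pos + 6:tail]
        out += os.environ[varname]    # a missing variable fails, as in the code above
        rest = rest[tail + 1:]

os.environ['MY_DATA_DIR'] = '/tmp/data'
print(expand_env('${env.MY_DATA_DIR}/models'))   # /tmp/data/models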
'Clear out the entire configuration.'
@classmethod def clear(cls):
cls._properties = None
cls._configPaths = None
'Search the configuration path (specified via the NTA_CONF_PATH environment variable) for the given filename. If found, return the complete path to the file. :param filename: (string) name of file to locate'
@classmethod def findConfigFile(cls, filename):
paths = cls.getConfigPaths()
for p in paths:
  testPath = os.path.join(p, filename)
  if os.path.isfile(testPath):
    return os.path.join(p, filename)
'Return the list of paths to search for configuration files. :returns: (list) of paths'
@classmethod def getConfigPaths(cls):
configPaths = []
if cls._configPaths is not None:
  return cls._configPaths
else:
  if 'NTA_CONF_PATH' in os.environ:
    configVar = os.environ['NTA_CONF_PATH']
    configPaths = configVar.split(os.pathsep)
  return configPaths
'Modify the paths we use to search for configuration files. :param paths: (list) of paths to search for config files.'
@classmethod def setConfigPaths(cls, paths):
cls._configPaths = list(paths)
'Read in all standard configuration files'
@classmethod def _readStdConfigFiles(cls):
cls.readConfigFile(DEFAULT_CONFIG)
cls.readConfigFile(USER_CONFIG)
'Print a message to the console. Prints only if level <= self.consolePrinterVerbosity Printing with level 0 is equivalent to using a print statement, and should normally be avoided. :param level: (int) indicating the urgency of the message with lower values meaning more urgent (messages at level 0 are the most urgent and are always printed) :param message: (string) possibly with format specifiers :param args: specifies the values for any format specifiers in message :param kw: newline is the only keyword argument. True (default) if a newline should be printed'
def cPrint(self, level, message, *args, **kw):
if level > self.consolePrinterVerbosity:
  return
if len(kw) > 1:
  raise KeyError('Invalid keywords for cPrint: %s' % str(kw.keys()))
newline = kw.get('newline', True)
if (len(kw) == 1) and ('newline' not in kw):
  raise KeyError('Invalid keyword for cPrint: %s' % kw.keys()[0])
if len(args) == 0:
  if newline:
    print message
  else:
    print message,
elif newline:
  print message % args
else:
  print (message % args),
'Generates a random sample from the discrete probability distribution and returns its value, the log of the probability of sampling that value and the log of the probability of sampling the current value (passed in).'
def propose(self, current, r):
stay = r.uniform(0, 1) < self.kernel
if stay:
  logKernel = numpy.log(self.kernel)
  return (current, logKernel, logKernel)
else:
  curIndex = self.keyMap[current]
  ri = r.randint(0, self.nKeys - 1)
  logKernel = numpy.log(1.0 - self.kernel)
  lp = logKernel + self.logp
  if ri < curIndex:
    return (self.keys[ri], lp, lp)
  else:
    return (self.keys[ri + 1], lp, lp)
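# NOTE: illustrative sketch, not part of the original source. It spells out the
# proposal distribution implemented by propose() above: with probability `kernel`
# the chain keeps the current value, otherwise it jumps uniformly to one of the
# other nKeys - 1 values; the kernel and nKeys values here are assumptions, and
# self.logp is taken to be log(1 / (nKeys - 1)).
import numpy

kernel = 0.75
nKeys = 5

logp_stay = numpy.log(kernel)                                 # log P(stay on current value)
logp_jump = numpy.log(1.0 - kernel) - numpy.log(nKeys - 1)    # log P(jump to any other value)

print(numpy.exp(logp_stay))                             # 0.75
print(numpy.exp(logp_jump))                             # 0.0625 for each of the 4 other keys
print(numpy.exp(logp_stay) + 4 * numpy.exp(logp_jump))  # 1.0 -- a valid distribution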