Columns: desc (string, 3–26.7k chars) · decl (string, 11–7.89k chars) · bodies (string, 8–553k chars)
'scalar space encoder'
def testScalarSpaceEncoder(self):
sse = ScalarSpaceEncoder(1, 1, 2, False, 2, 1, 1, None, 0, False, 'delta', forced=True)
self.assertTrue(isinstance(sse, DeltaEncoder))
sse = ScalarSpaceEncoder(1, 1, 2, False, 2, 1, 1, None, 0, False, 'absolute', forced=True)
self.assertFalse(isinstance(sse, DeltaEncoder))
'assert unrelated areas do not share bits (outside of chance collisions)'
def testEncodeUnrelatedAreas(self):
avgThreshold = 0.3

maxThreshold = 0.12
overlaps = overlapsForUnrelatedAreas(1499, 37, 5)
self.assertLess(np.max(overlaps), maxThreshold)
self.assertLess(np.average(overlaps), avgThreshold)

maxThreshold = 0.12
overlaps = overlapsForUnrelatedAreas(1499, 37, 10)
self.assertLess(np.max(overlaps), maxThreshold)
self.assertLess(np.average(overlaps), avgThreshold)

maxThreshold = 0.17
overlaps = overlapsForUnrelatedAreas(999, 25, 10)
self.assertLess(np.max(overlaps), maxThreshold)
self.assertLess(np.average(overlaps), avgThreshold)

maxThreshold = 0.25
overlaps = overlapsForUnrelatedAreas(499, 13, 10)
self.assertLess(np.max(overlaps), maxThreshold)
self.assertLess(np.average(overlaps), avgThreshold)
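For orientation, here is a self-contained sketch of the kind of measurement the helper above presumably performs. overlapsForUnrelatedAreas is a NuPIC test utility; this is not its implementation, just an illustration of pairwise overlap between random w-of-n bit patterns.

import numpy as np

def randomOverlaps(n, w, nPatterns, seed=42):
    # Encode nPatterns unrelated values as random w-of-n bit patterns and
    # return all pairwise overlaps as fractions of w.
    rng = np.random.RandomState(seed)
    patterns = [set(rng.choice(n, size=w, replace=False)) for _ in range(nPatterns)]
    overlaps = []
    for i in range(nPatterns):
        for j in range(i + 1, nPatterns):
            overlaps.append(len(patterns[i] & patterns[j]) / float(w))
    return np.array(overlaps)

# With n=1499 and w=37, chance overlaps stay far below the 0.3 average threshold.
print(np.average(randomOverlaps(1499, 37, 20)))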
'simple delta reconstruction test'
def testDeltaEncoder(self):
for i in range(5):
    encarr = self._dencoder.encodeIntoArray(i, np.zeros(100), learn=True)
self._dencoder.setStateLock(True)
for i in range(5, 7):
    encarr = self._dencoder.encodeIntoArray(i, np.zeros(100), learn=True)
res = self._dencoder.topDownCompute(encarr)
self.assertEqual(res[0].value, 6)
self.assertEqual(self._dencoder.topDownCompute(encarr)[0].value, res[0].value)
self.assertEqual(self._dencoder.topDownCompute(encarr)[0].scalar, res[0].scalar)
self.assertTrue(np.array_equal(self._dencoder.topDownCompute(encarr)[0].encoding, res[0].encoding))
'encoding verification test'
def testEncodingVerification(self):
feedIn = [1, 10, 4, 7, 9, 6, 3, 1]
expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]
self._dencoder.setStateLock(False)
for i in range(len(feedIn)):
    aseencode = np.zeros(100)
    self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
    delencode = np.zeros(100)
    self._dencoder.encodeIntoArray(feedIn[i], delencode, learn=True)
    self.assertTrue(np.array_equal(delencode[0], aseencode[0]))
'Check that locking the state works correctly'
def testLockingState(self):
feedIn = [1, 10, 9, 7, 9, 6, 3, 1]
expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]
for i in range(len(feedIn)):
    if i == 3:
        self._dencoder.setStateLock(True)
    aseencode = np.zeros(100)
    self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
    delencode = np.zeros(100)
    if i >= 3:
        self._dencoder.encodeIntoArray(feedIn[i] - feedIn[2], delencode, learn=True)
    else:
        self._dencoder.encodeIntoArray(expectedOut[i], delencode, learn=True)
    self.assertTrue(np.array_equal(delencode[0], aseencode[0]))
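A minimal, self-contained sketch of the behaviour these two tests exercise (this is not the NuPIC DeltaEncoder API, just the idea): the encoder remembers the previous input, encodes the difference, and a state lock freezes the stored baseline so later inputs are measured against the last value seen before the lock.

class TinyDeltaEncoder(object):
    # Hypothetical illustration class; names only loosely mirror the tests above.
    def __init__(self):
        self._prev = None
        self._locked = False

    def setStateLock(self, lock):
        self._locked = lock

    def encode(self, value):
        delta = 0 if self._prev is None else value - self._prev
        if not self._locked:
            self._prev = value
        return delta

enc = TinyDeltaEncoder()
print([enc.encode(v) for v in (1, 10, 4, 7)])   # [0, 9, -6, 3], like expectedOut above
enc.setStateLock(True)
print(enc.encode(100) == 100 - 7)               # True: deltas stay relative to the locked value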
'creating date encoder instance'
def testDateEncoder(self):
self.assertSequenceEqual(self._e.getDescription(),
                         [('season', 0), ('day of week', 12), ('weekend', 19), ('time of day', 21)])
self.assertTrue(numpy.array_equal(self._expected, self._bits))
'missing values'
def testMissingValues(self):
mvOutput = self._e.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
self.assertEqual(sum(mvOutput), 0)
'decoding date'
def testDecoding(self):
decoded = self._e.decode(self._bits)
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 4)

(ranges, _) = fieldsDict['season']
self.assertEqual(len(ranges), 1)
self.assertSequenceEqual(ranges[0], [305, 305])

(ranges, _) = fieldsDict['time of day']
self.assertEqual(len(ranges), 1)
self.assertSequenceEqual(ranges[0], [14.4, 14.4])

(ranges, _) = fieldsDict['day of week']
self.assertEqual(len(ranges), 1)
self.assertSequenceEqual(ranges[0], [3, 3])

(ranges, _) = fieldsDict['weekend']
self.assertEqual(len(ranges), 1)
self.assertSequenceEqual(ranges[0], [0, 0])
'Check topDownCompute'
def testTopDownCompute(self):
topDown = self._e.topDownCompute(self._bits)
topDownValues = numpy.array([elem.value for elem in topDown])
errs = topDownValues - numpy.array([320.25, 3.5, 0.167, 14.8])
self.assertAlmostEqual(errs.max(), 0, 4)
'Check bucket index support'
def testBucketIndexSupport(self):
bucketIndices = self._e.getBucketIndices(self._d)
topDown = self._e.getBucketInfo(bucketIndices)
topDownValues = numpy.array([elem.value for elem in topDown])
errs = topDownValues - numpy.array([320.25, 3.5, 0.167, 14.8])
self.assertAlmostEqual(errs.max(), 0, 4)
encodings = []
for x in topDown:
    encodings.extend(x.encoding)
self.assertTrue(numpy.array_equal(encodings, self._expected))
'Look at the holiday encoding more carefully because of its smooth transition.'
def testHoliday(self):
e = DateEncoder(holiday=5, forced=True)
holiday = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype='uint8')
notholiday = numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0], dtype='uint8')
holiday2 = numpy.array([0, 0, 0, 1, 1, 1, 1, 1, 0, 0], dtype='uint8')

d = datetime.datetime(2010, 12, 25, 4, 55)
self.assertTrue(numpy.array_equal(e.encode(d), holiday))

d = datetime.datetime(2008, 12, 27, 4, 55)
self.assertTrue(numpy.array_equal(e.encode(d), notholiday))

d = datetime.datetime(1999, 12, 26, 8, 0)
self.assertTrue(numpy.array_equal(e.encode(d), holiday2))

d = datetime.datetime(2011, 12, 24, 16, 0)
self.assertTrue(numpy.array_equal(e.encode(d), holiday2))
'Test weekend encoder'
def testWeekend(self):
e = DateEncoder(customDays=(21, ['sat', 'sun', 'fri']), forced=True)
mon = DateEncoder(customDays=(21, 'Monday'), forced=True)
e2 = DateEncoder(weekend=(21, 1), forced=True)

d = datetime.datetime(1988, 5, 29, 20, 0)
self.assertTrue(numpy.array_equal(e.encode(d), e2.encode(d)))
for _ in range(300):
    d = d + datetime.timedelta(days=1)
    self.assertTrue(numpy.array_equal(e.encode(d), e2.encode(d)))
    if mon.decode(mon.encode(d))[0]['Monday'][0][0][0] == 1.0:
        self.assertEqual(d.weekday(), 0)
    else:
        self.assertNotEqual(d.weekday(), 0)
'Send bitmap as array of indices'
def testEncodeArray(self):
e = self._encoder(self.n, name=self.name)
bitmap = [2, 7, 15, 18, 23]
out = e.encode(bitmap)
self.assertEqual(out.sum(), len(bitmap))
x = e.decode(out)
self.assertIsInstance(x[0], dict)
self.assertTrue(self.name in x[0])
'Send bitmap as array of indices whose length does not match w'
def testEncodeArrayInvalidW(self):
e = self._encoder(self.n, 3, name=self.name)
with self.assertRaises(ValueError):
    e.encode([2])
with self.assertRaises(ValueError):
    e.encode([2, 7, 15, 18, 23])
'Compare two bitmaps for closeness'
def testClosenessScores(self):
e = self._encoder(self.n, name=self.name)

# Identical => 1
bitmap1 = [2, 7, 15, 18, 23]
bitmap2 = [2, 7, 15, 18, 23]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 1.0)

# No overlap => 0
bitmap1 = [2, 7, 15, 18, 23]
bitmap2 = [3, 9, 14, 19, 24]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.0)

# Similar => 4 of 5 match
bitmap1 = [2, 7, 15, 18, 23]
bitmap2 = [2, 7, 17, 18, 23]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.8)

# Little => 1 of 5 match
bitmap1 = [2, 7, 15, 18, 23]
bitmap2 = [3, 7, 17, 19, 24]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.2)

# Extra active bit => off by 1 of 5
bitmap1 = [2, 7, 15, 18, 23]
bitmap2 = [2, 7, 11, 15, 18, 23]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.8)

# Missing active bit => off by 1 of 5
bitmap1 = [2, 7, 15, 18, 23]
bitmap2 = [2, 7, 18, 23]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 0.8)
'Verify that the values of buckets are as expected for given init params'
def testGetBucketValues(self):
le = LogEncoder(w=5, resolution=0.1, minval=1, maxval=10000, name='amount', forced=True)
inc = 0.1
exp = 0
expected = []
while exp <= 4.0001:
    val = 10 ** exp
    expected.append(val)
    exp += inc
expected = numpy.array(expected)
actual = numpy.array(le.getBucketValues())
numpy.testing.assert_almost_equal(expected, actual, 7)
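The expected bucket values above follow directly from the parameters: with minval=1 and maxval=10000 the encoder covers log10 values from 0 to 4, and a resolution of 0.1 in that log space puts bucket centres at 10**(k * 0.1). A quick check:

import numpy
expected = numpy.array([10 ** (k * 0.1) for k in range(41)])   # 10**0.0 .. 10**4.0
print(list(expected[[0, 10, 40]]))   # [1.0, 10.0, 10000.0] (to within float rounding)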
'Verifies you can use radius to specify a log encoder'
def testInitWithRadius(self):
le = LogEncoder(w=1, radius=1, minval=1, maxval=10000, name='amount', forced=True)
self.assertEqual(le.encoder.n, 5)

value = 1.0
output = le.encode(value)
expected = [1, 0, 0, 0, 0]
expected = numpy.array(expected, dtype='uint8')
self.assertTrue(numpy.array_equal(output, expected))

value = 100.0
output = le.encode(value)
expected = [0, 0, 1, 0, 0]
expected = numpy.array(expected, dtype='uint8')
self.assertTrue(numpy.array_equal(output, expected))
'Verifies you can use N to specify a log encoder'
def testInitWithN(self):
n = 100
le = LogEncoder(n=n, forced=True)
self.assertEqual(le.encoder.n, n)
'Verifies that unusual combinations of minval and maxval are handled properly'
def testMinvalMaxVal(self):
self.assertRaises(ValueError, LogEncoder, n=100, minval=0, maxval=-100, forced=True)
self.assertRaises(ValueError, LogEncoder, n=100, minval=0, maxval=1e-07, forced=True)

le = LogEncoder(n=100, minval=42, maxval=1300000000000.0, forced=True)
expectedRadius = 0.552141792732
expectedResolution = 0.110428358546
self.assertAlmostEqual(le.encoder.radius, expectedRadius)
self.assertAlmostEqual(le.encoder.resolution, expectedResolution)
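A back-of-the-envelope check of the expected radius and resolution above, assuming the log encoder's default width of w=5 bits (an assumption; the exact formula lives in LogEncoder): the encoder spans log10(maxval/minval) decades and divides that span into n - w buckets.

import math

n, w = 100, 5                                    # w=5 is an assumed default
span = math.log10(1.3e12) - math.log10(42)       # ~10.4907 decades
resolution = span / (n - w)                      # ~0.110428358546
radius = w * resolution                          # ~0.552141792732
print(abs(resolution - 0.110428358546) < 1e-9)   # True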
'configuration.Configuration relies on static methods which load files by name. Since we need to be able to run tests and potentially change the content of those files between tests, without interfering with one another or with the system configuration, this setUp() function allocates temporary files used only during the test, using conf/nupic-default.xml and conf/nupic-site.xml (relative to the unit tests) as templates.'
def setUp(self):
self.files = {}

with tempfile.NamedTemporaryFile(prefix='nupic-default.xml-unittest-', delete=False) as outp:
    self.addCleanup(os.remove, outp.name)
    with open(resource_filename(__name__, 'conf/nupic-default.xml')) as inp:
        outp.write(inp.read())
    self.files['nupic-default.xml'] = outp.name

with tempfile.NamedTemporaryFile(prefix='nupic-site.xml-unittest-', delete=False) as outp:
    self.addCleanup(os.remove, outp.name)
    with open(resource_filename(__name__, 'conf/nupic-site.xml')) as inp:
        outp.write(inp.read())
    self.files['nupic-site.xml'] = outp.name
'Configures mocks for time.time and time.sleep such that every call to time.sleep(x) increments the return value of time.time() by x. mockTime: time.time mock; mockSleep: time.sleep mock.'
def mockSleepTime(self, mockTime, mockSleep):
class _TimeContainer(object):
    accumulatedTime = 0

def testTime():
    return _TimeContainer.accumulatedTime

def testSleep(duration):
    _TimeContainer.accumulatedTime += duration

mockTime.side_effect = testTime
mockSleep.side_effect = testSleep
'Test that when timeoutSec == 0, function is executed exactly once with no retries, and raises an exception on failure.'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testRetryNoTimeForRetries(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)
retryDecorator = decorators.retry(timeoutSec=0, initialRetryDelaySec=0.2, maxRetryDelaySec=10)
testFunction = Mock(side_effect=TestParentException('Test exception'),
                    __name__='testFunction', autospec=True)
with self.assertRaises(TestParentException):
    retryDecorator(testFunction)()
self.assertFalse(mockSleep.called)
testFunction.assert_called_once_with()
'Test that delay times are correct.'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testRetryWaitsInitialRetryDelaySec(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)
retryDecorator = decorators.retry(timeoutSec=30, initialRetryDelaySec=2, maxRetryDelaySec=10)
testFunction = Mock(side_effect=TestParentException('Test exception'),
                    __name__='testFunction', autospec=True)
with self.assertRaises(TestParentException):
    retryDecorator(testFunction)()
self.assertEqual(mockSleep.mock_calls, [call(2), call(4), call(8), call(10), call(10)])
self.assertEqual(testFunction.call_count, 6)
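The sleep pattern asserted above can be reproduced with a small sketch of the back-off rule the test implies (this mirrors the asserted behaviour, not necessarily the internals of decorators.retry): the delay starts at initialRetryDelaySec, doubles after every failed attempt, is capped at maxRetryDelaySec, and attempts continue until the mocked clock passes timeoutSec.

def backoffSchedule(timeoutSec, initialRetryDelaySec, maxRetryDelaySec):
    # Return the sleep durations a persistently failing call would see under this rule.
    delays, elapsed, delay = [], 0, initialRetryDelaySec
    while elapsed < timeoutSec:
        delays.append(delay)
        elapsed += delay
        delay = min(delay * 2, maxRetryDelaySec)
    return delays

print(backoffSchedule(30, 2, 10))   # [2, 4, 8, 10, 10]
# One initial attempt plus one attempt per sleep gives the 6 calls asserted above.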
'Test that retry is triggered if raised exception is in retryExceptions.'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testRetryRetryExceptionIncluded(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)
retryDecorator = decorators.retry(timeoutSec=1, initialRetryDelaySec=1, maxRetryDelaySec=10,
                                  retryExceptions=(TestParentException,))

@retryDecorator
def testFunction():
    raise TestChildException('Test exception')

with self.assertRaises(TestChildException):
    testFunction()
self.assertEqual(mockSleep.call_count, 1)
'Test that retry is not triggered if raised exception is not in retryExceptions'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testRetryRetryExceptionExcluded(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)

class TestExceptionA(Exception):
    pass

class TestExceptionB(Exception):
    pass

retryDecorator = decorators.retry(timeoutSec=1, initialRetryDelaySec=1, maxRetryDelaySec=10,
                                  retryExceptions=(TestExceptionA,))

@retryDecorator
def testFunction():
    raise TestExceptionB('Test exception')

with self.assertRaises(TestExceptionB):
    testFunction()
self.assertEqual(mockSleep.call_count, 0)
'Test that if retryFilter is specified and exception is in retryExceptions, retries iff retryFilter returns true.'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testRetryRetryFilter(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)

retryDecoratorTrueFilter = decorators.retry(timeoutSec=1, initialRetryDelaySec=1, maxRetryDelaySec=10,
                                            retryExceptions=(TestParentException,),
                                            retryFilter=lambda _1, _2, _3: True)

@retryDecoratorTrueFilter
def testFunctionTrue():
    raise TestChildException('Test exception')

with self.assertRaises(TestChildException):
    testFunctionTrue()
self.assertEqual(mockSleep.call_count, 1)

mockSleep.reset_mock()

retryDecoratorFalseFilter = decorators.retry(timeoutSec=1, initialRetryDelaySec=1, maxRetryDelaySec=10,
                                             retryExceptions=(TestParentException,),
                                             retryFilter=lambda _1, _2, _3: False)

@retryDecoratorFalseFilter
def testFunctionFalse():
    raise TestChildException('Test exception')

with self.assertRaises(TestChildException):
    testFunctionFalse()
self.assertEqual(mockSleep.call_count, 0)
'Test that the decorated function receives only the expected args and that it returns the expected value on success.'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testReturnsExpectedWithExpectedArgs(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)
retryDecorator = decorators.retry(timeoutSec=30, initialRetryDelaySec=2, maxRetryDelaySec=10)
testFunction = Mock(return_value=321, __name__='testFunction', autospec=True)
returnValue = retryDecorator(testFunction)(1, 2, a=3, b=4)
self.assertEqual(returnValue, 321)
testFunction.assert_called_once_with(1, 2, a=3, b=4)
'If the initial call succeeds, test that no retries are performed.'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testNoRetryIfCallSucceeds(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)
retryDecorator = decorators.retry(timeoutSec=30, initialRetryDelaySec=2, maxRetryDelaySec=10)
testFunction = Mock(__name__='testFunction', autospec=True)
retryDecorator(testFunction)()
testFunction.assert_called_once_with()
'If initial attempts fail but subsequent attempt succeeds, ensure that expected number of retries is performed and expected value is returned.'
@patch('time.sleep', autospec=True)
@patch('time.time', autospec=True)
def testFailsFirstSucceedsLater(self, mockTime, mockSleep):
self.mockSleepTime(mockTime, mockSleep)
retryDecorator = decorators.retry(timeoutSec=30, initialRetryDelaySec=2, maxRetryDelaySec=10)
testFunction = Mock(side_effect=[TestParentException('Test exception 1'),
                                 TestParentException('Test exception 2'),
                                 321],
                    __name__='testFunction', autospec=True)
returnValue = retryDecorator(testFunction)()
self.assertEqual(returnValue, 321)
self.assertEqual(testFunction.call_count, 3)
'Test generic usage of serializable mixin class'
def testReadFromAndWriteToFile(self):
class Bar(object):
    pass

class Foo(Bar, Serializable):

    def __init__(self, bar):
        self.bar = bar

    @classmethod
    def getSchema(cls):
        return serializable_test_capnp.Foo

    @classmethod
    def read(cls, proto):
        foo = object.__new__(cls)
        foo.bar = proto.bar
        return foo

    def write(self, proto):
        proto.bar = self.bar

def _remove(fname):
    if os.path.isfile(fname):
        os.remove(fname)

self.addCleanup(_remove, 'foo.data')

with open('foo.data', 'wb') as outp:
    Foo('bar').writeToFile(outp)

with open('foo.data', 'rb') as inp:
    self.assertEqual(Foo.readFromFile(inp).bar, 'bar')
'Get the predictions and prediction confidences for all examples.'
@classmethod
def setUpClass(cls):
for example in cls.examples:
    predictionGenerator = _getPredictionsGenerator(cls.examplesDir, example)
    for prediction in predictionGenerator(MAX_PREDICTIONS):
        cls.oneStepPredictions[example].append(prediction[0])
        cls.oneStepConfidences[example].append(prediction[1])
        cls.fiveStepPredictions[example].append(prediction[2])
        cls.fiveStepConfidences[example].append(prediction[3])
'Make sure the examples directory is in the correct location'
def testExamplesDirExists(self):
failMsg = 'Path to examples does not exist: %s' % ExamplesTest.examplesDir
self.assertTrue(os.path.exists(ExamplesTest.examplesDir), failMsg)
'Make sure all examples output the same number of oneStepPredictions.'
def testNumberOfOneStepPredictions(self):
self.assertEqual(len(ExamplesTest.oneStepPredictions['opf']),
                 len(ExamplesTest.oneStepPredictions['algo']))
self.assertEqual(len(ExamplesTest.oneStepPredictions['opf']),
                 len(ExamplesTest.oneStepPredictions['network']))
'Make sure one-step predictions are the same for OPF and Algo API.'
@unittest.expectedFailure
def testOneStepPredictionsOpfVsAlgo(self):
for resultPair in zip(self.oneStepPredictions['opf'], self.oneStepPredictions['algo']): assert_approx_equal(err_msg="one-step 'opf' and 'algo' differ", *resultPair)
'Make sure one-step predictions are the same for OPF and Network API.'
@unittest.expectedFailure
def testOneStepPredictionsOpfVsNetwork(self):
for resultPair in zip(self.oneStepPredictions['opf'], self.oneStepPredictions['network']): assert_approx_equal(err_msg="one-step 'opf' and 'network' differ", *resultPair)
'Make sure one-step predictions are the same for Algo and Network API.'
@unittest.expectedFailure
def testOneStepPredictionsAlgoVsNetwork(self):
for resultPair in zip(self.oneStepPredictions['algo'], self.oneStepPredictions['network']): assert_approx_equal(err_msg="one-step 'algo' and 'network' differ", *resultPair)
'Make sure five-step predictions are the same for OPF and Network API.'
@unittest.expectedFailure
def testFiveStepPredictionsOpfVsNetwork(self):
for resultPair in zip(self.fiveStepPredictions['opf'], self.fiveStepPredictions['network']): assert_approx_equal(err_msg="five-step 'opf' and 'network' differ", *resultPair)
'Make sure one-step confidences are the same for OPF and Algo API.'
@unittest.expectedFailure
def testOneStepConfidencesOpfVsAlgo(self):
for resultPair in zip(self.oneStepConfidences['opf'], self.oneStepConfidences['algo']): assert_approx_equal(err_msg="one-step 'opf' and 'algo' differ", *resultPair)
'Make sure one-step confidences are the same for OPF and Network API.'
@unittest.expectedFailure
def testOneStepConfidencesOpfVsNetwork(self):
for resultPair in zip(self.oneStepConfidences['opf'], self.oneStepConfidences['network']): assert_approx_equal(err_msg="one-step 'opf' and 'network' differ", *resultPair)
'Make sure one-step confidences are the same for Algo and Network API.'
@unittest.expectedFailure
def testOneStepConfidencesAlgoVsNetwork(self):
for resultPair in zip(self.oneStepConfidences['algo'], self.oneStepConfidences['network']): assert_approx_equal(err_msg="one-step 'algo' and 'network' differ", *resultPair)
'Make sure five-step confidences are the same for OPF and Network API.'
@unittest.expectedFailure
def testFiveStepConfidencesOpfVsNetwork(self):
for resultPair in zip(self.fiveStepConfidences['opf'], self.fiveStepConfidences['network']): assert_approx_equal(err_msg="five-step 'opf' and 'network' differ", *resultPair)
'Runs basic FileRecordStream tests.'
def testBasic(self):
filename = _getTempFileName()

fields = [FieldMetaInfo('name', FieldMetaType.string, FieldMetaSpecial.none),
          FieldMetaInfo('timestamp', FieldMetaType.datetime, FieldMetaSpecial.timestamp),
          FieldMetaInfo('integer', FieldMetaType.integer, FieldMetaSpecial.none),
          FieldMetaInfo('real', FieldMetaType.float, FieldMetaSpecial.none),
          FieldMetaInfo('reset', FieldMetaType.integer, FieldMetaSpecial.reset),
          FieldMetaInfo('sid', FieldMetaType.string, FieldMetaSpecial.sequence),
          FieldMetaInfo('categoryField', FieldMetaType.integer, FieldMetaSpecial.category)]
fieldNames = ['name', 'timestamp', 'integer', 'real', 'reset', 'sid', 'categoryField']

print 'Creating temp file:', filename

with FileRecordStream(streamID=filename, write=True, fields=fields) as s:
    self.assertEqual(0, s.getDataRowCount())

    records = (['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 'seq-1', 10],
               ['rec_2', datetime(day=2, month=3, year=2010), 8, 7.5, 0, 'seq-1', 11],
               ['rec_3', datetime(day=3, month=3, year=2010), 12, 8.5, 0, 'seq-1', 12])

    self.assertEqual(fields, s.getFields())
    self.assertEqual(0, s.getNextRecordIdx())

    print 'Writing records ...'
    for r in records:
        print list(r)
        s.appendRecord(list(r))

    self.assertEqual(3, s.getDataRowCount())

    recordsBatch = (['rec_4', datetime(day=4, month=3, year=2010), 2, 9.5, 1, 'seq-1', 13],
                    ['rec_5', datetime(day=5, month=3, year=2010), 6, 10.5, 0, 'seq-1', 14],
                    ['rec_6', datetime(day=6, month=3, year=2010), 11, 11.5, 0, 'seq-1', 15])

    print 'Adding batch of records...'
    for rec in recordsBatch:
        print rec
    s.appendRecords(recordsBatch)

    self.assertEqual(6, s.getDataRowCount())

with FileRecordStream(filename) as s:
    self.assertEqual(6, s.getDataRowCount())
    self.assertEqual(fieldNames, s.getFieldNames())
    self.assertEqual(0, s.getNextRecordIdx())

    readStats = s.getStats()
    print 'Got stats:', readStats
    expectedStats = {'max': [None, None, 12, 11.5, 1, None, 15],
                     'min': [None, None, 2, 6.5, 0, None, 10]}
    self.assertEqual(expectedStats, readStats)

    readRecords = []
    print 'Reading records ...'
    while True:
        r = s.getNextRecord()
        print r
        if r is None:
            break
        readRecords.append(r)

    allRecords = records + recordsBatch
    for (r1, r2) in zip(allRecords, readRecords):
        self.assertEqual(r1, r2)
'Runs FileRecordStream tests with multiple category fields.'
def testMultipleClasses(self):
filename = _getTempFileName() fields = [FieldMetaInfo('name', FieldMetaType.string, FieldMetaSpecial.none), FieldMetaInfo('timestamp', FieldMetaType.datetime, FieldMetaSpecial.timestamp), FieldMetaInfo('integer', FieldMetaType.integer, FieldMetaSpecial.none), FieldMetaInfo('real', FieldMetaType.float, FieldMetaSpecial.none), FieldMetaInfo('reset', FieldMetaType.integer, FieldMetaSpecial.reset), FieldMetaInfo('sid', FieldMetaType.string, FieldMetaSpecial.sequence), FieldMetaInfo('categories', FieldMetaType.list, FieldMetaSpecial.category)] fieldNames = ['name', 'timestamp', 'integer', 'real', 'reset', 'sid', 'categories'] print 'Creating temp file:', filename with FileRecordStream(streamID=filename, write=True, fields=fields) as s: self.assertEqual(0, s.getDataRowCount()) records = (['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 'seq-1', [0, 1, 2]], ['rec_2', datetime(day=2, month=3, year=2010), 8, 7.5, 0, 'seq-1', [3, 4, 5]], ['rec_3', datetime(day=3, month=3, year=2010), 2, 8.5, 0, 'seq-1', [6, 7, 8]]) self.assertEqual(fields, s.getFields()) self.assertEqual(0, s.getNextRecordIdx()) print 'Writing records ...' for r in records: print r s.appendRecord(r) self.assertEqual(3, s.getDataRowCount()) recordsBatch = (['rec_4', datetime(day=4, month=3, year=2010), 2, 9.5, 1, 'seq-1', [2, 3, 4]], ['rec_5', datetime(day=5, month=3, year=2010), 6, 10.5, 0, 'seq-1', [3, 4, 5]], ['rec_6', datetime(day=6, month=3, year=2010), 11, 11.5, 0, 'seq-1', [4, 5, 6]]) print 'Adding batch of records...' for rec in recordsBatch: print rec s.appendRecords(recordsBatch) self.assertEqual(6, s.getDataRowCount()) with FileRecordStream(filename) as s: self.assertEqual(6, s.getDataRowCount()) self.assertEqual(fieldNames, s.getFieldNames()) self.assertEqual(0, s.getNextRecordIdx()) readStats = s.getStats() print 'Got stats:', readStats expectedStats = {'max': [None, None, 11, 11.5, 1, None, None], 'min': [None, None, 2, 6.5, 0, None, None]} self.assertEqual(expectedStats, readStats) readRecords = [] print 'Reading records ...' while True: r = s.getNextRecord() print r if (r is None): break readRecords.append(r) expectedRecords = (['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 'seq-1', [0, 1, 2]], ['rec_2', datetime(day=2, month=3, year=2010), 8, 7.5, 0, 'seq-1', [3, 4, 5]], ['rec_3', datetime(day=3, month=3, year=2010), 2, 8.5, 0, 'seq-1', [6, 7, 8]], ['rec_4', datetime(day=4, month=3, year=2010), 2, 9.5, 1, 'seq-1', [2, 3, 4]], ['rec_5', datetime(day=5, month=3, year=2010), 6, 10.5, 0, 'seq-1', [3, 4, 5]], ['rec_6', datetime(day=6, month=3, year=2010), 11, 11.5, 0, 'seq-1', [4, 5, 6]]) for (r1, r2) in zip(expectedRecords, readRecords): self.assertEqual(r1, r2)
'data looks like:             should generate deltas
     "t"          "s"            "dt"      "ds"

     t            10             X
     t+1s         20             1s        10
     t+1d         50             86399     30
  r  t+1d+1s      60             X
     r+1d+3s      65             2s        5'
@unittest.skip('Disabled until we figure out why it is failing in internal tests')
def testDeltaFilter(self):
r = RecordSensor() filename = resource_filename('nupic.datafiles', 'extra/qa/delta.csv') datasource = FileRecordStream(filename) r.dataSource = datasource n = 50 encoder = MultiEncoder({'blah': dict(fieldname='s', type='ScalarEncoder', n=n, w=11, minval=0, maxval=100)}) r.encoder = encoder resetOut = numpy.zeros((1,), dtype='float') sequenceIdOut = numpy.zeros((1,), dtype='float') dataOut = numpy.zeros((n,), dtype='float') sourceOut = numpy.zeros((1,), dtype='float') categoryOut = numpy.zeros((1,), dtype='float') outputs = dict(resetOut=resetOut, sourceOut=sourceOut, sequenceIdOut=sequenceIdOut, dataOut=dataOut, categoryOut=categoryOut) inputs = dict() r.verbosity = 0 r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16, minute=8, second=0)) self.assertEqual(lr['s'], 10) self.assertEqual(lr['_reset'], 1) self.assertTrue(('dt' not in lr)) self.assertTrue(('ds' not in lr)) r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16, minute=8, second=1)) self.assertEqual(lr['s'], 20) self.assertEqual(lr['_reset'], 0) r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16, minute=8, second=0)) self.assertEqual(lr['s'], 50) self.assertEqual(lr['_reset'], 0) r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16, minute=8, second=1)) self.assertEqual(lr['s'], 60) self.assertEqual(lr['_reset'], 1) r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16, minute=8, second=3)) self.assertEqual(lr['s'], 65) self.assertEqual(lr['_reset'], 0) r.preEncodingFilters = [DeltaFilter('s', 'ds'), DeltaFilter('t', 'dt')] r.rewind() r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=24, hour=16, minute=8, second=1)) self.assertEqual(lr['s'], 20) self.assertEqual(lr['_reset'], 1) self.assertEqual(lr['dt'], 1) self.assertEqual(lr['ds'], 10) r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16, minute=8, second=0)) self.assertEqual(lr['s'], 50) self.assertEqual(lr['_reset'], 0) self.assertEqual(lr['dt'], ((3600 * 24) - 1)) self.assertEqual(lr['ds'], 30) r.compute(inputs, outputs) lr = r.lastRecord self.assertEqual(lr['t'], datetime(year=2011, month=2, day=25, hour=16, minute=8, second=3)) self.assertEqual(lr['s'], 65) self.assertEqual(lr['_reset'], 1) self.assertEqual(lr['dt'], 2) self.assertEqual(lr['ds'], 5)
'[ABC method implementation] retval: a data row (a list or tuple) if available; None, if no more records in the table (End of Stream - EOS); empty sequence (list or tuple) when timing out while waiting for the next record.'
def getNextRecord(self, useCache=True):
pass
'[ABC method implementation]'
def getFieldNames(self):
return self._fieldNames
'[ABC method implementation]'
def getFields(self):
return self._fieldsMeta
'Test that the (internal) moving average maintains the averages correctly, even for null initial condition and when the number of values goes over windowSize. Pass in integers and floats.'
def testMovingAverage(self):
historicalValues = []
total = 0
windowSize = 3

(newAverage, historicalValues, total) = MovingAverage.compute(historicalValues, total, 3, windowSize)
self.assertEqual(newAverage, 3.0)
self.assertEqual(historicalValues, [3.0])
self.assertEqual(total, 3.0)

(newAverage, historicalValues, total) = MovingAverage.compute(historicalValues, total, 4, windowSize)
self.assertEqual(newAverage, 3.5)
self.assertListEqual(historicalValues, [3.0, 4.0])
self.assertEqual(total, 7.0)

(newAverage, historicalValues, total) = MovingAverage.compute(historicalValues, total, 5.0, windowSize)
self.assertEqual(newAverage, 4.0)
self.assertListEqual(historicalValues, [3.0, 4.0, 5.0])
self.assertEqual(total, 12.0)

(newAverage, historicalValues, total) = MovingAverage.compute(historicalValues, total, 6.0, windowSize)
self.assertEqual(newAverage, 5.0)
self.assertListEqual(historicalValues, [4.0, 5.0, 6.0])
self.assertEqual(total, 15.0)
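A minimal sketch of the update rule these expectations imply (not the nupic MovingAverage source): keep at most windowSize values, maintain a running total, and average over however many values are present.

def movingAverageCompute(slidingWindow, total, newVal, windowSize):
    if len(slidingWindow) == windowSize:
        total -= slidingWindow.pop(0)          # drop the oldest value
    slidingWindow.append(float(newVal))
    total += newVal
    return float(total) / len(slidingWindow), slidingWindow, total

window, total = [], 0
for val in (3, 4, 5, 6):
    avg, window, total = movingAverageCompute(window, total, val, 3)
print((avg, window, total))    # (5.0, [4.0, 5.0, 6.0], 15.0), matching the last step above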
'Test that the (internal) moving average maintains the averages correctly, even for a null initial condition and when the number of values goes over windowSize. Pass in integers and floats. This is for the instance method next().'
def testMovingAverageInstance(self):
ma = MovingAverage(windowSize=3)

newAverage = ma.next(3)
self.assertEqual(newAverage, 3.0)
self.assertListEqual(ma.getSlidingWindow(), [3.0])
self.assertEqual(ma.total, 3.0)

newAverage = ma.next(4)
self.assertEqual(newAverage, 3.5)
self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0])
self.assertEqual(ma.total, 7.0)

newAverage = ma.next(5)
self.assertEqual(newAverage, 4.0)
self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])
self.assertEqual(ma.total, 12.0)

newAverage = ma.next(6)
self.assertEqual(newAverage, 5.0)
self.assertListEqual(ma.getSlidingWindow(), [4.0, 5.0, 6.0])
self.assertEqual(ma.total, 15.0)
'Test that the slidingWindow value is correctly assigned when initializing a new MovingAverage object.'
def testMovingAverageSlidingWindowInit(self):
ma = MovingAverage(windowSize=3, existingHistoricalValues=[3.0, 4.0, 5.0])
self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])

ma = MovingAverage(windowSize=3)
self.assertListEqual(ma.getSlidingWindow(), [])
'serialization using pickle'
def testSerialization(self):
ma = MovingAverage(windowSize=3)
ma.next(3)
ma.next(4.5)
ma.next(5)

stored = pickle.dumps(ma)
restored = pickle.loads(stored)
self.assertEqual(restored, ma)
self.assertEqual(ma.next(6), restored.next(6))
'See datetime.tzinfo.fromutc'
def fromutc(self, dt):
return (dt + self._utcoffset).replace(tzinfo=self)
'See datetime.tzinfo.utcoffset'
def utcoffset(self, dt):
return self._utcoffset
'See datetime.tzinfo.dst'
def dst(self, dt):
return _notime
'See datetime.tzinfo.tzname'
def tzname(self, dt):
return self._tzname
'Convert naive time to local time'
def localize(self, dt, is_dst=False):
if dt.tzinfo is not None:
    raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
'Correct the timezone information on the given datetime'
def normalize(self, dt, is_dst=False):
if dt.tzinfo is None:
    raise ValueError('Naive time - no tzinfo set')
return dt.replace(tzinfo=self)
'See datetime.tzinfo.fromutc'
def fromutc(self, dt):
dt = dt.replace(tzinfo=None)
idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
inf = self._transition_info[idx]
return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
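The lookup above is just "find the last UTC transition at or before dt"; a tiny standalone illustration of that bisect pattern, with stand-in data rather than real tzinfo structures:

from bisect import bisect_right

transitions = [0, 10, 20]                    # stand-ins for _utc_transition_times
infos = ['offsetA', 'offsetB', 'offsetC']    # stand-ins for _transition_info entries
for t in (5, 10, 25):
    idx = max(0, bisect_right(transitions, t) - 1)
    print((t, infos[idx]))                   # (5, 'offsetA'), (10, 'offsetB'), (25, 'offsetC')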
'Correct the timezone information on the given datetime.

If date arithmetic crosses DST boundaries, the tzinfo is not magically adjusted.
This method normalizes the tzinfo to the correct one.

To test, first we need to do some setup

>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

We next create a datetime right on an end-of-DST transition point, the instant
when the wallclocks are wound back one hour.

>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'

Now, if we subtract a few minutes from it, note that the timezone information
has not changed.

>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'

But we can fix that by calling the normalize method

>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)''
def normalize(self, dt):
if dt.tzinfo is None:
    raise ValueError('Naive time - no tzinfo set')
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
return self.fromutc(dt)
'Convert naive time to local time.

This method should be used to construct localtimes, rather than passing a tzinfo
argument to a datetime constructor.

is_dst is used to determine the correct timezone in the ambiguous period at the
end of daylight savings time.

>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'

Use is_dst=None to raise an AmbiguousTimeError for ambiguous times at the end of
daylight savings

>>> try:
...     loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
...     print 'Oops'
Oops

>>> loc_dt1 = amdam.localize(dt, is_dst=None)
Traceback (most recent call last):
AmbiguousTimeError: 2004-10-31 02:00:00

is_dst defaults to False

>>> amdam.localize(dt) == amdam.localize(dt, False)
True'
def localize(self, dt, is_dst=False):
if dt.tzinfo is not None:
    raise ValueError('Not naive datetime (tzinfo is already set)')

possible_loc_dt = set()
for tzinfo in self._tzinfos.values():
    loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
    if loc_dt.replace(tzinfo=None) == dt:
        possible_loc_dt.add(loc_dt)

if len(possible_loc_dt) == 1:
    return possible_loc_dt.pop()

if is_dst is None:
    raise AmbiguousTimeError(dt)

filtered_possible_loc_dt = [p for p in possible_loc_dt
                            if bool(p.tzinfo._dst) == is_dst]
if len(filtered_possible_loc_dt) == 1:
    return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
    filtered_possible_loc_dt = list(possible_loc_dt)

def mycmp(a, b):
    return cmp(a.replace(tzinfo=None) - a.tzinfo._utcoffset,
               b.replace(tzinfo=None) - b.tzinfo._utcoffset)

filtered_possible_loc_dt.sort(mycmp)
return filtered_possible_loc_dt[0]
'See datetime.tzinfo.utcoffset'
def utcoffset(self, dt):
return self._utcoffset
'See datetime.tzinfo.dst'
def dst(self, dt):
return self._dst
'See datetime.tzinfo.tzname'
def tzname(self, dt):
return self._tzname
'Convert naive time to local time'
def localize(self, dt, is_dst=False):
if dt.tzinfo is not None:
    raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
'Correct the timezone information on the given datetime'
def normalize(self, dt, is_dst=False):
if dt.tzinfo is None:
    raise ValueError('Naive time - no tzinfo set')
return dt.replace(tzinfo=self)
'Convert naive time to local time'
def localize(self, dt, is_dst=False):
if dt.tzinfo is not None:
    raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
'Correct the timezone information on the given datetime'
def normalize(self, dt, is_dst=False):
if dt.tzinfo is None:
    raise ValueError('Naive time - no tzinfo set')
return dt.replace(tzinfo=self)
'Create working set from list of path entries (default=sys.path)'
def __init__(self, entries=None):
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []

if entries is None:
    entries = sys.path

for entry in entries:
    self.add_entry(entry)
'Add a path item to ``.entries``, finding any distributions on it. ``find_distributions(entry, False)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.)'
def add_entry(self, entry):
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
    self.add(dist, entry, False)
'True if `dist` is the active distribution for its project'
def __contains__(self, dist):
return (self.by_key.get(dist.key) == dist)
'Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned.'
def find(self, req):
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
    raise VersionConflict(dist, req)
else:
    return dist
'Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order).'
def iter_entry_points(self, group, name=None):
for dist in self:
    entries = dist.get_entry_map(group)
    if name is None:
        for ep in entries.values():
            yield ep
    elif name in entries:
        yield entries[name]
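Typical use of this API goes through the module-level wrapper pkg_resources.iter_entry_points; the 'console_scripts' group below is just a common example of an entry-point group.

import pkg_resources

names = [ep.name for ep in pkg_resources.iter_entry_points(group='console_scripts')]
print(names[:5])    # entry points advertised by installed distributions, if any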
'Locate distribution for `requires` and run `script_name` script'
def run_script(self, requires, script_name):
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
'Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items\' path entries were added to the working set.'
def __iter__(self):
seen = {}
for item in self.entries:
    for key in self.entry_keys[item]:
        if key not in seen:
            seen[key] = 1
            yield self.by_key[key]
'Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set\'s ``.entries`` (if it wasn\'t already present). `dist` is only added to the working set if it\'s for a project that doesn\'t already have a distribution in the set. If it\'s added, any callbacks registered with the ``subscribe()`` method will be called.'
def add(self, dist, entry=None, insert=True):
if insert:
    dist.insert_on(self.entries, entry)
if entry is None:
    entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if dist.key in self.by_key:
    return
self.by_key[dist.key] = dist
if dist.key not in keys:
    keys.append(dist.key)
if dist.key not in keys2:
    keys2.append(dist.key)
self._added_new(dist)
'List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``.'
def resolve(self, requirements, env=None, installer=None):
requirements = list(requirements)[::-1]
processed = {}
best = {}
to_activate = []

while requirements:
    req = requirements.pop(0)
    if req in processed:
        continue
    dist = best.get(req.key)
    if dist is None:
        dist = self.by_key.get(req.key)
        if dist is None:
            if env is None:
                env = Environment(self.entries)
            dist = best[req.key] = env.best_match(req, self, installer)
            if dist is None:
                raise DistributionNotFound(req)
        to_activate.append(dist)
    if dist not in req:
        raise VersionConflict(dist, req)
    requirements.extend(dist.requires(req.extras)[::-1])
    processed[req] = True

return to_activate
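A small usage sketch of resolve() via the module-level working set; 'setuptools' is only an example requirement and must be installed for this to succeed.

import pkg_resources

reqs = pkg_resources.parse_requirements(['setuptools'])
dists = pkg_resources.working_set.resolve(reqs)
print([d.project_name for d in dists])    # the distribution(s) needed to satisfy the requirement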
'Find all activatable distributions in `plugin_env`

Example usage::

    distributions, errors = working_set.find_plugins(
        Environment(plugin_dirlist)
    )
    map(working_set.add, distributions)   # add plugins+libs to sys.path
    print "Couldn\'t load", errors        # display errors

The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project\'s "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` that contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions.

`installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved.

This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance.'
def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True):
plugin_projects = list(plugin_env)
plugin_projects.sort()   # process plugin projects in a stable (alphabetical) order

error_info = {}
distributions = {}

if full_env is None:
    env = Environment(self.entries)
    env += plugin_env
else:
    env = full_env + plugin_env

shadow_set = self.__class__([])
map(shadow_set.add, self)   # seed the shadow set with our own entries

for project_name in plugin_projects:
    for dist in plugin_env[project_name]:
        req = [dist.as_requirement()]
        try:
            resolvees = shadow_set.resolve(req, env, installer)
        except ResolutionError as v:
            error_info[dist] = v   # record why this plugin version could not be resolved
            if fallback:
                continue           # try the next-oldest version of this plugin
            else:
                break              # give up on this project entirely
        else:
            map(shadow_set.add, resolvees)
            distributions.update(dict.fromkeys(resolvees))
            break   # resolved; skip older versions of this project

distributions = list(distributions)
distributions.sort()

return (distributions, error_info)
'Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set.'
def require(self, *requirements):
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
    self.add(dist)
return needed
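require() is the convenience wrapper most callers use, via pkg_resources.require; it resolves, activates, and returns the needed distributions. 'setuptools>=0.6' below is just an example requirement string.

import pkg_resources

needed = pkg_resources.require('setuptools>=0.6')
print([str(d) for d in needed])    # e.g. ['setuptools 40.8.0'], depending on what is installed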
'Invoke `callback` for all distributions (including existing ones)'
def subscribe(self, callback):
if callback in self.callbacks:
    return
self.callbacks.append(callback)
for dist in self:
    callback(dist)
'Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``\'2.4\'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version.'
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
'Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned.'
def can_add(self, dist):
return (((self.python is None) or (dist.py_version is None) or (dist.py_version == self.python)) and compatible_platforms(dist.platform, self.platform))
'Remove `dist` from the environment'
def remove(self, dist):
self._distmap[dist.key].remove(dist)
'Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added.'
def scan(self, search_path=None):
if search_path is None:
    search_path = sys.path
for item in search_path:
    for dist in find_distributions(item):
        self.add(dist)
'Return a newest-to-oldest list of distributions for `project_name`'
def __getitem__(self, project_name):
try:
    return self._cache[project_name]
except KeyError:
    project_name = project_name.lower()
    if project_name not in self._distmap:
        return []

if project_name not in self._cache:
    dists = self._cache[project_name] = self._distmap[project_name]
    _sort_dists(dists)

return self._cache[project_name]
'Add `dist` if we ``can_add()`` it and it isn\'t already added'
def add(self, dist):
if self.can_add(dist) and dist.has_version():
    dists = self._distmap.setdefault(dist.key, [])
    if dist not in dists:
        dists.append(dist)
        if dist.key in self._cache:
            _sort_dists(self._cache[dist.key])
'Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn\'t active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment\'s ``obtain(req, installer)`` method will be returned.'
def best_match(self, req, working_set, installer=None):
dist = working_set.find(req)
if dist is not None:
    return dist
for dist in self[req.key]:
    if dist in req:
        return dist
return self.obtain(req, installer)
'Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.'
def obtain(self, requirement, installer=None):
if (installer is not None): return installer(requirement)
'Yield the unique project names of the available distributions'
def __iter__(self):
for key in self._distmap.keys():
    if self[key]:
        yield key
'In-place addition of a distribution or environment'
def __iadd__(self, other):
if isinstance(other, Distribution):
    self.add(other)
elif isinstance(other, Environment):
    for project in other:
        for dist in other[project]:
            self.add(dist)
else:
    raise TypeError("Can't add %r to environment" % (other,))
return self
'Add an environment or distribution to an environment'
def __add__(self, other):
new = self.__class__([], platform=None, python=None)
for env in (self, other):
    new += env
return new
'Does the named resource exist?'
def resource_exists(self, package_or_requirement, resource_name):
return get_provider(package_or_requirement).has_resource(resource_name)
'Is the named resource an existing directory?'
def resource_isdir(self, package_or_requirement, resource_name):
return get_provider(package_or_requirement).resource_isdir(resource_name)
'Return a true filesystem path for specified resource'
def resource_filename(self, package_or_requirement, resource_name):
return get_provider(package_or_requirement).get_resource_filename(self, resource_name)
'Return a readable file-like object for specified resource'
def resource_stream(self, package_or_requirement, resource_name):
return get_provider(package_or_requirement).get_resource_stream(self, resource_name)
'Return specified resource as a string'
def resource_string(self, package_or_requirement, resource_name):
return get_provider(package_or_requirement).get_resource_string(self, resource_name)
'List the contents of the named resource directory'
def resource_listdir(self, package_or_requirement, resource_name):
return get_provider(package_or_requirement).resource_listdir(resource_name)
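The resource_* methods above are usually reached through the module-level functions that pkg_resources exposes; 'mypackage' and 'data/config.txt' below are hypothetical names used only for illustration.

import pkg_resources

# Substitute a real package and resource path for the hypothetical names.
if pkg_resources.resource_exists('mypackage', 'data/config.txt'):
    text = pkg_resources.resource_string('mypackage', 'data/config.txt')    # resource contents
    path = pkg_resources.resource_filename('mypackage', 'data/config.txt')  # real filesystem path
    listing = pkg_resources.resource_listdir('mypackage', 'data')           # names in the directory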
'Give an error message for problems extracting file(s)'
def extraction_error(self):
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError(("Can't extract file(s) to egg cache\n\nThe following error occurred while trying to extract file(s) to the Python egg\ncache:\n\n %s\n\nThe Python egg cache directory is currently set to:\n\n %s\n\nPerhaps your account does not have write access to this directory? You can\nchange the cache directory by setting the PYTHON_EGG_CACHE environment\nvariable to point to an accessible directory.\n" % (old_exc, cache_path)))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
'Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. `archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg\'s extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later.'
def get_cache_path(self, archive_name, names=()):
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
    ensure_directory(target_path)
except:
    self.extraction_error()
self.cached_files[target_path] = 1
return target_path
'Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don\'t have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns.'
def postprocess(self, tempname, filename):
if os.name == 'posix':
    # 365 == 0555 (read/execute for all) and 4095 == 07777 (permission bits only)
    mode = (os.stat(tempname).st_mode | 365) & 4095
    os.chmod(tempname, mode)
'Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine\'s documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.)'
def set_extraction_path(self, path):
if self.cached_files:
    raise ValueError("Can't change extraction path, files already extracted")
self.extraction_path = path