body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---
@override_settings(DEV=True)
def test_fx_dev_browser_36_0_a2(self, render_mock):
'Should use dev browser firstrun template for 36.0a2'
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='36.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) | -6,082,857,057,709,693,000 | Should use dev browser firstrun template for 36.0a2 | bedrock/firefox/tests/test_base.py | test_fx_dev_browser_36_0_a2 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_dev_browser_36_0_a2(self, render_mock):
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='36.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) |
@override_settings(DEV=True)
def test_fx_dev_browser_34_0_a2(self, render_mock):
'Should use standard firstrun template for older aurora'
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-tour.html']) | 3,963,251,421,227,757,600 | Should use standard firstrun template for older aurora | bedrock/firefox/tests/test_base.py | test_fx_dev_browser_34_0_a2 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_dev_browser_34_0_a2(self, render_mock):
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-tour.html']) |
@override_settings(DEV=True)
def test_fx_search_tour_34_0(self, render_mock):
'Should use search tour template for 34.0'
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-34-tour.html']) | 597,794,562,570,242,400 | Should use search tour template for 34.0 | bedrock/firefox/tests/test_base.py | test_fx_search_tour_34_0 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_search_tour_34_0(self, render_mock):
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-34-tour.html']) |
@override_settings(DEV=True)
def test_fx_search_tour_34_0_5(self, render_mock):
'Should use search tour template for 34.0.5'
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0.5')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-34-tour.html']) | -1,516,847,940,988,662,300 | Should use search tour template for 34.0.5 | bedrock/firefox/tests/test_base.py | test_fx_search_tour_34_0_5 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_search_tour_34_0_5(self, render_mock):
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0.5')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-34-tour.html']) |
@override_settings(DEV=True)
def test_fx_search_tour_34_0_locales(self, render_mock):
'Should use australis template for 34.0 non en-US locales'
req = self.rf.get('/en-US/firefox/tour/')
req.locale = 'de'
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-tour.html']) | -147,627,887,027,707,520 | Should use australis template for 34.0 non en-US locales | bedrock/firefox/tests/test_base.py | test_fx_search_tour_34_0_locales | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_search_tour_34_0_locales(self, render_mock):
req = self.rf.get('/en-US/firefox/tour/')
req.locale = 'de'
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-tour.html']) |
@override_settings(DEV=False)
def test_fx_australis_secure_redirect(self, render_mock):
'Should redirect to https'
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp['location'], ('https://testserver' + url)) | 6,487,019,531,341,117,000 | Should redirect to https | bedrock/firefox/tests/test_base.py | test_fx_australis_secure_redirect | MozFux/bedrock | python | @override_settings(DEV=False)
def test_fx_australis_secure_redirect(self, render_mock):
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp['location'], ('https://testserver' + url)) |
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_not_dev(self, render_mock):
'Should not redirect to https: in DEV mode.'
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) | -1,218,235,115,904,298,800 | Should not redirect to https: in DEV mode. | bedrock/firefox/tests/test_base.py | test_fx_australis_secure_redirect_not_dev | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_australis_secure_redirect_not_dev(self, render_mock):
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) |
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_secure(self, render_mock):
'Should not redirect to https: when already secure.'
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=True):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) | 8,461,759,121,701,641,000 | Should not redirect to https: when already secure. | bedrock/firefox/tests/test_base.py | test_fx_australis_secure_redirect_secure | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_australis_secure_redirect_secure(self, render_mock):
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=True):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) |
@override_settings(DEV=True)
def test_can_post(self, render_mock):
'Home page must accept post for newsletter signup.'
req = self.rf.post('/en-US/firefox/firstrun/')
self.view(req)
render_mock.assert_called_once_with(req, ['firefox/australis/firstrun-tour.html'], ANY) | -5,104,610,902,209,316,000 | Home page must accept post for newsletter signup. | bedrock/firefox/tests/test_base.py | test_can_post | MozFux/bedrock | python | @override_settings(DEV=True)
def test_can_post(self, render_mock):
req = self.rf.post('/en-US/firefox/firstrun/')
self.view(req)
render_mock.assert_called_once_with(req, ['firefox/australis/firstrun-tour.html'], ANY) |
@override_settings(DEV=True)
def test_fx_australis_29(self, render_mock):
'Should use firstrun tour template'
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='29.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html']) | 178,032,531,148,716,450 | Should use firstrun tour template | bedrock/firefox/tests/test_base.py | test_fx_australis_29 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_australis_29(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='29.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html']) |
@override_settings(DEV=True)
def test_fx_dev_browser_35_0_a2(self, render_mock):
'Should use dev browser firstrun template for 35.0a2'
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='35.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) | 7,794,727,572,872,333,000 | Should use dev browser firstrun template for 35.0a2 | bedrock/firefox/tests/test_base.py | test_fx_dev_browser_35_0_a2 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_dev_browser_35_0_a2(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='35.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) |
@override_settings(DEV=True)
def test_fx_dev_browser_35_1_a2(self, render_mock):
'Should use dev browser firstrun template for 35.1a2'
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='35.1a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) | 4,322,405,846,441,060,000 | Should use dev browser firstrun template for 35.1a2 | bedrock/firefox/tests/test_base.py | test_fx_dev_browser_35_1_a2 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_dev_browser_35_1_a2(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='35.1a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) |
@override_settings(DEV=True)
def test_fx_dev_browser_36_0_a2(self, render_mock):
'Should use dev browser firstrun template for 36.0a2'
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='36.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) | -873,573,943,611,264,300 | Should use dev browser firstrun template for 36.0a2 | bedrock/firefox/tests/test_base.py | test_fx_dev_browser_36_0_a2 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_dev_browser_36_0_a2(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='36.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html']) |
@override_settings(DEV=True)
def test_fx_dev_browser_34_0_a2(self, render_mock):
'Should use standard firstrun template for older aurora'
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html']) | 7,913,842,625,163,997,000 | Should use standard firstrun template for older aurora | bedrock/firefox/tests/test_base.py | test_fx_dev_browser_34_0_a2 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_dev_browser_34_0_a2(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html']) |
@override_settings(DEV=True)
def test_fx_search_tour_34_0(self, render_mock):
'Should use search tour template for 34.0'
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-34-tour.html']) | -2,371,753,744,900,670,500 | Should use search tour template for 34.0 | bedrock/firefox/tests/test_base.py | test_fx_search_tour_34_0 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_search_tour_34_0(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-34-tour.html']) |
@override_settings(DEV=True)
def test_fx_search_tour_34_0_5(self, render_mock):
'Should use search tour template for 34.0.5'
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0.5')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-34-tour.html']) | 6,636,234,812,839,590,000 | Should use search tour template for 34.0.5 | bedrock/firefox/tests/test_base.py | test_fx_search_tour_34_0_5 | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_search_tour_34_0_5(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0.5')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-34-tour.html']) |
@override_settings(DEV=True)
def test_fx_search_tour_34_0_locales(self, render_mock):
'Should use australis template for 34.0 non en-US locales'
req = self.rf.get('/en-US/firefox/firstrun/')
req.locale = 'de'
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html']) | -3,163,513,675,798,710,000 | Should use australis template for 34.0 non en-US locales | bedrock/firefox/tests/test_base.py | test_fx_search_tour_34_0_locales | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_search_tour_34_0_locales(self, render_mock):
req = self.rf.get('/en-US/firefox/firstrun/')
req.locale = 'de'
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html']) |
@override_settings(DEV=False)
def test_fx_australis_secure_redirect(self, render_mock):
'Should redirect to https:'
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp['location'], ('https://testserver' + url)) | -2,393,300,253,933,957,600 | Should redirect to https: | bedrock/firefox/tests/test_base.py | test_fx_australis_secure_redirect | MozFux/bedrock | python | @override_settings(DEV=False)
def test_fx_australis_secure_redirect(self, render_mock):
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp['location'], ('https://testserver' + url)) |
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_not_dev(self, render_mock):
'Should not redirect to https: in DEV mode.'
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) | -5,515,797,027,378,008,000 | Should not redirect to https: in DEV mode. | bedrock/firefox/tests/test_base.py | test_fx_australis_secure_redirect_not_dev | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_australis_secure_redirect_not_dev(self, render_mock):
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) |
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_secure(self, render_mock):
'Should not redirect to https: when already secure.'
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=True):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) | 8,509,083,562,283,158,000 | Should not redirect to https: when already secure. | bedrock/firefox/tests/test_base.py | test_fx_australis_secure_redirect_secure | MozFux/bedrock | python | @override_settings(DEV=True)
def test_fx_australis_secure_redirect_secure(self, render_mock):
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=True):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200) |
def test_non_firefox(self):
'\n Any non-Firefox user agents should be permanently redirected to\n /firefox/new/.\n '
user_agent = 'random'
self.assert_ua_redirects_to(user_agent, 'firefox.new') | 7,932,674,574,691,870,000 | Any non-Firefox user agents should be permanently redirected to
/firefox/new/. | bedrock/firefox/tests/test_base.py | test_non_firefox | MozFux/bedrock | python | def test_non_firefox(self):
'\n Any non-Firefox user agents should be permanently redirected to\n /firefox/new/.\n '
user_agent = 'random'
self.assert_ua_redirects_to(user_agent, 'firefox.new') |
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='13.0.5')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('13.0.5', GOOD_PLATS))
def test_current_minor_version_firefox(self, latest_mock):
'\n Should show current even if behind by a patch version\n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:13.0) Gecko/20100101 Firefox/13.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') | -2,307,934,918,529,319,400 | Should show current even if behind by a patch version | bedrock/firefox/tests/test_base.py | test_current_minor_version_firefox | MozFux/bedrock | python | @override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='13.0.5')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('13.0.5', GOOD_PLATS))
def test_current_minor_version_firefox(self, latest_mock):
'\n \n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:13.0) Gecko/20100101 Firefox/13.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') |
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='25.0', FIREFOX_ESR='24.1')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('25.0', GOOD_PLATS))
def test_esr_firefox(self, latest_mock):
'\n Currently released ESR firefoxen should not redirect. At present\n that is 24.0.x.\n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:24.0) Gecko/20100101 Firefox/24.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') | -1,867,156,132,769,996,800 | Currently released ESR firefoxen should not redirect. At present
that is 24.0.x. | bedrock/firefox/tests/test_base.py | test_esr_firefox | MozFux/bedrock | python | @override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='25.0', FIREFOX_ESR='24.1')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('25.0', GOOD_PLATS))
def test_esr_firefox(self, latest_mock):
'\n Currently released ESR firefoxen should not redirect. At present\n that is 24.0.x.\n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:24.0) Gecko/20100101 Firefox/24.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') |
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('16.0', GOOD_PLATS))
def test_current_firefox(self, latest_mock):
'\n Currently released firefoxen should not redirect.\n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:16.0) Gecko/20100101 Firefox/16.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') | 438,300,697,182,293,600 | Currently released firefoxen should not redirect. | bedrock/firefox/tests/test_base.py | test_current_firefox | MozFux/bedrock | python | @override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('16.0', GOOD_PLATS))
def test_current_firefox(self, latest_mock):
'\n \n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:16.0) Gecko/20100101 Firefox/16.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') |
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('16.0', GOOD_PLATS))
def test_future_firefox(self, latest_mock):
'\n Pre-release firefoxen should not redirect.\n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:18.0) Gecko/20100101 Firefox/18.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') | 6,772,237,399,750,246,000 | Pre-release firefoxen should not redirect. | bedrock/firefox/tests/test_base.py | test_future_firefox | MozFux/bedrock | python | @override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version', return_value=('16.0', GOOD_PLATS))
def test_future_firefox(self, latest_mock):
'\n \n '
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:18.0) Gecko/20100101 Firefox/18.0'
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent') |
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
def test_whatsnew_tour_oldversion(self):
'Should not show tour if upgrading from 33.1 onwards.'
response = self.client.get((self.url + '?oldversion=28.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=27.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=4.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=rv:10.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.1.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=35.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url, HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content) | 3,527,238,019,798,652,400 | Should not show tour if upgrading from 33.1 onwards. | bedrock/firefox/tests/test_base.py | test_whatsnew_tour_oldversion | MozFux/bedrock | python | @override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
def test_whatsnew_tour_oldversion(self):
response = self.client.get((self.url + '?oldversion=28.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=27.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=4.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=rv:10.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.1.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=35.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url, HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content) |
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
def test_whatsnew_search_tour_oldversion(self):
'Should not show tour if upgrading from 34.0 onwards.'
self.url = reverse('firefox.whatsnew', args=['34.1'])
response = self.client.get((self.url + '?oldversion=28.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=27.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=4.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=rv:10.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=35.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url, HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content) | 3,656,918,458,974,156,300 | Should not show tour if upgrading from 34.0 onwards. | bedrock/firefox/tests/test_base.py | test_whatsnew_search_tour_oldversion | MozFux/bedrock | python | @override_settings(DEV=True)
@patch.dict(product_details.firefox_versions, LATEST_FIREFOX_VERSION='16.0')
def test_whatsnew_search_tour_oldversion(self):
self.url = reverse('firefox.whatsnew', args=['34.1'])
response = self.client.get((self.url + '?oldversion=28.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=27.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=4.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=rv:10.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=33.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.0.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=34.1'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get((self.url + '?oldversion=35.0'), HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url, HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content) |
def calculate_statistics(Y_hat, Y, beta=1, average=None):
"\n\tCalculate the precisions, recalls, F-beta scores, and\n\tsupports for each class in `targets`.\n\n\tParameters\n\t----------\n\tY_hat : array-like, shape=(n_samples,)\n\t\tList of data labels.\n\n\tY : array-like, shape=(n_samples,)\n\t\tList of target truth labels.\n\n\tbeta : float, default=1\n\t\tStrength of recall relative to precision in the F-score.\n\n\taverage : {'micro', 'macro', 'weighted', None}, default=None\n\t\tThe type of averaging to perform on statistics. Must be one of:\n\t\t - None : Do not perform averaging, statistics for each class\n\t\t \t\t\tare returned.\n\t\t - 'micro' : Calculate globally, counting total true positives,\n\t\t \t\t\t\tfalse negatives, and false positives.\n\t\t - 'macro' : Calculate per class an unweighted mean.\n\t\t - 'weighted' : Calculate per class the mean weighted by support.\n\n\tReturns\n\t-------\n\tprecisions : float or dict\n\t\tDictionary of precisions for each class if `average` is None.\n\t\tAveraged precision based on averaging method if provided.\n\n\trecalls : float or dict\n\t\tDictionary of recalls for each class if `average` is None.\n\t\tAveraged recall based on averaging method if provided.\n\n\tfscores : float or dict\n\t\tDictionary of fscores for each class if `average` is None.\n\t\tAveraged fscore based on averaging method if provided.\n\n\tsupports : float or dict\n\t\tDictionary of supports for each class if `average` is None.\n\t\tTotal support (number of classes) if averaging method is provided.\n\t"
if (beta < 0):
raise ValueError('Beta must be non-negative')
matrix = multiconfusion_matrix(Y_hat, Y)
matrix_labels = list(matrix.keys())
matrix = np.array([matrix[l] for l in matrix_labels])
tp_sum = matrix[:, 1, 1]
label_sum = (tp_sum + matrix[:, 0, 1])
target_sum = (tp_sum + matrix[:, 1, 0])
if (average == 'micro'):
tp_sum = np.array([tp_sum.sum()])
label_sum = np.array([label_sum.sum()])
target_sum = np.array([target_sum.sum()])
with np.errstate(divide='ignore', invalid='ignore'):
precisions = np.divide(tp_sum, label_sum, out=np.zeros(tp_sum.shape, dtype=float), where=(label_sum != 0))
recalls = np.divide(tp_sum, target_sum, out=np.zeros(tp_sum.shape, dtype=float), where=(target_sum != 0))
if np.isposinf(beta):
fscores = recalls
else:
beta2 = (beta ** 2)
denom = ((beta2 * precisions) + recalls)
valid = np.where((denom != 0))[0]
fscores = np.zeros_like(denom)
fscores[valid] = ((((1 + beta2) * precisions[valid]) * recalls[valid]) / denom[valid])
if (average == 'weighted'):
weights = target_sum
if (target_sum.sum() == 0):
return (0, 0, 0, target_sum.sum())
else:
weights = None
if (average is not None):
precisions = np.average(precisions, weights=weights)
recalls = np.average(recalls, weights=weights)
fscores = np.average(fscores, weights=weights)
supports = target_sum.sum()
else:
precisions = {matrix_labels[k]: precisions[k] for k in range(len(matrix_labels))}
recalls = {matrix_labels[k]: recalls[k] for k in range(len(matrix_labels))}
fscores = {matrix_labels[k]: fscores[k] for k in range(len(matrix_labels))}
supports = {matrix_labels[k]: target_sum[k] for k in range(len(matrix_labels))}
return (precisions, recalls, fscores, supports) | 492,335,461,752,231,600 | Calculate the precisions, recalls, F-beta scores, and
supports for each class in `targets`.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
average : {'micro', 'macro', 'weighted', None}, default=None
The type of averaging to perform on statistics. Must be one of:
- None : Do not perform averaging, statistics for each class
are returned.
- 'micro' : Calculate globally, counting total true positives,
false negatives, and false positives.
- 'macro' : Calculate per class an unweighted mean.
- 'weighted' : Calculate per class the mean weighted by support.
Returns
-------
precisions : float or dict
Dictionary of precisions for each class if `average` is None.
Averaged precision based on averaging method if provided.
recalls : float or dict
Dictionary of recalls for each class if `average` is None.
Averaged recall based on averaging method if provided.
fscores : float or dict
Dictionary of fscores for each class if `average` is None.
Averaged fscore based on averaging method if provided.
supports : float or dict
Dictionary of supports for each class if `average` is None.
Total support (number of samples) if averaging method is provided. | sleepens/analysis/_report.py | calculate_statistics | paradoxysm/sleepens | python | def calculate_statistics(Y_hat, Y, beta=1, average=None):
"\n\tCalculate the precisions, recalls, F-beta scores, and\n\tsupports for each class in `targets`.\n\n\tParameters\n\t----------\n\tY_hat : array-like, shape=(n_samples,)\n\t\tList of data labels.\n\n\tY : array-like, shape=(n_samples,)\n\t\tList of target truth labels.\n\n\tbeta : float, default=1\n\t\tStrength of recall relative to precision in the F-score.\n\n\taverage : {'micro', 'macro', 'weighted', None}, default=None\n\t\tThe type of averaging to perform on statistics. Must be one of:\n\t\t - None : Do not perform averaging, statistics for each class\n\t\t \t\t\tare returned.\n\t\t - 'micro' : Calculate globally, counting total true positives,\n\t\t \t\t\t\tfalse negatives, and false positives.\n\t\t - 'macro' : Calculate per class an unweighted mean.\n\t\t - 'weighted' : Calculate per class the mean weighted by support.\n\n\tReturns\n\t-------\n\tprecisions : float or dict\n\t\tDictionary of precisions for each class if `average` is None.\n\t\tAveraged precision based on averaging method if provided.\n\n\trecalls : float or dict\n\t\tDictionary of recalls for each class if `average` is None.\n\t\tAveraged recall based on averaging method if provided.\n\n\tfscores : float or dict\n\t\tDictionary of fscores for each class if `average` is None.\n\t\tAveraged fscore based on averaging method if provided.\n\n\tsupports : float or dict\n\t\tDictionary of supports for each class if `average` is None.\n\t\tTotal support (number of classes) if averaging method is provided.\n\t"
if (beta < 0):
raise ValueError('Beta must be non-negative')
matrix = multiconfusion_matrix(Y_hat, Y)
matrix_labels = list(matrix.keys())
matrix = np.array([matrix[l] for l in matrix_labels])
tp_sum = matrix[:, 1, 1]
label_sum = (tp_sum + matrix[:, 0, 1])
target_sum = (tp_sum + matrix[:, 1, 0])
if (average == 'micro'):
tp_sum = np.array([tp_sum.sum()])
label_sum = np.array([label_sum.sum()])
target_sum = np.array([target_sum.sum()])
with np.errstate(divide='ignore', invalid='ignore'):
precisions = np.divide(tp_sum, label_sum, out=np.zeros(tp_sum.shape, dtype=float), where=(label_sum != 0))
recalls = np.divide(tp_sum, target_sum, out=np.zeros(tp_sum.shape, dtype=float), where=(target_sum != 0))
if np.isposinf(beta):
fscores = recalls
else:
beta2 = (beta ** 2)
denom = ((beta2 * precisions) + recalls)
valid = np.where((denom != 0))[0]
fscores = np.zeros_like(denom)
fscores[valid] = ((((1 + beta2) * precisions[valid]) * recalls[valid]) / denom[valid])
if (average == 'weighted'):
weights = target_sum
if (target_sum.sum() == 0):
return (0, 0, 0, target_sum.sum())
else:
weights = None
if (average is not None):
precisions = np.average(precisions, weights=weights)
recalls = np.average(recalls, weights=weights)
fscores = np.average(fscores, weights=weights)
supports = target_sum.sum()
else:
precisions = {matrix_labels[k]: precisions[k] for k in range(len(matrix_labels))}
recalls = {matrix_labels[k]: recalls[k] for k in range(len(matrix_labels))}
fscores = {matrix_labels[k]: fscores[k] for k in range(len(matrix_labels))}
supports = {matrix_labels[k]: target_sum[k] for k in range(len(matrix_labels))}
return (precisions, recalls, fscores, supports) |
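A hypothetical usage sketch for `calculate_statistics` follows; the label lists and the numbers in the comments are illustrative assumptions derived from the formulas above, not verified output, and it assumes the function and its `multiconfusion_matrix` dependency are importable.

```python
# Illustrative sketch only: assumes calculate_statistics (and its
# multiconfusion_matrix dependency) are importable from sleepens.analysis.
Y_hat = [0, 1, 1, 2, 0]  # predicted labels
Y     = [0, 1, 2, 2, 1]  # ground-truth labels

# average=None: per-class dicts keyed by label.
precisions, recalls, fscores, supports = calculate_statistics(Y_hat, Y)

# average='micro': TP/FP/FN are pooled over classes, so for single-label
# data precision == recall == accuracy (3 correct out of 5 -> 0.6).
p, r, f, n = calculate_statistics(Y_hat, Y, average='micro')

# average='weighted': per-class scores averaged with support as weights.
p, r, f, n = calculate_statistics(Y_hat, Y, average='weighted')
```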
def classification_report(Y_hat, Y, beta=1):
"\n\tCreate a report on classification statistics.\n\n\tParameters\n\t----------\n\tY_hat : array-like, shape=(n_samples,)\n\t\tList of data labels.\n\n\tY : array-like, shape=(n_samples,)\n\t\tList of target truth labels.\n\n\tbeta : float, default=1\n\t\tStrength of recall relative to precision in the F-score.\n\n\tReturns\n\t-------\n\treport : dict\n\t\tDictionary containing classification statistics in the following\n\t\tstructure:\n\t\t - 'label': {\n\t\t \t\t\t\t'precision':0.5,\n\t\t\t\t\t\t'recall':1.0,\n\t\t\t\t\t\t'f-score':0.67,\n\t\t\t\t\t\t'support':1\n\t\t \t\t\t},\n\t\t ...\n\t\t - 'beta': 1,\n\t\t - 'support': 5,\n\t\t - 'accuracy': 0.8,\n\t\t - 'macro avg': {\n\t\t \t\t\t\t'precision':0.6,\n\t\t\t\t\t\t'recall':0.9,\n\t\t\t\t\t\t'f-score':0.67,\n\t\t \t\t\t},\n\t\t - 'weighted avg': {\n\t\t \t\t\t\t'precision':0.67,\n\t\t\t\t\t\t'recall':0.9,\n\t\t\t\t\t\t'f-score':0.67,\n\t\t \t\t\t}\n\t"
stats = calculate_statistics(Y_hat, Y, beta=beta)
(_, _, accuracy, total) = calculate_statistics(Y_hat, Y, beta=beta, average='micro')
macro = calculate_statistics(Y_hat, Y, beta=beta, average='macro')
weighted = calculate_statistics(Y_hat, Y, beta=beta, average='weighted')
h = ['precision', 'recall', 'f-score', 'support']
report = {'beta': beta, 'support': total, 'accuracy': accuracy, 'macro avg': {h[i]: macro[i] for i in range(len(h))}, 'weighted avg': {h[i]: weighted[i] for i in range(len(h))}}
classes = set(stats[0].keys())
for c in classes:
report[c] = {h[i]: stats[i][c] for i in range(len(h))}
return report | -4,029,619,413,932,635,000 | Create a report on classification statistics.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
Returns
-------
report : dict
Dictionary containing classification statistics in the following
structure:
- 'label': {
'precision':0.5,
'recall':1.0,
'f-score':0.67,
'support':1
},
...
- 'beta': 1,
- 'support': 5,
- 'accuracy': 0.8,
- 'macro avg': {
'precision':0.6,
'recall':0.9,
'f-score':0.67,
},
- 'weighted avg': {
'precision':0.67,
'recall':0.9,
'f-score':0.67,
} | sleepens/analysis/_report.py | classification_report | paradoxysm/sleepens | python | def classification_report(Y_hat, Y, beta=1):
"\n\tCreate a report on classification statistics.\n\n\tParameters\n\t----------\n\tY_hat : array-like, shape=(n_samples,)\n\t\tList of data labels.\n\n\tY : array-like, shape=(n_samples,)\n\t\tList of target truth labels.\n\n\tbeta : float, default=1\n\t\tStrength of recall relative to precision in the F-score.\n\n\tReturns\n\t-------\n\treport : dict\n\t\tDictionary containing classification statistics in the following\n\t\tstructure:\n\t\t - 'label': {\n\t\t \t\t\t\t'precision':0.5,\n\t\t\t\t\t\t'recall':1.0,\n\t\t\t\t\t\t'f-score':0.67,\n\t\t\t\t\t\t'support':1\n\t\t \t\t\t},\n\t\t ...\n\t\t - 'beta': 1,\n\t\t - 'support': 5,\n\t\t - 'accuracy': 0.8,\n\t\t - 'macro avg': {\n\t\t \t\t\t\t'precision':0.6,\n\t\t\t\t\t\t'recall':0.9,\n\t\t\t\t\t\t'f-score':0.67,\n\t\t \t\t\t},\n\t\t - 'weighted avg': {\n\t\t \t\t\t\t'precision':0.67,\n\t\t\t\t\t\t'recall':0.9,\n\t\t\t\t\t\t'f-score':0.67,\n\t\t \t\t\t}\n\t"
stats = calculate_statistics(Y_hat, Y, beta=beta)
(_, _, accuracy, total) = calculate_statistics(Y_hat, Y, beta=beta, average='micro')
macro = calculate_statistics(Y_hat, Y, beta=beta, average='macro')
weighted = calculate_statistics(Y_hat, Y, beta=beta, average='weighted')
h = ['precision', 'recall', 'f-score', 'support']
report = {'beta': beta, 'support': total, 'accuracy': accuracy, 'macro avg': {h[i]: macro[i] for i in range(len(h))}, 'weighted avg': {h[i]: weighted[i] for i in range(len(h))}}
classes = set(stats[0].keys())
for c in classes:
report[c] = {h[i]: stats[i][c] for i in range(len(h))}
return report |
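As a hedged illustration of consuming the returned structure, the sketch below pretty-prints a report; the key layout follows the docstring above, and every top-level key outside the reserved set is assumed to be a class label.

```python
# Sketch: walk the dict produced by classification_report.
report = classification_report(Y_hat, Y, beta=1)
reserved = {'beta', 'support', 'accuracy', 'macro avg', 'weighted avg'}
print('accuracy: {:.3f} over {} samples'.format(report['accuracy'], report['support']))
for label in (k for k in report if k not in reserved):
    row = report[label]
    print('{}: precision={:.3f} recall={:.3f} f-score={:.3f} support={}'.format(
        label, row['precision'], row['recall'], row['f-score'], row['support']))
```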
def conv_forward_im2col(x, w, b, conv_param):
'\n A fast implementation of the forward pass for a convolutional layer\n based on im2col and col2im.\n '
(N, C, H, W) = x.shape
(num_filters, _, filter_height, filter_width) = w.shape
(stride, pad) = (conv_param['stride'], conv_param['pad'])
assert ((((W + (2 * pad)) - filter_width) % stride) == 0), 'width does not work'
assert ((((H + (2 * pad)) - filter_height) % stride) == 0), 'height does not work'
out_height = ((((H + (2 * pad)) - filter_height) // stride) + 1)
out_width = ((((W + (2 * pad)) - filter_width) // stride) + 1)
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = (w.reshape((w.shape[0], (- 1))).dot(x_cols) + b.reshape((- 1), 1))
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return (out, cache) | -9,140,524,359,592,905,000 | A fast implementation of the forward pass for a convolutional layer
based on im2col and col2im. | 2016winter/assignment2/cs231n/fast_layers.py | conv_forward_im2col | anandsaha/cs231n.assignments | python | def conv_forward_im2col(x, w, b, conv_param):
'\n A fast implementation of the forward pass for a convolutional layer\n based on im2col and col2im.\n '
(N, C, H, W) = x.shape
(num_filters, _, filter_height, filter_width) = w.shape
(stride, pad) = (conv_param['stride'], conv_param['pad'])
assert ((((W + (2 * pad)) - filter_width) % stride) == 0), 'width does not work'
assert ((((H + (2 * pad)) - filter_height) % stride) == 0), 'height does not work'
out_height = ((((H + (2 * pad)) - filter_height) // stride) + 1)
out_width = ((((W + (2 * pad)) - filter_width) // stride) + 1)
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = (w.reshape((w.shape[0], (- 1))).dot(x_cols) + b.reshape((- 1), 1))
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return (out, cache) |
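`im2col_cython` is a compiled helper whose source is not shown here; the pure-NumPy sketch below illustrates the idea it is assumed to implement, with the column order chosen to match the `reshape`/`transpose` in `conv_forward_im2col` above.

```python
import numpy as np

def im2col_naive(x, fh, fw, pad, stride):
    """Pure-NumPy sketch of im2col (NOT the compiled helper used above).

    Every receptive field of the padded input becomes one column, so the
    convolution collapses into a single matrix multiply:
        res = w.reshape(F, -1).dot(cols) + b.reshape(-1, 1)
    """
    N, C, H, W = x.shape
    xp = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    out_h = (H + 2 * pad - fh) // stride + 1
    out_w = (W + 2 * pad - fw) // stride + 1
    cols = np.zeros((C * fh * fw, out_h * out_w * N), dtype=x.dtype)
    col = 0
    for i in range(out_h):          # slowest axis: output row ...
        for j in range(out_w):      # ... then output column ...
            for n in range(N):      # ... then sample, matching the reshape above
                patch = xp[n, :, i * stride:i * stride + fh,
                                 j * stride:j * stride + fw]
                cols[:, col] = patch.reshape(-1)
                col += 1
    return cols
```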
def conv_backward_im2col(dout, cache):
'\n A fast implementation of the backward pass for a convolutional layer\n based on im2col and col2im.\n '
(x, w, b, conv_param, x_cols) = cache
(stride, pad) = (conv_param['stride'], conv_param['pad'])
db = np.sum(dout, axis=(0, 2, 3))
(num_filters, _, filter_height, filter_width) = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, (- 1))
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, (- 1)).T.dot(dout_reshaped)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3], filter_height, filter_width, pad, stride)
return (dx, dw, db) | 6,272,160,090,445,658,000 | A fast implementation of the backward pass for a convolutional layer
based on im2col and col2im. | 2016winter/assignment2/cs231n/fast_layers.py | conv_backward_im2col | anandsaha/cs231n.assignments | python | def conv_backward_im2col(dout, cache):
'\n A fast implementation of the backward pass for a convolutional layer\n based on im2col and col2im.\n '
(x, w, b, conv_param, x_cols) = cache
(stride, pad) = (conv_param['stride'], conv_param['pad'])
db = np.sum(dout, axis=(0, 2, 3))
(num_filters, _, filter_height, filter_width) = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, (- 1))
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, (- 1)).T.dot(dout_reshaped)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3], filter_height, filter_width, pad, stride)
return (dx, dw, db) |
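A numeric gradient check in the style of the cs231n notebooks is a natural way to exercise this forward/backward pair; the sketch below assumes the course's `eval_numerical_gradient_array` helper and the compiled Cython extension are available.

```python
import numpy as np
from cs231n.gradient_check import eval_numerical_gradient_array  # assumed course helper

x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(4, 3, 3, 3)
b = np.random.randn(4)
conv_param = {'stride': 1, 'pad': 1}

out, cache = conv_forward_im2col(x, w, b, conv_param)
dout = np.random.randn(*out.shape)
dx, dw, db = conv_backward_im2col(dout, cache)

# Compare the analytic dx against a finite-difference estimate; the
# relative error should be tiny (roughly 1e-8 or below).
dx_num = eval_numerical_gradient_array(
    lambda x: conv_forward_im2col(x, w, b, conv_param)[0], x, dout)
rel_err = np.max(np.abs(dx - dx_num) / np.maximum(1e-8, np.abs(dx) + np.abs(dx_num)))
print(rel_err)
```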
def max_pool_forward_fast(x, pool_param):
'\n A fast implementation of the forward pass for a max pooling layer.\n\n This chooses between the reshape method and the im2col method. If the pooling\n regions are square and tile the input image, then we can use the reshape\n method which is very fast. Otherwise we fall back on the im2col method, which\n is not much faster than the naive method.\n '
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
same_size = (pool_height == pool_width == stride)
tiles = (((H % pool_height) == 0) and ((W % pool_width) == 0))
if (same_size and tiles):
(out, reshape_cache) = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
(out, im2col_cache) = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return (out, cache) | 7,199,667,716,231,183,000 | A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method. | 2016winter/assignment2/cs231n/fast_layers.py | max_pool_forward_fast | anandsaha/cs231n.assignments | python | def max_pool_forward_fast(x, pool_param):
'\n A fast implementation of the forward pass for a max pooling layer.\n\n This chooses between the reshape method and the im2col method. If the pooling\n regions are square and tile the input image, then we can use the reshape\n method which is very fast. Otherwise we fall back on the im2col method, which\n is not much faster than the naive method.\n '
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
same_size = (pool_height == pool_width == stride)
tiles = (((H % pool_height) == 0) and ((W % pool_width) == 0))
if (same_size and tiles):
(out, reshape_cache) = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
(out, im2col_cache) = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return (out, cache) |
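The dispatch rule above can be observed directly, since the chosen method name is stored as the first element of the cache; a small sketch, assuming the module's pooling helpers are importable:

```python
import numpy as np

x = np.random.randn(2, 3, 8, 8)

# Square window that tiles the input -> fast reshape path.
out, cache = max_pool_forward_fast(
    x, {'pool_height': 2, 'pool_width': 2, 'stride': 2})
print(cache[0])  # 'reshape'

# Overlapping windows (stride < window) -> im2col fallback.
out, cache = max_pool_forward_fast(
    x, {'pool_height': 3, 'pool_width': 3, 'stride': 1})
print(cache[0])  # 'im2col'
```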
def max_pool_backward_fast(dout, cache):
'\n A fast implementation of the backward pass for a max pooling layer.\n\n This switches between the reshape method and the im2col method depending on\n which method was used to generate the cache.\n '
(method, real_cache) = cache
if (method == 'reshape'):
return max_pool_backward_reshape(dout, real_cache)
elif (method == 'im2col'):
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError(('Unrecognized method "%s"' % method)) | 2,514,422,792,829,799,400 | A fast implementation of the backward pass for a max pooling layer.
This switches between the reshape method and the im2col method depending on
which method was used to generate the cache. | 2016winter/assignment2/cs231n/fast_layers.py | max_pool_backward_fast | anandsaha/cs231n.assignments | python | def max_pool_backward_fast(dout, cache):
'\n A fast implementation of the backward pass for a max pooling layer.\n\n This switches between the reshape method and the im2col method depending on\n which method was used to generate the cache.\n '
(method, real_cache) = cache
if (method == 'reshape'):
return max_pool_backward_reshape(dout, real_cache)
elif (method == 'im2col'):
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError(('Unrecognized method "%s"' % method)) |
def max_pool_forward_reshape(x, pool_param):
'\n A fast implementation of the forward pass for the max pooling layer that uses\n some clever reshaping.\n\n This can only be used for square pooling regions that tile the input.\n '
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
assert (pool_height == pool_width == stride), 'Invalid pool params'
assert ((H % pool_height) == 0)
assert ((W % pool_width) == 0)
x_reshaped = x.reshape(N, C, (H // pool_height), pool_height, (W // pool_width), pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return (out, cache) | 1,881,753,171,530,993,200 | A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input. | 2016winter/assignment2/cs231n/fast_layers.py | max_pool_forward_reshape | anandsaha/cs231n.assignments | python | def max_pool_forward_reshape(x, pool_param):
'\n A fast implementation of the forward pass for the max pooling layer that uses\n some clever reshaping.\n\n This can only be used for square pooling regions that tile the input.\n '
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
assert (pool_height == pool_width == stride), 'Invalid pool params'
assert ((H % pool_height) == 0)
assert ((W % pool_width) == 0)
x_reshaped = x.reshape(N, C, (H // pool_height), pool_height, (W // pool_width), pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return (out, cache) |
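A minimal worked example of the reshape trick, assuming Python 3 with the integer divisions above: the two pooling axes become their own dimensions, so two `max` reductions implement the pooling.

```python
import numpy as np

x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
# (N, C, H/2, 2, W/2, 2): axes 3 and 5 index inside each 2x2 window.
v = x.reshape(1, 1, 2, 2, 2, 2)
out = v.max(axis=3).max(axis=4)   # reduce the two window axes
print(out[0, 0])
# [[ 5.  7.]
#  [13. 15.]]
```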
def max_pool_backward_reshape(dout, cache):
"\n A fast implementation of the backward pass for the max pooling layer that\n uses some clever broadcasting and reshaping.\n\n This can only be used if the forward pass was computed using\n max_pool_forward_reshape.\n\n NOTE: If there are multiple argmaxes, this method will assign gradient to\n ALL argmax elements of the input rather than picking one. In this case the\n gradient will actually be incorrect. However this is unlikely to occur in\n practice, so it shouldn't matter much. One possible solution is to split the\n upstream gradient equally among all argmax elements; this should result in a\n valid subgradient. You can make this happen by uncommenting the line below;\n however this results in a significant performance penalty (about 40% slower)\n and is unlikely to matter in practice so we don't do it.\n "
(x, x_reshaped, out) = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
(dout_broadcast, _) = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx | 3,009,217,698,752,915,000 | A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
NOTE: If there are multiple argmaxes, this method will assign gradient to
ALL argmax elements of the input rather than picking one. In this case the
gradient will actually be incorrect. However this is unlikely to occur in
practice, so it shouldn't matter much. One possible solution is to split the
upstream gradient equally among all argmax elements; this should result in a
valid subgradient. You can make this happen by uncommenting the line below;
however this results in a significant performance penalty (about 40% slower)
and is unlikely to matter in practice so we don't do it. | 2016winter/assignment2/cs231n/fast_layers.py | max_pool_backward_reshape | anandsaha/cs231n.assignments | python | def max_pool_backward_reshape(dout, cache):
"\n A fast implementation of the backward pass for the max pooling layer that\n uses some clever broadcasting and reshaping.\n\n This can only be used if the forward pass was computed using\n max_pool_forward_reshape.\n\n NOTE: If there are multiple argmaxes, this method will assign gradient to\n ALL argmax elements of the input rather than picking one. In this case the\n gradient will actually be incorrect. However this is unlikely to occur in\n practice, so it shouldn't matter much. One possible solution is to split the\n upstream gradient equally among all argmax elements; this should result in a\n valid subgradient. You can make this happen by uncommenting the line below;\n however this results in a significant performance penalty (about 40% slower)\n and is unlikely to matter in practice so we don't do it.\n "
(x, x_reshaped, out) = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
(dout_broadcast, _) = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx |
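Note that in this copy the divide-by-count line is active, so the upstream gradient is split equally among tied maxima (the subgradient behaviour the NOTE describes as optional). A tiny sketch of that behaviour, assuming the forward/backward pair above:

```python
import numpy as np

x = np.array([[[[1., 2.],
                [2., 0.]]]])  # one 2x2 window with two tied maxima
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}

out, cache = max_pool_forward_reshape(x, pool_param)
dx = max_pool_backward_reshape(np.ones_like(out), cache)
print(dx[0, 0])
# [[0.  0.5]
#  [0.5 0. ]]   <- the unit gradient is shared between both 2.0 entries
```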
def max_pool_forward_im2col(x, pool_param):
"\n An implementation of the forward pass for max pooling based on im2col.\n\n This isn't much faster than the naive version, so it should be avoided if\n possible.\n "
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
assert (((H - pool_height) % stride) == 0), 'Invalid height'
assert (((W - pool_width) % stride) == 0), 'Invalid width'
out_height = (((H - pool_height) // stride) + 1)
out_width = (((W - pool_width) // stride) + 1)
x_split = x.reshape((N * C), 1, H, W)
x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[(x_cols_argmax, np.arange(x_cols.shape[1]))]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return (out, cache) | -5,186,576,365,797,148,000 | An implementation of the forward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible. | 2016winter/assignment2/cs231n/fast_layers.py | max_pool_forward_im2col | anandsaha/cs231n.assignments | python | def max_pool_forward_im2col(x, pool_param):
"\n An implementation of the forward pass for max pooling based on im2col.\n\n This isn't much faster than the naive version, so it should be avoided if\n possible.\n "
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
assert (((H - pool_height) % stride) == 0), 'Invalid height'
assert (((W - pool_width) % stride) == 0), 'Invalid width'
out_height = (((H - pool_height) // stride) + 1)
out_width = (((W - pool_width) // stride) + 1)
x_split = x.reshape((N * C), 1, H, W)
x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[(x_cols_argmax, np.arange(x_cols.shape[1]))]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return (out, cache) |
def max_pool_backward_im2col(dout, cache):
"\n An implementation of the backward pass for max pooling based on im2col.\n\n This isn't much faster than the naive version, so it should be avoided if\n possible.\n "
(x, x_cols, x_cols_argmax, pool_param) = cache
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[(x_cols_argmax, np.arange(dx_cols.shape[1]))] = dout_reshaped
dx = col2im_indices(dx_cols, ((N * C), 1, H, W), pool_height, pool_width, padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx | 8,171,853,346,095,398,000 | An implementation of the backward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible. | 2016winter/assignment2/cs231n/fast_layers.py | max_pool_backward_im2col | anandsaha/cs231n.assignments | python | def max_pool_backward_im2col(dout, cache):
"\n An implementation of the backward pass for max pooling based on im2col.\n\n This isn't much faster than the naive version, so it should be avoided if\n possible.\n "
(x, x_cols, x_cols_argmax, pool_param) = cache
(N, C, H, W) = x.shape
(pool_height, pool_width) = (pool_param['pool_height'], pool_param['pool_width'])
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[(x_cols_argmax, np.arange(dx_cols.shape[1]))] = dout_reshaped
dx = col2im_indices(dx_cols, ((N * C), 1, H, W), pool_height, pool_width, padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx |
def _deserialize_session_json(serialized_json_str: bytes) -> str:
'\n Helper function to deserialize sessiond:sessions hash list values\n :param serialized_json_str\n '
res = _deserialize_generic_json(str(serialized_json_str, 'utf-8', 'ignore'))
dumped = json.dumps(res, indent=2, sort_keys=True)
return dumped | -1,428,529,131,481,541,400 | Helper function to deserialize sessiond:sessions hash list values
:param serialized_json_str | lte/gateway/python/scripts/state_cli.py | _deserialize_session_json | Rajpratik71/magma | python | def _deserialize_session_json(serialized_json_str: bytes) -> str:
'\n Helper function to deserialize sessiond:sessions hash list values\n :param serialized_json_str\n '
res = _deserialize_generic_json(str(serialized_json_str, 'utf-8', 'ignore'))
dumped = json.dumps(res, indent=2, sort_keys=True)
return dumped |
def _deserialize_generic_json(element: Union[(str, dict, list)]) -> Union[(str, dict, list)]:
'\n Helper function to deserialize dictionaries or list with nested\n json strings\n :param element\n '
if isinstance(element, str):
try:
element = ast.literal_eval(element)
except Exception:
try:
element = jsonpickle.decode(element)
except Exception:
return element
if isinstance(element, dict):
keys = element.keys()
elif isinstance(element, list):
keys = range(len(element))
else:
return element
for k in keys:
element[k] = _deserialize_generic_json(element[k])
return element | -6,177,858,682,929,662,000 | Helper function to deserialize dictionaries or list with nested
json strings
:param element | lte/gateway/python/scripts/state_cli.py | _deserialize_generic_json | Rajpratik71/magma | python | def _deserialize_generic_json(element: Union[(str, dict, list)]) -> Union[(str, dict, list)]:
'\n Helper function to deserialize dictionaries or list with nested\n json strings\n :param element\n '
if isinstance(element, str):
try:
element = ast.literal_eval(element)
except Exception:
try:
element = jsonpickle.decode(element)
except Exception:
return element
if isinstance(element, dict):
keys = element.keys()
elif isinstance(element, list):
keys = range(len(element))
else:
return element
for k in keys:
element[k] = _deserialize_generic_json(element[k])
return element |
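A small illustration of the recursive decoding these helpers perform, assuming jsonpickle is installed; the JSON string nested inside the dict comes back as a Python object:

nested = {'imsi': 'IMSI001', 'config': '{"enabled": true, "mtu": 1400}'}
decoded = _deserialize_generic_json(nested)
print(decoded['config']['mtu'])  # 1400: the inner JSON string became a dict
print(decoded['imsi'])           # IMSI001: non-JSON strings pass through unchanged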
def keys(self, redis_key: str):
'\n Get current keys on redis db that match the pattern\n\n Args:\n redis_key:pattern to match the redis keys\n\n '
for k in self.client.keys(pattern='{}*'.format(redis_key)):
deserialized_key = k.decode('utf-8')
print(deserialized_key) | 6,918,490,644,040,773,000 | Get current keys on redis db that match the pattern
Args:
redis_key:pattern to match the redis keys | lte/gateway/python/scripts/state_cli.py | keys | Rajpratik71/magma | python | def keys(self, redis_key: str):
'\n Get current keys on redis db that match the pattern\n\n Args:\n redis_key:pattern to match the redis keys\n\n '
for k in self.client.keys(pattern='{}*'.format(redis_key)):
deserialized_key = k.decode('utf-8')
print(deserialized_key) |
def parse(self, key: str):
'\n Parse value of redis key on redis for encoded HASH, SET types, or\n JSON / Protobuf encoded state-wrapped types and prints it\n\n Args:\n key: key on redis\n\n '
redis_type = self.client.type(key).decode('utf-8')
key_type = key
if (':' in key):
key_type = key.split(':')[1]
if (redis_type == 'hash'):
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if (not deserializer):
raise AttributeError('Key not found on redis')
self._parse_hash_type(deserializer, key)
elif (redis_type == 'set'):
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if (not deserializer):
raise AttributeError('Key not found on redis')
self._parse_set_type(deserializer, key)
else:
value = self.client.get(key)
try:
self._parse_state_json(value)
except UnicodeDecodeError:
self._parse_state_proto(key_type, value) | 7,372,065,754,419,361,000 | Parse value of redis key on redis for encoded HASH, SET types, or
JSON / Protobuf encoded state-wrapped types and prints it
Args:
key: key on redis | lte/gateway/python/scripts/state_cli.py | parse | Rajpratik71/magma | python | def parse(self, key: str):
'\n Parse value of redis key on redis for encoded HASH, SET types, or\n JSON / Protobuf encoded state-wrapped types and prints it\n\n Args:\n key: key on redis\n\n '
redis_type = self.client.type(key).decode('utf-8')
key_type = key
if (':' in key):
key_type = key.split(':')[1]
if (redis_type == 'hash'):
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if (not deserializer):
raise AttributeError('Key not found on redis')
self._parse_hash_type(deserializer, key)
elif (redis_type == 'set'):
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if (not deserializer):
raise AttributeError('Key not found on redis')
self._parse_set_type(deserializer, key)
else:
value = self.client.get(key)
try:
self._parse_state_json(value)
except UnicodeDecodeError:
self._parse_state_proto(key_type, value) |
def corrupt(self, key):
'\n Mostly used for debugging, purposely corrupts state encoded protobuf\n in redis, and writes it back to datastore\n\n Args:\n key: key on redis\n\n '
rand_bytes = random.getrandbits(8)
byte_str = bytes([rand_bytes])
self.client[key] = byte_str
print(('Corrupted %s in redis' % key)) | -9,030,612,015,798,433,000 | Mostly used for debugging, purposely corrupts state encoded protobuf
in redis, and writes it back to datastore
Args:
key: key on redis | lte/gateway/python/scripts/state_cli.py | corrupt | Rajpratik71/magma | python | def corrupt(self, key):
'\n Mostly used for debugging, purposely corrupts state encoded protobuf\n in redis, and writes it back to datastore\n\n Args:\n key: key on redis\n\n '
rand_bytes = random.getrandbits(8)
byte_str = bytes([rand_bytes])
self.client[key] = byte_str
print(('Corrupted %s in redis' % key)) |
def mlp_discriminator(in_signal, non_linearity=tf.nn.relu, reuse=False, scope=None, b_norm=True, dropout_prob=None):
' used in nips submission.\n '
encoder_args = {'n_filters': [64, 128, 256, 256, 512], 'filter_sizes': [1, 1, 1, 1, 1], 'strides': [1, 1, 1, 1, 1]}
encoder_args['reuse'] = reuse
encoder_args['scope'] = scope
encoder_args['non_linearity'] = non_linearity
encoder_args['dropout_prob'] = dropout_prob
encoder_args['b_norm'] = b_norm
layer = encoder_with_convs_and_symmetry(in_signal, **encoder_args)
name = 'decoding_logits'
scope_e = expand_scope_by_name(scope, name)
d_logit = decoder_with_fc_only(layer, layer_sizes=[128, 64, 1], b_norm=b_norm, reuse=reuse, scope=scope_e)
d_prob = tf.nn.sigmoid(d_logit)
return (d_prob, d_logit) | -1,285,864,863,545,570,800 | used in nips submission. | src/generators_discriminators.py | mlp_discriminator | 15034458181/latent_3d_points | python | def mlp_discriminator(in_signal, non_linearity=tf.nn.relu, reuse=False, scope=None, b_norm=True, dropout_prob=None):
' \n '
encoder_args = {'n_filters': [64, 128, 256, 256, 512], 'filter_sizes': [1, 1, 1, 1, 1], 'strides': [1, 1, 1, 1, 1]}
encoder_args['reuse'] = reuse
encoder_args['scope'] = scope
encoder_args['non_linearity'] = non_linearity
encoder_args['dropout_prob'] = dropout_prob
encoder_args['b_norm'] = b_norm
layer = encoder_with_convs_and_symmetry(in_signal, **encoder_args)
name = 'decoding_logits'
scope_e = expand_scope_by_name(scope, name)
d_logit = decoder_with_fc_only(layer, layer_sizes=[128, 64, 1], b_norm=b_norm, reuse=reuse, scope=scope_e)
d_prob = tf.nn.sigmoid(d_logit)
return (d_prob, d_logit) |
def point_cloud_generator(z, pc_dims, layer_sizes=[64, 128, 512, 1024], non_linearity=tf.nn.relu, b_norm=False, b_norm_last=False, dropout_prob=None):
' used in nips submission.\n '
(n_points, dummy) = pc_dims
if (dummy != 3):
raise ValueError()
out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=b_norm)
out_signal = non_linearity(out_signal)
if (dropout_prob is not None):
out_signal = dropout(out_signal, dropout_prob)
if b_norm_last:
out_signal = batch_normalization(out_signal)
out_signal = fully_connected(out_signal, np.prod([n_points, 3]), activation='linear', weights_init='xavier')
out_signal = tf.reshape(out_signal, [(- 1), n_points, 3])
return out_signal | 1,197,973,983,239,007,500 | used in nips submission. | src/generators_discriminators.py | point_cloud_generator | 15034458181/latent_3d_points | python | def point_cloud_generator(z, pc_dims, layer_sizes=[64, 128, 512, 1024], non_linearity=tf.nn.relu, b_norm=False, b_norm_last=False, dropout_prob=None):
' \n '
(n_points, dummy) = pc_dims
if (dummy != 3):
raise ValueError()
out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=b_norm)
out_signal = non_linearity(out_signal)
if (dropout_prob is not None):
out_signal = dropout(out_signal, dropout_prob)
if b_norm_last:
out_signal = batch_normalization(out_signal)
out_signal = fully_connected(out_signal, np.prod([n_points, 3]), activation='linear', weights_init='xavier')
out_signal = tf.reshape(out_signal, [(- 1), n_points, 3])
return out_signal |
def latent_code_discriminator_two_layers(in_signal, layer_sizes=[256, 512], b_norm=False, non_linearity=tf.nn.relu, reuse=False, scope=None):
' Used in ICML submission.\n '
layer_sizes = (layer_sizes + [1])
d_logit = decoder_with_fc_only(in_signal, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=b_norm, reuse=reuse, scope=scope)
d_prob = tf.nn.sigmoid(d_logit)
return (d_prob, d_logit) | -5,338,603,358,653,517,000 | Used in ICML submission. | src/generators_discriminators.py | latent_code_discriminator_two_layers | 15034458181/latent_3d_points | python | def latent_code_discriminator_two_layers(in_signal, layer_sizes=[256, 512], b_norm=False, non_linearity=tf.nn.relu, reuse=False, scope=None):
' \n '
layer_sizes = (layer_sizes + [1])
d_logit = decoder_with_fc_only(in_signal, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=b_norm, reuse=reuse, scope=scope)
d_prob = tf.nn.sigmoid(d_logit)
return (d_prob, d_logit) |
def latent_code_generator_two_layers(z, out_dim, layer_sizes=[128], b_norm=False):
' Used in ICML submission.\n '
layer_sizes = (layer_sizes + out_dim)
out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, b_norm=b_norm)
out_signal = tf.nn.relu(out_signal)
return out_signal | -8,565,647,461,382,028,000 | Used in ICML submission. | src/generators_discriminators.py | latent_code_generator_two_layers | 15034458181/latent_3d_points | python | def latent_code_generator_two_layers(z, out_dim, layer_sizes=[128], b_norm=False):
' \n '
layer_sizes = (layer_sizes + out_dim)
out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, b_norm=b_norm)
out_signal = tf.nn.relu(out_signal)
return out_signal |
def maxNumber(self, nums1, nums2, k):
'\n :type nums1: List[int]\n :type nums2: List[int]\n :type k: int\n :rtype: List[int]\n '
def prep(nums, k):
dr = (len(nums) - k)
stay = []
for num in nums:
while (dr and stay and (stay[(- 1)] < num)):
stay.pop()
dr -= 1
stay.append(num)
return stay[:k]
def merge(x, y):
return [max(x, y).pop(0) for _ in (x + y)]
l1 = len(nums1)
l2 = len(nums2)
r = [0]
for i in range((k + 1)):
if ((i <= l1) and ((k - i) <= l2)):
r = max(merge(prep(nums1, i), prep(nums2, (k - i))), r)
return r | -3,527,442,808,947,760,600 | :type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int] | src/321. Create Maximum Number.py | maxNumber | wisesky/LeetCode-Practice | python | def maxNumber(self, nums1, nums2, k):
'\n :type nums1: List[int]\n :type nums2: List[int]\n :type k: int\n :rtype: List[int]\n '
def prep(nums, k):
dr = (len(nums) - k)
stay = []
for num in nums:
while (dr and stay and (stay[(- 1)] < num)):
stay.pop()
dr -= 1
stay.append(num)
return stay[:k]
def merge(x, y):
return [max(x, y).pop(0) for _ in (x + y)]
l1 = len(nums1)
l2 = len(nums2)
r = [0]
for i in range((k + 1)):
if ((i <= l1) and ((k - i) <= l2)):
r = max(merge(prep(nums1, i), prep(nums2, (k - i))), r)
return r |
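The classic test case for this problem, assuming the method sits on the usual LeetCode Solution wrapper class:

sol = Solution()
print(sol.maxNumber([3, 4, 6, 5], [9, 1, 2, 5, 8, 3], 5))
# [9, 8, 6, 5, 3] -- prep() keeps the best monotonic subsequence of each
# length and merge() interleaves the two candidates greedily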
def dbdirname(db, rc):
'Gets the database dir name.'
if (db.get('local', False) is False):
dbsdir = os.path.join(rc.builddir, '_dbs')
dbdir = os.path.join(dbsdir, db['name'])
else:
dbdir = db['url']
return dbdir | 2,476,350,037,564,095,500 | Gets the database dir name. | regolith/tools.py | dbdirname | jc-umana/regolith | python | def dbdirname(db, rc):
if (db.get('local', False) is False):
dbsdir = os.path.join(rc.builddir, '_dbs')
dbdir = os.path.join(dbsdir, db['name'])
else:
dbdir = db['url']
return dbdir |
def dbpathname(db, rc):
'Gets the database path name.'
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db['path'])
return dbpath | 8,634,322,216,610,822,000 | Gets the database path name. | regolith/tools.py | dbpathname | jc-umana/regolith | python | def dbpathname(db, rc):
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db['path'])
return dbpath |
def fallback(cond, backup):
'Decorator for returning the object if cond is true and a backup if\n cond is false. '
def dec(obj):
return (obj if cond else backup)
return dec | -7,055,294,061,139,205,000 | Decorator for returning the object if cond is true and a backup if
cond is false. | regolith/tools.py | fallback | jc-umana/regolith | python | def fallback(cond, backup):
'Decorator for returning the object if cond is true and a backup if\n cond is false. '
def dec(obj):
return (obj if cond else backup)
return dec |
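A short sketch of the decorator in use; the flag name is only illustrative:

HAVE_FANCY_LIB = False  # hypothetical feature flag

@fallback(HAVE_FANCY_LIB, None)
def fancy_export():
    return 'exported'

print(fancy_export)  # None, because the condition was false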
def all_docs_from_collection(client, collname, copy=True):
'Yield all entries in for all collections of a given name in a given\n database. '
(yield from client.all_documents(collname, copy=copy)) | 4,186,418,632,788,220,000 | Yield all entries in for all collections of a given name in a given
database. | regolith/tools.py | all_docs_from_collection | jc-umana/regolith | python | def all_docs_from_collection(client, collname, copy=True):
'Yield all entries in for all collections of a given name in a given\n database. '
(yield from client.all_documents(collname, copy=copy)) |
def date_to_rfc822(y, m, d=1):
'Converts a date to an RFC 822 formatted string.'
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d) | 9,035,443,693,733,744,000 | Converts a date to an RFC 822 formatted string. | regolith/tools.py | date_to_rfc822 | jc-umana/regolith | python | def date_to_rfc822(y, m, d=1):
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d) |
def rfc822now():
'Creates a string of the current time according to RFC 822.'
now = datetime.utcnow()
return email.utils.format_datetime(now) | 8,775,209,818,638,581,000 | Creates a string of the current time according to RFC 822. | regolith/tools.py | rfc822now | jc-umana/regolith | python | def rfc822now():
now = datetime.utcnow()
return email.utils.format_datetime(now) |
def gets(seq, key, default=None):
'Gets a key from every element of a sequence if possible.'
for x in seq:
(yield x.get(key, default)) | 2,666,311,277,258,468,400 | Gets a key from every element of a sequence if possible. | regolith/tools.py | gets | jc-umana/regolith | python | def gets(seq, key, default=None):
for x in seq:
(yield x.get(key, default)) |
def month_and_year(m=None, y=None):
'Creates a string from month and year data, if available.'
if (y is None):
return 'present'
if (m is None):
return str(y)
m = month_to_int(m)
return '{0} {1}'.format(SHORT_MONTH_NAMES[m], y) | 13,686,799,063,267,366 | Creates a string from month and year data, if available. | regolith/tools.py | month_and_year | jc-umana/regolith | python | def month_and_year(m=None, y=None):
if (y is None):
return 'present'
if (m is None):
return str(y)
m = month_to_int(m)
return '{0} {1}'.format(SHORT_MONTH_NAMES[m], y) |
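A few representative calls, assuming SHORT_MONTH_NAMES maps month 5 to 'May':

print(month_and_year())         # present
print(month_and_year(y=2019))   # 2019
print(month_and_year(5, 2019))  # May 2019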
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
'\n tests whether a date is on or since another date\n\n Parameters\n ----------\n y : int\n the year to be tested\n sy : int\n the since year\n m : int or str\n the month to be tested. Optional, defaults to Jan\n d : int\n the day to be tested. Defaults to 1\n sm : int or str\n the since month. Optional, defaults to Jan\n sd: int\n the since day. Optional, defaults to 1\n\n Returns\n -------\n True if the target date is the same as, or more recent than, the since date\n\n '
s = '{}/{}/{}'.format(sd, month_to_int(sm), sy)
d = '{}/{}/{}'.format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, '%d/%m/%Y').timetuple())
date = time.mktime(datetime.strptime(d, '%d/%m/%Y').timetuple())
return (since <= date) | -8,613,150,611,452,172,000 | tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date | regolith/tools.py | is_since | jc-umana/regolith | python | def is_since(y, sy, m=1, d=1, sm=1, sd=1):
'\n tests whether a date is on or since another date\n\n Parameters\n ----------\n y : int\n the year to be tested\n sy : int\n the since year\n m : int or str\n the month to be tested. Optional, defaults to Jan\n d : int\n the day to be tested. Defaults to 1\n sm : int or str\n the since month. Optional, defaults to Jan\n sd: int\n the since day. Optional, defaults to 1\n\n Returns\n -------\n True if the target date is the same as, or more recent than, the since date\n\n '
s = '{}/{}/{}'.format(sd, month_to_int(sm), sy)
d = '{}/{}/{}'.format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, '%d/%m/%Y').timetuple())
date = time.mktime(datetime.strptime(d, '%d/%m/%Y').timetuple())
return (since <= date) |
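Two quick checks of the comparison, with defaults filling in the missing day and month:

print(is_since(2019, 2018))             # True: 1 Jan 2019 is after 1 Jan 2018
print(is_since(2018, 2018, m=6, sm=7))  # False: June 2018 precedes July 2018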
def is_before(y, by, m=12, d=None, bm=12, bd=None):
'\n tests whether a date is on or before another date\n\n Parameters\n ----------\n y : int\n the year to be tested\n by : int\n the before year\n m : int or str\n the month to be tested. Optional, defaults to Dec\n d : int\n the day to be tested. Defaults to last day of the month\n bm : int or str\n the before month. Optional, defaults to Dec\n bd: int\n the before day. Optional, defaults to last day of the month\n\n Returns\n -------\n True if the target date is the same as, or earlier than, the before date\n\n '
if (not d):
d = monthrange(y, month_to_int(m))[1]
if (not bd):
bd = monthrange(by, month_to_int(bm))[1]
b = '{}/{}/{}'.format(bd, month_to_int(bm), by)
d = '{}/{}/{}'.format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, '%d/%m/%Y').timetuple())
date = time.mktime(datetime.strptime(d, '%d/%m/%Y').timetuple())
return (before >= date) | -1,713,290,861,960,713,700 | tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date | regolith/tools.py | is_before | jc-umana/regolith | python | def is_before(y, by, m=12, d=None, bm=12, bd=None):
'\n tests whether a date is on or before another date\n\n Parameters\n ----------\n y : int\n the year to be tested\n by : int\n the before year\n m : int or str\n the month to be tested. Optional, defaults to Dec\n d : int\n the day to be tested. Defaults to last day of the month\n bm : int or str\n the before month. Optional, defaults to Dec\n bd: int\n the before day. Optional, defaults to last day of the month\n\n Returns\n -------\n True if the target date is the same as, or earlier than, the before date\n\n '
if (not d):
d = monthrange(y, month_to_int(m))[1]
if (not bd):
bd = monthrange(by, month_to_int(bm))[1]
b = '{}/{}/{}'.format(bd, month_to_int(bm), by)
d = '{}/{}/{}'.format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, '%d/%m/%Y').timetuple())
date = time.mktime(datetime.strptime(d, '%d/%m/%Y').timetuple())
return (before >= date) |
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
'\n tests whether a date is on or between two other dates\n\n returns true if the target date is between the since date and the before\n date, inclusive.\n\n Parameters\n ----------\n y : int\n the year to be tested\n sy : int\n the since year\n by : int\n the before year\n m : int or str\n the month to be tested. Optional, defaults to Jan\n d : int\n the day to be tested. Defaults to 1\n sm : int or str\n the since month. Optional, defaults to Jan\n bm : int or str\n the before month. Optional, defaults to Dec\n sd: int\n the since day. Optional, defaults to 1\n bd: int\n the before day. Optional, defaults to the last day of the month\n\n Returns\n -------\n True if the target date is between the since date and the before date,\n inclusive (i.e., returns true if the target date is the same as either the\n since date or the before date)\n\n '
if (not bd):
bd = monthrange(by, month_to_int(bm))[1]
return (is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(y, by, m=m, d=d, bm=bm, bd=bd)) | 2,456,960,928,022,609,000 | tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date) | regolith/tools.py | is_between | jc-umana/regolith | python | def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
'\n tests whether a date is on or between two other dates\n\n returns true if the target date is between the since date and the before\n date, inclusive.\n\n Parameters\n ----------\n y : int\n the year to be tested\n sy : int\n the since year\n by : int\n the before year\n m : int or str\n the month to be tested. Optional, defaults to Jan\n d : int\n the day to be tested. Defaults to 1\n sm : int or str\n the since month. Optional, defaults to Jan\n bm : int or str\n the before month. Optional, defaults to Dec\n sd: int\n the since day. Optional, defaults to 1\n bd: int\n the before day. Optional, defaults to the last day of the month\n\n Returns\n -------\n True if the target date is between the since date and the before date,\n inclusive (i.e., returns true if the target date is the same as either the\n since date or the before date)\n\n '
if (not bd):
bd = monthrange(by, month_to_int(bm))[1]
return (is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(y, by, m=m, d=d, bm=bm, bd=bd)) |
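For example:

print(is_between(2019, 2019, 2019, m=5, d=15))  # True: 15 May 2019 lies inside 2019
print(is_between(2020, 2018, 2019))             # False: 2020 falls after the window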
def has_started(sy, sm=None, sd=None):
'\n true if today is after the dates given, inclusive\n\n Parameters\n ----------\n sy : int\n the year to check today against\n sm : int or str.\n the month to check today against. Should be integer or in regolith MONTHS.\n default is 1\n sd : int.\n the day to check today against. Default is 1\n\n Returns\n -------\n bool\n true if today is after dates given\n '
if (not sm):
sm = 1
if (not sd):
sd = 1
s = '{}/{}/{}'.format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, '%d/%m/%Y').timetuple())
return (start <= time.time()) | 878,695,831,259,082,100 | true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given | regolith/tools.py | has_started | jc-umana/regolith | python | def has_started(sy, sm=None, sd=None):
'\n true if today is after the dates given, inclusive\n\n Parameters\n ----------\n sy : int\n the year to check today against\n sm : int or str.\n the month to check today against. Should be integer or in regolith MONTHS.\n default is 1\n sd : int.\n the day to check today against. Default is 1\n\n Returns\n -------\n bool\n true if today is after dates given\n '
if (not sm):
sm = 1
if (not sd):
sd = 1
s = '{}/{}/{}'.format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, '%d/%m/%Y').timetuple())
return (start <= time.time()) |
def has_finished(ey, em=None, ed=None):
'\n true if today is on or after the end date given, inclusive\n\n Parameters\n ----------\n ey : int\n end year, the year to check today against\n em : int or str.\n end month, the month to check today against. Should be integer or in regolith MONTHS.\n default is 12\n ed : int.\n end-day, the day to check today against. Default is last day of the month\n\n Returns\n -------\n bool\n true if the end date has passed\n '
if (not em):
em = 12
if (not ed):
ed = monthrange(ey, month_to_int(em))[1]
e = '{}/{}/{}'.format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, '%d/%m/%Y').timetuple())
return (end <= time.time()) | -2,508,334,573,039,119,000 | true if today is on or after the end date given, inclusive
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if the end date has passed | regolith/tools.py | has_finished | jc-umana/regolith | python | def has_finished(ey, em=None, ed=None):
'\n true if today is on or after the end date given, inclusive\n\n Parameters\n ----------\n ey : int\n end year, the year to check today against\n em : int or str.\n end month, the month to check today against. Should be integer or in regolith MONTHS.\n default is 12\n ed : int.\n end-day, the day to check today against. Default is last day of the month\n\n Returns\n -------\n bool\n true if the end date has passed\n '
if (not em):
em = 12
if (not ed):
ed = monthrange(ey, month_to_int(em))[1]
e = '{}/{}/{}'.format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, '%d/%m/%Y').timetuple())
return (end <= time.time()) |
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
'\n true if today is between the dates given, inclusive\n\n Parameters\n ----------\n sy : int\n start year, the year to check today is after\n ey : int\n end year, the year to check today is before\n sm : int or str\n start month, the month to check today is after. Should be integer or in\n regolith MONTHS. Default is 1\n sd : int\n start day, the day to check today after. Default is 1\n em : int or str.\n end month, the month to check today against. Should be integer or in\n regolith MONTHS. Default is 12\n ed : int.\n end-day, the day to check today against. Default is last day of the month\n\n Returns\n -------\n bool\n true if today is between the dates given, inclusive\n '
return (has_started(sy, sm, sd) and (not has_finished(ey, em, ed))) | 6,748,978,171,058,761,000 | true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the dates given, inclusive | regolith/tools.py | is_current | jc-umana/regolith | python | def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
'\n true if today is between the dates given, inclusive\n\n Parameters\n ----------\n sy : int\n start year, the year to check today is after\n ey : int\n end year, the year to check today is before\n sm : int or str\n start month, the month to check today is after. Should be integer or in\n regolith MONTHS. Default is 1\n sd : int\n start day, the day to check today after. Default is 1\n em : int or str.\n end month, the month to check today against. Should be integer or in\n regolith MONTHS. Default is 12\n ed : int.\n end-day, the day to check today against. Default is last day of the month\n\n Returns\n -------\n bool\n true if today is between the dates given, inclusive\n '
return (has_started(sy, sm, sd) and (not has_finished(ey, em, ed))) |
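Taken together, the three helpers answer the usual appointment questions; the results naturally depend on the date the code is run:

print(has_started(2000))       # True: 1 Jan 2000 has passed
print(has_finished(2100))      # False: 31 Dec 2100 has not passed
print(is_current(2000, 2100))  # True: today falls inside the window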
def filter_publications(citations, authors, reverse=False, bold=True):
'Filter publications by the author(s)/editor(s)\n\n Parameters\n ----------\n citations : list of dict\n The publication citations\n authors : set of str\n The authors to be filtered against\n reverse : bool, optional\n If True reverse the order, defaults to False\n bold : bool, optional\n If True put latex bold around the author(s) in question\n '
pubs = []
for pub in citations:
if (len(((set(pub.get('author', [])) | set(pub.get('editor', []))) & authors)) == 0):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub['author']:
if (a in authors):
bold_self.append((('\\textbf{' + a) + '}'))
else:
bold_self.append(a)
pub['author'] = bold_self
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs | 9,065,536,602,894,445,000 | Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question | regolith/tools.py | filter_publications | jc-umana/regolith | python | def filter_publications(citations, authors, reverse=False, bold=True):
'Filter publications by the author(s)/editor(s)\n\n Parameters\n ----------\n citations : list of dict\n The publication citations\n authors : set of str\n The authors to be filtered against\n reverse : bool, optional\n If True reverse the order, defaults to False\n bold : bool, optional\n If True put latex bold around the author(s) in question\n '
pubs = []
for pub in citations:
if (len(((set(pub.get('author', [])) | set(pub.get('editor', []))) & authors)) == 0):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub['author']:
if (a in authors):
bold_self.append((('\\textbf{' + a) + '}'))
else:
bold_self.append(a)
pub['author'] = bold_self
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs |
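A minimal sketch with made-up citation entries; it assumes each entry carries the year/month fields that doc_date_key sorts on:

citations = [
    {'author': ['Doe, J.', 'Roe, R.'], 'year': 2019},
    {'author': ['Smith, A.'], 'year': 2018},
]
mine = filter_publications(citations, {'Doe, J.'})
print(mine[0]['author'])  # ['\\textbf{Doe, J.}', 'Roe, R.']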
def filter_projects(projects, authors, reverse=False):
'Filter projects by the author(s)\n\n Parameters\n ----------\n projects : list of dict\n The publication citations\n authors : set of list of str\n The authors to be filtered against\n reverse : bool, optional\n If True reverse the order, defaults to False\n '
projs = []
for proj in projects:
team_names = set(gets(proj['team'], 'name'))
if (len((team_names & authors)) == 0):
continue
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs | -185,094,831,037,862,050 | Filter projects by the author(s)
Parameters
----------
projects : list of dict
The publication citations
authors : set of list of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False | regolith/tools.py | filter_projects | jc-umana/regolith | python | def filter_projects(projects, authors, reverse=False):
'Filter projects by the author(s)\n\n Parameters\n ----------\n projects : list of dict\n The publication citations\n authors : set of list of str\n The authors to be filtered against\n reverse : bool, optional\n If True reverse the order, defaults to False\n '
projs = []
for proj in projects:
team_names = set(gets(proj['team'], 'name'))
if (len((team_names & authors)) == 0):
continue
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs |
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"Filter grants by those involved\n\n Parameters\n ----------\n input_grants : list of dict\n The grants to filter\n names : set of str\n The authors to be filtered against\n pi : bool, optional\n If True add the grant amount to that person's total amount\n reverse : bool, optional\n If True reverse the order, defaults to False\n multi_pi : bool, optional\n If True compute sub-awards for multi PI grants, defaults to False\n "
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant['team'], 'name'))
if (len((team_names & names)) == 0):
continue
grant = deepcopy(grant)
person = [x for x in grant['team'] if (x['name'] in names)][0]
if pi:
if (person['position'].lower() == 'pi'):
total_amount += grant['amount']
else:
continue
elif multi_pi:
grant['subaward_amount'] = person.get('subaward_amount', 0.0)
grant['multi_pi'] = any(gets(grant['team'], 'subaward_amount'))
elif (person['position'].lower() == 'pi'):
continue
else:
total_amount += grant['amount']
subaward_amount += person.get('subaward_amount', 0.0)
grant['subaward_amount'] = person.get('subaward_amount', 0.0)
grant['pi'] = [x for x in grant['team'] if (x['position'].lower() == 'pi')][0]
grant['me'] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return (grants, total_amount, subaward_amount) | 8,310,129,862,167,256,000 | Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False | regolith/tools.py | filter_grants | jc-umana/regolith | python | def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"Filter grants by those involved\n\n Parameters\n ----------\n input_grants : list of dict\n The grants to filter\n names : set of str\n The authors to be filtered against\n pi : bool, optional\n If True add the grant amount to that person's total amount\n reverse : bool, optional\n If True reverse the order, defaults to False\n multi_pi : bool, optional\n If True compute sub-awards for multi PI grants, defaults to False\n "
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant['team'], 'name'))
if (len((team_names & names)) == 0):
continue
grant = deepcopy(grant)
person = [x for x in grant['team'] if (x['name'] in names)][0]
if pi:
if (person['position'].lower() == 'pi'):
total_amount += grant['amount']
else:
continue
elif multi_pi:
grant['subaward_amount'] = person.get('subaward_amount', 0.0)
grant['multi_pi'] = any(gets(grant['team'], 'subaward_amount'))
elif (person['position'].lower() == 'pi'):
continue
else:
total_amount += grant['amount']
subaward_amount += person.get('subaward_amount', 0.0)
grant['subaward_amount'] = person.get('subaward_amount', 0.0)
grant['pi'] = [x for x in grant['team'] if (x['position'].lower() == 'pi')][0]
grant['me'] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return (grants, total_amount, subaward_amount) |
def awards_grants_honors(p):
'Make sorted awards grants and honors list.\n\n Parameters\n ----------\n p : dict\n The person entry\n '
aghs = []
for x in p.get('funding', ()):
d = {'description': '{0} ({1}{2:,})'.format(latex_safe(x['name']), x.get('currency', '$').replace('$', '\\$'), x['value']), 'year': x['year'], '_key': date_to_float(x['year'], x.get('month', 0))}
aghs.append(d)
for x in (p.get('service', []) + p.get('honors', [])):
d = {'description': latex_safe(x['name'])}
if ('year' in x):
d.update({'year': x['year'], '_key': date_to_float(x['year'], x.get('month', 0))})
elif (('begin_year' in x) and ('end_year' in x)):
d.update({'year': '{}-{}'.format(x['begin_year'], x['end_year']), '_key': date_to_float(x['begin_year'], x.get('month', 0))})
elif ('begin_year' in x):
d.update({'year': '{}'.format(x['begin_year']), '_key': date_to_float(x['begin_year'], x.get('month', 0))})
aghs.append(d)
aghs.sort(key=(lambda x: x.get('_key', 0.0)), reverse=True)
return aghs | 6,206,872,766,831,139,000 | Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry | regolith/tools.py | awards_grants_honors | jc-umana/regolith | python | def awards_grants_honors(p):
'Make sorted awards grants and honors list.\n\n Parameters\n ----------\n p : dict\n The person entry\n '
aghs = []
for x in p.get('funding', ()):
d = {'description': '{0} ({1}{2:,})'.format(latex_safe(x['name']), x.get('currency', '$').replace('$', '\\$'), x['value']), 'year': x['year'], '_key': date_to_float(x['year'], x.get('month', 0))}
aghs.append(d)
for x in (p.get('service', []) + p.get('honors', [])):
d = {'description': latex_safe(x['name'])}
if ('year' in x):
d.update({'year': x['year'], '_key': date_to_float(x['year'], x.get('month', 0))})
elif (('begin_year' in x) and ('end_year' in x)):
d.update({'year': '{}-{}'.format(x['begin_year'], x['end_year']), '_key': date_to_float(x['begin_year'], x.get('month', 0))})
elif ('begin_year' in x):
d.update({'year': '{}'.format(x['begin_year']), '_key': date_to_float(x['begin_year'], x.get('month', 0))})
aghs.append(d)
aghs.sort(key=(lambda x: x.get('_key', 0.0)), reverse=True)
return aghs |
def latex_safe_url(s):
'Makes a string that is a URL latex safe.'
return s.replace('#', '\\#') | 6,571,294,783,630,191,000 | Makes a string that is a URL latex safe. | regolith/tools.py | latex_safe_url | jc-umana/regolith | python | def latex_safe_url(s):
return s.replace('#', '\\#') |
def latex_safe(s, url_check=True, wrapper='url'):
"Make string latex safe\n\n Parameters\n ----------\n s : str\n url_check : bool, optional\n If True check for URLs and wrap them, if False check for URL but don't\n wrap, defaults to True\n wrapper : str, optional\n The wrapper for wrapping urls defaults to url\n "
if (not s):
return s
if url_check:
url_search = HTTP_RE.search(s)
if url_search:
url = '{start}\\{wrapper}{{{s}}}{end}'.format(start=latex_safe(s[:url_search.start()]), end=latex_safe(s[url_search.end():]), wrapper=wrapper, s=latex_safe_url(s[url_search.start():url_search.end()]))
return url
return s.replace('&', '\\&').replace('$', '\\$').replace('#', '\\#').replace('_', '\\_') | 7,536,975,964,445,699,000 | Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url | regolith/tools.py | latex_safe | jc-umana/regolith | python | def latex_safe(s, url_check=True, wrapper='url'):
"Make string latex safe\n\n Parameters\n ----------\n s : str\n url_check : bool, optional\n If True check for URLs and wrap them, if False check for URL but don't\n wrap, defaults to True\n wrapper : str, optional\n The wrapper for wrapping urls defaults to url\n "
if (not s):
return s
if url_check:
url_search = HTTP_RE.search(s)
if url_search:
url = '{start}\\{wrapper}{{{s}}}{end}'.format(start=latex_safe(s[:url_search.start()]), end=latex_safe(s[url_search.end():]), wrapper=wrapper, s=latex_safe_url(s[url_search.start():url_search.end()]))
return url
return s.replace('&', '\\&').replace('$', '\\$').replace('#', '\\#').replace('_', '\\_') |
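Two illustrative calls, assuming HTTP_RE matches http(s) URLs as its name suggests:

print(latex_safe('R&D budget: $10k #1'))
# R\&D budget: \$10k \#1
print(latex_safe('see https://example.com/a_b'))
# see \url{https://example.com/a_b}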
def make_bibtex_file(pubs, pid, person_dir='.'):
"Make a bibtex file given the publications\n\n Parameters\n ----------\n pubs : list of dict\n The publications\n pid : str\n The person id\n person_dir : str, optional\n The person's directory\n "
if (not HAVE_BIBTEX_PARSER):
return None
skip_keys = {'ID', 'ENTRYTYPE', 'author'}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent['ID'] = ent.pop('_id')
ent['ENTRYTYPE'] = ent.pop('entrytype')
for n in ['author', 'editor']:
if (n in ent):
ent[n] = ' and '.join(ent[n])
for key in ent.keys():
if (key in skip_keys):
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = (os.path.join(person_dir, pid) + '.bib')
with open(fname, 'w', encoding='utf-8') as f:
f.write(bibwriter.write(bibdb))
return fname | 7,067,784,381,179,065,000 | Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory | regolith/tools.py | make_bibtex_file | jc-umana/regolith | python | def make_bibtex_file(pubs, pid, person_dir='.'):
"Make a bibtex file given the publications\n\n Parameters\n ----------\n pubs : list of dict\n The publications\n pid : str\n The person id\n person_dir : str, optional\n The person's directory\n "
if (not HAVE_BIBTEX_PARSER):
return None
skip_keys = {'ID', 'ENTRYTYPE', 'author'}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent['ID'] = ent.pop('_id')
ent['ENTRYTYPE'] = ent.pop('entrytype')
for n in ['author', 'editor']:
if (n in ent):
ent[n] = ' and '.join(ent[n])
for key in ent.keys():
if (key in skip_keys):
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = (os.path.join(person_dir, pid) + '.bib')
with open(fname, 'w', encoding='utf-8') as f:
f.write(bibwriter.write(bibdb))
return fname |
def document_by_value(documents, address, value):
'Get a specific document by one of its values\n\n Parameters\n ----------\n documents: generator\n Generator which yields the documents\n address: str or tuple\n The address of the data in the document\n value: any\n The expected value for the document\n\n Returns\n -------\n dict:\n The first document which matches the request\n '
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if (doc == value):
return g_doc | -6,065,343,237,551,224,000 | Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request | regolith/tools.py | document_by_value | jc-umana/regolith | python | def document_by_value(documents, address, value):
'Get a specific document by one of its values\n\n Parameters\n ----------\n documents: generator\n Generator which yields the documents\n address: str or tuple\n The address of the data in the document\n value: any\n The expected value for the document\n\n Returns\n -------\n dict:\n The first document which matches the request\n '
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if (doc == value):
return g_doc |
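For instance, addressing a nested field with a tuple:

docs = iter([
    {'_id': 'p1', 'contact': {'email': 'a@example.org'}},
    {'_id': 'p2', 'contact': {'email': 'b@example.org'}},
])
hit = document_by_value(docs, ('contact', 'email'), 'b@example.org')
print(hit['_id'])  # p2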
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"Retrieve a document from the documents where value is compared against\n multiple potential sources\n\n Parameters\n ----------\n documents: generator\n The documents\n sources: iterable\n The potential data sources\n value:\n The value to compare against to find the document of interest\n case_sensitive: Bool\n When true will match case (Default = True)\n\n Returns\n -------\n dict:\n The document\n\n Examples\n --------\n >>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)\n\n This would get the person entry for which either the alias or the name was\n ``pi_name``.\n\n "
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if (not isinstance(ret, list)):
ret = [ret]
returns.extend(ret)
if (not case_sensitive):
returns = [reti.lower() for reti in returns if isinstance(reti, str)]
if isinstance(value, str):
if (value.lower() in frozenset(returns)):
return doc
elif (value in frozenset(returns)):
return doc | 2,397,259,811,125,256,700 | Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``. | regolith/tools.py | fuzzy_retrieval | jc-umana/regolith | python | def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"Retrieve a document from the documents where value is compared against\n multiple potential sources\n\n Parameters\n ----------\n documents: generator\n The documents\n sources: iterable\n The potential data sources\n value:\n The value to compare against to find the document of interest\n case_sensitive: Bool\n When true will match case (Default = True)\n\n Returns\n -------\n dict:\n The document\n\n Examples\n --------\n >>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)\n\n This would get the person entry for which either the alias or the name was\n ``pi_name``.\n\n "
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if (not isinstance(ret, list)):
ret = [ret]
returns.extend(ret)
if (not case_sensitive):
returns = [reti.lower() for reti in returns if isinstance(reti, str)]
if isinstance(value, str):
if (value.lower() in frozenset(returns)):
return doc
elif (value in frozenset(returns)):
return doc |
def number_suffix(number):
'returns the suffix that adjectivises a number (st, nd, rd, th)\n\n Parameters\n ----------\n number: integer\n The number. If number is not an integer, returns an empty string\n\n Returns\n -------\n suffix: string\n The suffix (st, nd, rd, th)\n '
if (not isinstance(number, (int, float))):
return ''
if (10 < number < 20):
suffix = 'th'
else:
suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get((number % 10), 'th')
return suffix | -1,537,146,430,695,771,000 | returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th) | regolith/tools.py | number_suffix | jc-umana/regolith | python | def number_suffix(number):
'returns the suffix that adjectivises a number (st, nd, rd, th)\n\n Parameters\n ----------\n number: integer\n The number. If number is not an integer, returns an empty string\n\n Returns\n -------\n suffix: string\n The suffix (st, nd, rd, th)\n '
if (not isinstance(number, (int, float))):
return ''
if (10 < number < 20):
suffix = 'th'
else:
suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get((number % 10), 'th')
return suffix |
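A quick demonstration, including the 11-13 special case:

for n in (1, 2, 3, 4, 11, 12, 13, 21):
    print(str(n) + number_suffix(n), end=' ')
# 1st 2nd 3rd 4th 11th 12th 13th 21st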
def dereference_institution(input_record, institutions):
'Tool for replacing placeholders for institutions with the actual\n institution data. Note that the replacement is done inplace\n\n Parameters\n ----------\n input_record : dict\n The record to dereference\n institutions : iterable of dicts\n The institutions\n '
inst = (input_record.get('institution') or input_record.get('organization'))
if (not inst):
error = (input_record.get('position') or input_record.get('degree'))
print('WARNING: no institution or organization but found {}'.format(error))
db_inst = fuzzy_retrieval(institutions, ['name', '_id', 'aka'], inst)
if db_inst:
input_record['institution'] = db_inst['name']
input_record['organization'] = db_inst['name']
if (db_inst.get('country') == 'USA'):
state_country = db_inst.get('state')
else:
state_country = db_inst.get('country')
input_record['location'] = '{}, {}'.format(db_inst['city'], state_country)
if (not db_inst.get('departments')):
print('WARNING: no departments in {}. {} sought'.format(db_inst.get('_id'), inst))
if (('department' in input_record) and db_inst.get('departments')):
input_record['department'] = fuzzy_retrieval([db_inst['departments']], ['name', 'aka'], input_record['department'])
else:
input_record['department'] = inst | -8,456,008,445,291,992,000 | Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions | regolith/tools.py | dereference_institution | jc-umana/regolith | python | def dereference_institution(input_record, institutions):
'Tool for replacing placeholders for institutions with the actual\n institution data. Note that the replacement is done inplace\n\n Parameters\n ----------\n input_record : dict\n The record to dereference\n institutions : iterable of dicts\n The institutions\n '
inst = (input_record.get('institution') or input_record.get('organization'))
if (not inst):
error = (input_record.get('position') or input_record.get('degree'))
print('WARNING: no institution or organization but found {}'.format(error))
db_inst = fuzzy_retrieval(institutions, ['name', '_id', 'aka'], inst)
if db_inst:
input_record['institution'] = db_inst['name']
input_record['organization'] = db_inst['name']
if (db_inst.get('country') == 'USA'):
state_country = db_inst.get('state')
else:
state_country = db_inst.get('country')
input_record['location'] = '{}, {}'.format(db_inst['city'], state_country)
if (not db_inst.get('departments')):
print('WARNING: no departments in {}. {} sought'.format(db_inst.get('_id'), inst))
if (('department' in input_record) and db_inst.get('departments')):
input_record['department'] = fuzzy_retrieval([db_inst['departments']], ['name', 'aka'], input_record['department'])
else:
input_record['department'] = inst |
def merge_collections(a, b, target_id):
'\n merge two collections into a single merged collection\n\n for keys that are in both collections, the value in b will be kept\n\n Parameters\n ----------\n a the inferior collection (will lose values of shared keys)\n b the superior collection (will keep values of shared keys)\n target_id str the name of the key used in b to dereference ids in a\n\n Returns\n -------\n the combined collection. Note that it returns a collection only containing\n merged items from a and b that are dereferenced in b, i.e., the merged\n intercept. If you want the union you can update the returned collection\n with a.\n\n Examples\n --------\n >>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")\n\n This would merge all entries in the proposals collection with entries in the\n grants collection for which "_id" in proposals has the value of\n "proposal_id" in grants.\n '
adict = {}
for k in a:
adict[k.get('_id')] = k
bdict = {}
for k in b:
bdict[k.get('_id')] = k
b_for_a = {}
for k in adict:
for (kk, v) in bdict.items():
if (v.get(target_id, '') == k):
b_for_a[k] = kk
chained = {}
for (k, v) in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values()) | 101,764,650,393,760,800 | merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intercept. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants. | regolith/tools.py | merge_collections | jc-umana/regolith | python | def merge_collections(a, b, target_id):
'\n merge two collections into a single merged collection\n\n for keys that are in both collections, the value in b will be kept\n\n Parameters\n ----------\n a the inferior collection (will lose values of shared keys)\n b the superior collection (will keep values of shared keys)\n target_id str the name of the key used in b to dereference ids in a\n\n Returns\n -------\n the combined collection. Note that it returns a collection only containing\n merged items from a and b that are dereferenced in b, i.e., the merged\n intercept. If you want the union you can update the returned collection\n with a.\n\n Examples\n --------\n >>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")\n\n This would merge all entries in the proposals collection with entries in the\n grants collection for which "_id" in proposals has the value of\n "proposal_id" in grants.\n '
adict = {}
for k in a:
adict[k.get('_id')] = k
bdict = {}
for k in b:
bdict[k.get('_id')] = k
b_for_a = {}
for k in adict:
for (kk, v) in bdict.items():
if (v.get(target_id, '') == k):
b_for_a[k] = kk
chained = {}
for (k, v) in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values()) |
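A concrete sketch of the proposals/grants example from the docstring, with made-up entries; it assumes ChainDB acts as a chained mapping that prefers the superior collection:

proposals = [{'_id': 'prop1', 'title': 'X-ray study'}]
grants = [{'_id': 'grant1', 'proposal_id': 'prop1', 'amount': 10000}]
merged = merge_collections(proposals, grants, 'proposal_id')
print(merged[0]['title'], merged[0]['amount'])  # X-ray study 10000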
def update_schemas(default_schema, user_schema):
'\n Merging the user schema into the default schema recursively and return the\n merged schema. The default schema and user schema will not be modified\n during the merging.\n\n Parameters\n ----------\n default_schema : dict\n The default schema.\n user_schema : dict\n The user defined schema.\n\n Returns\n -------\n updated_schema : dict\n The merged schema.\n '
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if ((key in updated_schema) and isinstance(updated_schema[key], dict) and isinstance(user_schema[key], dict)):
updated_schema[key] = update_schemas(updated_schema[key], user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema | -8,252,402,807,327,927,000 | Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema. | regolith/tools.py | update_schemas | jc-umana/regolith | python | def update_schemas(default_schema, user_schema):
'\n Merging the user schema into the default schema recursively and return the\n merged schema. The default schema and user schema will not be modified\n during the merging.\n\n Parameters\n ----------\n default_schema : dict\n The default schema.\n user_schema : dict\n The user defined schema.\n\n Returns\n -------\n updated_schema : dict\n The merged schema.\n '
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if ((key in updated_schema) and isinstance(updated_schema[key], dict) and isinstance(user_schema[key], dict)):
updated_schema[key] = update_schemas(updated_schema[key], user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema |
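For example, nested dicts merge recursively while clashing scalars keep the user's value:

default = {'a': {'x': 1, 'y': 2}, 'b': 3}
user = {'a': {'y': 20}, 'c': 4}
print(update_schemas(default, user))
# {'a': {'x': 1, 'y': 20}, 'b': 3, 'c': 4}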
def group(db, by):
'\n Group the document in the database according to the value of the doc[by] in db.\n\n Parameters\n ----------\n db : iterable\n The database of documents.\n by : basestring\n The key to group the documents.\n\n Returns\n -------\n grouped: dict\n A dictionary mapping each value of doc[by] to the list of docs that share\n that value.\n\n Examples\n --------\n Here, we use a tuple of dict as an example of the database.\n >>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})\n >>> group(db, "k")\n This will return\n >>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}\n '
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if (not key):
print('There is no field {} in {}'.format(by, id_key(doc)))
elif (key not in grouped):
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped | -8,126,863,572,966,259,000 | Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping each value of doc[by] to the list of docs that share
that value.
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]} | regolith/tools.py | group | jc-umana/regolith | python | def group(db, by):
'\n Group the document in the database according to the value of the doc[by] in db.\n\n Parameters\n ----------\n db : iterable\n The database of documents.\n by : basestring\n The key to group the documents.\n\n Returns\n -------\n grouped: dict\n A dictionary mapping each value of doc[by] to the list of docs that share\n that value.\n\n Examples\n --------\n Here, we use a tuple of dict as an example of the database.\n >>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})\n >>> group(db, "k")\n This will return\n >>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}\n '
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if (not key):
print('There is no field {} in {}'.format(by, id_key(doc)))
elif (key not in grouped):
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped |
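Note that the doctest in the docstring above calls group(db) without the required by argument; a corrected usage sketch with the same toy database:

db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
grouped = group(db, "k")  # "by" must be supplied, unlike in the doctest
assert grouped == {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}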
def get_pi_id(rc):
"\n Gets the database id of the group PI\n\n Parameters\n ----------\n rc: runcontrol object\n The runcontrol object. It must contain the 'groups' and 'people'\n collections in the needed databases\n\n Returns\n -------\n The database '_id' of the group PI\n\n "
groupiter = list(all_docs_from_collection(rc.client, 'groups'))
peoplecoll = all_docs_from_collection(rc.client, 'people')
pi_ref = [i.get('pi_name') for i in groupiter if (i.get('name').casefold() == rc.groupname.casefold())]
pi = fuzzy_retrieval(peoplecoll, ['_id', 'aka', 'name'], pi_ref[0])
return pi.get('_id') | 3,363,426,960,476,733,000 | Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI | regolith/tools.py | get_pi_id | jc-umana/regolith | python | def get_pi_id(rc):
"\n Gets the database id of the group PI\n\n Parameters\n ----------\n rc: runcontrol object\n The runcontrol object. It must contain the 'groups' and 'people'\n collections in the needed databases\n\n Returns\n -------\n The database '_id' of the group PI\n\n "
groupiter = list(all_docs_from_collection(rc.client, 'groups'))
peoplecoll = all_docs_from_collection(rc.client, 'people')
pi_ref = [i.get('pi_name') for i in groupiter if (i.get('name').casefold() == rc.groupname.casefold())]
pi = fuzzy_retrieval(peoplecoll, ['_id', 'aka', 'name'], pi_ref[0])
return pi.get('_id') |
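A sketch of the lookup chain with hypothetical collection entries: the group is matched on its name against rc.groupname, then the PI is fuzzy-matched in people on _id, aka, or name (assuming fuzzy_retrieval handles list-valued aka fields, as its use here implies).

groups = [{"_id": "bg", "name": "Billinge Group", "pi_name": "Simon Billinge"}]
people = [{"_id": "sbillinge", "name": "Simon J. L. Billinge",
           "aka": ["Simon Billinge"]}]
pi = fuzzy_retrieval(people, ["_id", "aka", "name"], groups[0]["pi_name"])
assert pi["_id"] == "sbillinge"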
def group_member_ids(ppl_coll, grpname):
'Get a list of all group member ids\n\n Parameters\n ----------\n ppl_coll: collection (list of dicts)\n The people collection that should contain the group members\n grp: string\n The id of the group in groups.yml\n\n Returns\n -------\n set:\n The set of ids of the people in the group\n\n Notes\n -----\n - Groups that are being tracked are listed in the groups.yml collection\n with a name and an id.\n - People are in a group during an educational or employment period.\n - To assign a person to a tracked group during one such period, add\n a "group" key to that education/employment item with a value\n that is the group id.\n - This function takes the group id that is passed and searches\n the people collection for all people that have been\n assigned to that group in some period of time and returns a list of\n '
grpmembers = set()
for person in ppl_coll:
for k in ['education', 'employment']:
for position in person.get(k, {}):
if (position.get('group', None) == grpname):
grpmembers.add(person['_id'])
return grpmembers | 7,553,083,287,698,605,000 | Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grp: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns a list of their ids.
'Get a list of all group member ids\n\n Parameters\n ----------\n ppl_coll: collection (list of dicts)\n The people collection that should contain the group members\n grp: string\n The id of the group in groups.yml\n\n Returns\n -------\n set:\n The set of ids of the people in the group\n\n Notes\n -----\n - Groups that are being tracked are listed in the groups.yml collection\n with a name and an id.\n - People are in a group during an educational or employment period.\n - To assign a person to a tracked group during one such period, add\n a "group" key to that education/employment item with a value\n that is the group id.\n - This function takes the group id that is passed and searches\n the people collection for all people that have been\n assigned to that group in some period of time and returns a list of\n '
grpmembers = set()
for person in ppl_coll:
for k in ['education', 'employment']:
for position in person.get(k, {}):
if (position.get('group', None) == grpname):
grpmembers.add(person['_id'])
return grpmembers |
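A toy people collection illustrating the notes above; membership is recorded per education/employment period through a "group" key:

ppl = [
    {"_id": "aeinstein",
     "employment": [{"organization": "IAS", "group": "bg"}]},
    {"_id": "nbohr",
     "education": [{"institution": "Copenhagen"}]},  # never assigned to "bg"
]
assert group_member_ids(ppl, "bg") == {"aeinstein"}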
def report_func(df: pd.DataFrame, **kwargs: Any) -> None:
'\n Create report function, used for performance testing.\n '
create_report(df, **kwargs) | -991,893,151,365,360,800 | Create report function, used for performance testing. | dataprep/tests/benchmarks/eda.py | report_func | Bowen0729/dataprep | python | def report_func(df: pd.DataFrame, **kwargs: Any) -> None:
'\n \n '
create_report(df, **kwargs) |
def test_create_report(benchmark: Any) -> None:
'\n Performance test of create report on titanic dataset.\n '
df = load_dataset('titanic')
benchmark(partial(report_func), df) | 2,975,394,867,720,784,000 | Performance test of create report on titanic dataset. | dataprep/tests/benchmarks/eda.py | test_create_report | Bowen0729/dataprep | python | def test_create_report(benchmark: Any) -> None:
'\n \n '
df = load_dataset('titanic')
benchmark(partial(report_func), df) |
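A variation, as a sketch in the same module: pytest-benchmark's benchmark fixture also accepts the callable plus its arguments directly, so report kwargs can be forwarded without partial. Treat the title keyword as an assumption about create_report's signature.

def test_create_report_titled(benchmark: Any) -> None:
    df = load_dataset("titanic")
    # benchmark(fn, *args, **kwargs) times fn(*args, **kwargs) repeatedly.
    benchmark(report_func, df, title="Titanic EDA")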
def _get_metadata(*parameters: Parameter) -> Dict[(str, Any)]:
'\n Return a dictionary that contains the parameter metadata grouped by the\n instrument it belongs to.\n '
metadata_timestamp = time.time()
metas: dict = defaultdict(list)
for parameter in parameters:
meta: Dict[(str, Optional[Union[(float, str)]])] = {}
meta['value'] = str(parameter.get_latest())
timestamp = parameter.get_latest.get_timestamp()
if (timestamp is not None):
meta['ts'] = timestamp.timestamp()
else:
meta['ts'] = None
meta['name'] = (parameter.label or parameter.name)
meta['unit'] = parameter.unit
baseinst = parameter.root_instrument
if (baseinst is None):
metas['Unbound Parameter'].append(meta)
else:
metas[str(baseinst)].append(meta)
parameters_out = []
for instrument in metas:
temp = {'instrument': instrument, 'parameters': metas[instrument]}
parameters_out.append(temp)
state = {'ts': metadata_timestamp, 'parameters': parameters_out}
return state | -1,330,515,832,946,222,600 | Return a dictionary that contains the parameter metadata grouped by the
instrument it belongs to. | qcodes/monitor/monitor.py | _get_metadata | Akshita07/Qcodes | python | def _get_metadata(*parameters: Parameter) -> Dict[(str, Any)]:
'\n Return a dictionary that contains the parameter metadata grouped by the\n instrument it belongs to.\n '
metadata_timestamp = time.time()
metas: dict = defaultdict(list)
for parameter in parameters:
meta: Dict[(str, Optional[Union[(float, str)]])] = {}
meta['value'] = str(parameter.get_latest())
timestamp = parameter.get_latest.get_timestamp()
if (timestamp is not None):
meta['ts'] = timestamp.timestamp()
else:
meta['ts'] = None
meta['name'] = (parameter.label or parameter.name)
meta['unit'] = parameter.unit
baseinst = parameter.root_instrument
if (baseinst is None):
metas['Unbound Parameter'].append(meta)
else:
metas[str(baseinst)].append(meta)
parameters_out = []
for instrument in metas:
temp = {'instrument': instrument, 'parameters': metas[instrument]}
parameters_out.append(temp)
state = {'ts': metadata_timestamp, 'parameters': parameters_out}
return state |
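A minimal sketch of the returned shape using an instrument-free parameter; the import path differs across qcodes versions and the values are illustrative.

from qcodes.parameters import Parameter  # older releases: from qcodes import Parameter

v = Parameter("v", label="Voltage", unit="V", get_cmd=lambda: 1.0)
v.get()  # populate the get_latest cache so "value" and "ts" are set
state = _get_metadata(v)
grp = state["parameters"][0]
assert grp["instrument"] == "Unbound Parameter"   # no root_instrument
assert grp["parameters"][0]["unit"] == "V"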
def _handler(parameters: Sequence[Parameter], interval: float) -> Callable[([websockets.WebSocketServerProtocol, str], Awaitable[None])]:
'\n Return the websockets server handler.\n '
async def server_func(websocket: websockets.WebSocketServerProtocol, _: str) -> None:
'\n Create a websockets handler that sends parameter values to a listener\n every "interval" seconds.\n '
while True:
try:
try:
meta = _get_metadata(*parameters)
except ValueError:
log.exception('Error getting parameters')
break
log.debug('sending.. to %r', websocket)
(await websocket.send(json.dumps(meta)))
(await asyncio.sleep(interval))
except (CancelledError, websockets.exceptions.ConnectionClosed):
log.debug('Got CancelledError or ConnectionClosed', exc_info=True)
break
log.debug('Closing websockets connection')
return server_func | 2,602,754,282,914,915,300 | Return the websockets server handler. | qcodes/monitor/monitor.py | _handler | Akshita07/Qcodes | python | def _handler(parameters: Sequence[Parameter], interval: float) -> Callable[([websockets.WebSocketServerProtocol, str], Awaitable[None])]:
'\n \n '
async def server_func(websocket: websockets.WebSocketServerProtocol, _: str) -> None:
'\n Create a websockets handler that sends parameter values to a listener\n every "interval" seconds.\n '
while True:
try:
try:
meta = _get_metadata(*parameters)
except ValueError:
log.exception('Error getting parameters')
break
log.debug('sending.. to %r', websocket)
(await websocket.send(json.dumps(meta)))
(await asyncio.sleep(interval))
except (CancelledError, websockets.exceptions.ConnectionClosed):
log.debug('Got CancelledError or ConnectionClosed', exc_info=True)
break
log.debug('Closing websockets connection')
return server_func |
async def server_func(websocket: websockets.WebSocketServerProtocol, _: str) -> None:
'\n Create a websockets handler that sends parameter values to a listener\n every "interval" seconds.\n '
while True:
try:
try:
meta = _get_metadata(*parameters)
except ValueError:
log.exception('Error getting parameters')
break
log.debug('sending.. to %r', websocket)
(await websocket.send(json.dumps(meta)))
(await asyncio.sleep(interval))
except (CancelledError, websockets.exceptions.ConnectionClosed):
log.debug('Got CancelledError or ConnectionClosed', exc_info=True)
break
log.debug('Closing websockets connection') | -3,658,804,037,770,032,600 | Create a websockets handler that sends parameter values to a listener
every "interval" seconds. | qcodes/monitor/monitor.py | server_func | Akshita07/Qcodes | python | async def server_func(websocket: websockets.WebSocketServerProtocol, _: str) -> None:
'\n Create a websockets handler that sends parameter values to a listener\n every "interval" seconds.\n '
while True:
try:
try:
meta = _get_metadata(*parameters)
except ValueError:
log.exception('Error getting parameters')
break
log.debug('sending.. to %r', websocket)
(await websocket.send(json.dumps(meta)))
(await asyncio.sleep(interval))
except (CancelledError, websockets.exceptions.ConnectionClosed):
log.debug('Got CancelledError or ConnectionClosed', exc_info=True)
break
log.debug('Closing websockets connection') |
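A minimal client sketch for the handler above; 5678 is an assumption for WEBSOCKET_PORT's value in this module.

import asyncio
import json
import websockets

async def read_monitor_once(port: int = 5678) -> dict:
    # Receive a single JSON metadata frame from the running monitor server.
    async with websockets.connect(f"ws://127.0.0.1:{port}") as ws:
        return json.loads(await ws.recv())

# state = asyncio.run(read_monitor_once())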
def __init__(self, *parameters: Parameter, interval: float=1):
'\n Monitor qcodes parameters.\n\n Args:\n *parameters: Parameters to monitor.\n interval: How often one wants to refresh the values.\n '
super().__init__()
for parameter in parameters:
if (not isinstance(parameter, Parameter)):
raise TypeError(f'We can only monitor QCodes Parameters, not {type(parameter)}')
self.loop: Optional[asyncio.AbstractEventLoop] = None
self.server: Optional[websockets.WebSocketServer] = None
self._parameters = parameters
self.loop_is_closed = Event()
self.server_is_started = Event()
self.handler = _handler(parameters, interval=interval)
log.debug('Start monitoring thread')
if Monitor.running:
log.debug('Stopping and restarting server')
Monitor.running.stop()
self.start()
self.server_is_started.wait(timeout=5)
if (not self.server_is_started.is_set()):
raise RuntimeError('Failed to start server')
Monitor.running = self | 5,824,786,925,298,103,000 | Monitor qcodes parameters.
Args:
*parameters: Parameters to monitor.
interval: How often one wants to refresh the values. | qcodes/monitor/monitor.py | __init__ | Akshita07/Qcodes | python | def __init__(self, *parameters: Parameter, interval: float=1):
'\n Monitor qcodes parameters.\n\n Args:\n *parameters: Parameters to monitor.\n interval: How often one wants to refresh the values.\n '
super().__init__()
for parameter in parameters:
if (not isinstance(parameter, Parameter)):
raise TypeError(f'We can only monitor QCodes Parameters, not {type(parameter)}')
self.loop: Optional[asyncio.AbstractEventLoop] = None
self.server: Optional[websockets.WebSocketServer] = None
self._parameters = parameters
self.loop_is_closed = Event()
self.server_is_started = Event()
self.handler = _handler(parameters, interval=interval)
log.debug('Start monitoring thread')
if Monitor.running:
log.debug('Stopping and restarting server')
Monitor.running.stop()
self.start()
self.server_is_started.wait(timeout=5)
if (not self.server_is_started.is_set()):
raise RuntimeError('Failed to start server')
Monitor.running = self |
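Typical usage as a sketch; the parameter construction and the port of the browser UI (SERVER_PORT, assumed 3000) are illustrative.

from qcodes.parameters import Parameter  # older releases: from qcodes import Parameter

p1 = Parameter("p1", unit="V", get_cmd=lambda: 0.1)
p2 = Parameter("p2", unit="A", get_cmd=lambda: 0.2)
monitor = Monitor(p1, p2, interval=0.5)  # starts the server thread, waits until ready
Monitor.show()                           # opens http://localhost:3000
monitor.stop()                           # joins the thread, clears Monitor.running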
def run(self) -> None:
'\n Start the event loop and run forever.\n '
log.debug('Running Websocket server')
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
try:
server_start = websockets.serve(self.handler, '127.0.0.1', WEBSOCKET_PORT, close_timeout=1)
self.server = self.loop.run_until_complete(server_start)
self.server_is_started.set()
self.loop.run_forever()
except OSError:
log.exception('Server could not be started')
finally:
log.debug('loop stopped')
log.debug('Pending tasks at close: %r', all_tasks(self.loop))
self.loop.close()
log.debug('loop closed')
self.loop_is_closed.set() | 6,689,847,950,625,658,000 | Start the event loop and run forever. | qcodes/monitor/monitor.py | run | Akshita07/Qcodes | python | def run(self) -> None:
'\n \n '
log.debug('Running Websocket server')
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
try:
server_start = websockets.serve(self.handler, '127.0.0.1', WEBSOCKET_PORT, close_timeout=1)
self.server = self.loop.run_until_complete(server_start)
self.server_is_started.set()
self.loop.run_forever()
except OSError:
log.exception('Server could not be started')
finally:
log.debug('loop stopped')
log.debug('Pending tasks at close: %r', all_tasks(self.loop))
self.loop.close()
log.debug('loop closed')
self.loop_is_closed.set() |
def update_all(self) -> None:
'\n Update all parameters in the monitor.\n '
for parameter in self._parameters:
with suppress(TypeError):
parameter.get() | 6,364,364,353,977,618,000 | Update all parameters in the monitor. | qcodes/monitor/monitor.py | update_all | Akshita07/Qcodes | python | def update_all(self) -> None:
'\n \n '
for parameter in self._parameters:
with suppress(TypeError):
parameter.get() |
def stop(self) -> None:
'\n Shutdown the server, close the event loop and join the thread.\n Setting active Monitor to ``None``.\n '
self.join()
Monitor.running = None | -9,010,464,473,570,652,000 | Shutdown the server, close the event loop and join the thread.
Setting active Monitor to ``None``. | qcodes/monitor/monitor.py | stop | Akshita07/Qcodes | python | def stop(self) -> None:
'\n Shutdown the server, close the event loop and join the thread.\n Setting active Monitor to ``None``.\n '
self.join()
Monitor.running = None |
def join(self, timeout: Optional[float]=None) -> None:
'\n Overwrite ``Thread.join`` to make sure server is stopped before\n joining avoiding a potential deadlock.\n '
log.debug('Shutting down server')
if (not self.is_alive()):
log.debug('monitor is dead')
return
try:
if (self.loop is not None):
asyncio.run_coroutine_threadsafe(self.__stop_server(), self.loop)
except RuntimeError:
log.exception('Could not close loop')
self.loop_is_closed.wait(timeout=5)
if (not self.loop_is_closed.is_set()):
raise RuntimeError('Failed to join loop')
log.debug('Loop reported closed')
super().join(timeout=timeout)
log.debug('Monitor Thread has joined') | 1,480,710,171,094,641,700 | Overwrite ``Thread.join`` to make sure server is stopped before
joining avoiding a potential deadlock. | qcodes/monitor/monitor.py | join | Akshita07/Qcodes | python | def join(self, timeout: Optional[float]=None) -> None:
'\n Overwrite ``Thread.join`` to make sure server is stopped before\n joining avoiding a potential deadlock.\n '
log.debug('Shutting down server')
if (not self.is_alive()):
log.debug('monitor is dead')
return
try:
if (self.loop is not None):
asyncio.run_coroutine_threadsafe(self.__stop_server(), self.loop)
except RuntimeError:
log.exception('Could not close loop')
self.loop_is_closed.wait(timeout=5)
if (not self.loop_is_closed.is_set()):
raise RuntimeError('Failed to join loop')
log.debug('Loop reported closed')
super().join(timeout=timeout)
log.debug('Monitor Thread has joined') |
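The cross-thread shutdown used above, in isolation: a generic asyncio sketch (not qcodes-specific) of handing a coroutine to a loop owned by another thread.

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def shutdown() -> None:
    loop.stop()  # runs inside the loop thread; run_forever then returns

# run_coroutine_threadsafe is the only safe way to schedule from outside:
asyncio.run_coroutine_threadsafe(shutdown(), loop)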
@staticmethod
def show() -> None:
'\n Overwrite this method to show/raise your monitor GUI\n F.ex.\n\n ::\n\n import webbrowser\n url = "localhost:3000"\n # Open URL in new window, raising the window if possible.\n webbrowser.open_new(url)\n\n '
webbrowser.open('http://localhost:{}'.format(SERVER_PORT)) | 3,710,049,875,675,028,000 | Overwrite this method to show/raise your monitor GUI
F.ex.
::
import webbrowser
url = "localhost:3000"
# Open URL in new window, raising the window if possible.
webbrowser.open_new(url) | qcodes/monitor/monitor.py | show | Akshita07/Qcodes | python | @staticmethod
def show() -> None:
'\n Overwrite this method to show/raise your monitor GUI\n F.ex.\n\n ::\n\n import webbrowser\n url = "localhost:3000"\n # Open URL in new window, raising the window if possible.\n webbrowser.open_new(url)\n\n '
webbrowser.open('http://localhost:{}'.format(SERVER_PORT)) |
def set_test_params(self):
' Setup test environment\n :param:\n :return:\n '
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = ([['-staking=1', '-debug=net']] * self.num_nodes) | -2,748,070,360,690,059,300 | Setup test environment
:param:
:return: | test/functional/fake_stake/base_test.py | set_test_params | tdpsdevextreme/TradePlusCoin | python | def set_test_params(self):
' Setup test environment\n :param:\n :return:\n '
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = ([['-staking=1', '-debug=net']] * self.num_nodes) |
def setup_network(self):
" Can't rely on syncing all the nodes when staking=1\n :param:\n :return:\n "
self.setup_nodes()
for i in range((self.num_nodes - 1)):
for j in range((i + 1), self.num_nodes):
connect_nodes_bi(self.nodes, i, j) | 4,878,764,237,105,450,000 | Can't rely on syncing all the nodes when staking=1
:param:
:return: | test/functional/fake_stake/base_test.py | setup_network | tdpsdevextreme/TradePlusCoin | python | def setup_network(self):
" Can't rely on syncing all the nodes when staking=1\n :param:\n :return:\n "
self.setup_nodes()
for i in range((self.num_nodes - 1)):
for j in range((i + 1), self.num_nodes):
connect_nodes_bi(self.nodes, i, j) |
def init_test(self):
' Initializes test parameters\n :param:\n :return:\n '
title = ('*** Starting %s ***' % self.__class__.__name__)
underline = ('-' * len(title))
self.log.info('\n\n%s\n%s\n%s\n', title, underline, self.description)
self.DEFAULT_FEE = 0.1
self.NUM_BLOCKS = 30
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(TestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start()
self.node = self.nodes[0]
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack() | -5,515,092,755,267,224,000 | Initializes test parameters
:param:
:return: | test/functional/fake_stake/base_test.py | init_test | tdpsdevextreme/TradePlusCoin | python | def init_test(self):
' Initializes test parameters\n :param:\n :return:\n '
title = ('*** Starting %s ***' % self.__class__.__name__)
underline = ('-' * len(title))
self.log.info('\n\n%s\n%s\n%s\n', title, underline, self.description)
self.DEFAULT_FEE = 0.1
self.NUM_BLOCKS = 30
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(TestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start()
self.node = self.nodes[0]
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack() |
def run_test(self):
' Performs the attack of this test - run init_test first.\n :param:\n :return:\n '
self.description = ''
self.init_test()
return | 3,115,403,244,731,384,000 | Performs the attack of this test - run init_test first.
:param:
:return: | test/functional/fake_stake/base_test.py | run_test | tdpsdevextreme/TradePlusCoin | python | def run_test(self):
' Performs the attack of this test - run init_test first.\n :param:\n :return:\n '
self.description =
self.init_test()
return |
def create_spam_block(self, hashPrevBlock, stakingPrevOuts, height, fStakeDoubleSpent=False, fZPoS=False, spendingPrevOuts={}):
' creates a block to spam the network with\n :param hashPrevBlock: (hex string) hash of previous block\n stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)\n map outpoints (to be used as staking inputs) to amount, block_time, nStakeModifier, hashStake\n height: (int) block height\n fStakeDoubleSpent: (bool) spend the coinstake input inside the block\n fZPoS: (bool) stake the block with zerocoin\n spendingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)\n map outpoints (to be used as tx inputs) to amount, block_time, nStakeModifier, hashStake\n :return block: (CBlock) generated block\n '
if (len(spendingPrevOuts) == 0):
spendingPrevOuts = dict(stakingPrevOuts)
current_time = int(time.time())
nTime = (current_time & 4294967280)
coinbase = create_coinbase(height)
coinbase.vout[0].nValue = 0
coinbase.vout[0].scriptPubKey = b''
coinbase.nTime = nTime
coinbase.rehash()
block = create_block(int(hashPrevBlock, 16), coinbase, nTime)
if (not block.solve_stake(stakingPrevOuts)):
raise Exception('Not able to solve for any prev_outpoint')
signed_stake_tx = self.sign_stake_tx(block, stakingPrevOuts[block.prevoutStake][0], fZPoS)
block.vtx.append(signed_stake_tx)
if ((not fZPoS) and (not fStakeDoubleSpent)):
del spendingPrevOuts[block.prevoutStake]
if (len(spendingPrevOuts) > 0):
del spendingPrevOuts[choice(list(spendingPrevOuts))]
for outPoint in spendingPrevOuts:
value_out = int((spendingPrevOuts[outPoint][0] - (self.DEFAULT_FEE * COIN)))
tx = create_transaction(outPoint, b'', value_out, nTime, scriptPubKey=CScript([self.block_sig_key.get_pubkey(), OP_CHECKSIG]))
signed_tx_hex = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
signed_tx = CTransaction()
signed_tx.deserialize(BytesIO(hex_str_to_bytes(signed_tx_hex)))
block.vtx.append(signed_tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.sign_block(self.block_sig_key)
return block | 5,502,691,599,703,268,000 | creates a block to spam the network with
:param hashPrevBlock: (hex string) hash of previous block
stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints (to be used as staking inputs) to amount, block_time, nStakeModifier, hashStake
height: (int) block height
fStakeDoubleSpent: (bool) spend the coinstake input inside the block
fZPoS: (bool) stake the block with zerocoin
spendingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints (to be used as tx inputs) to amount, block_time, nStakeModifier, hashStake
:return block: (CBlock) generated block | test/functional/fake_stake/base_test.py | create_spam_block | tdpsdevextreme/TradePlusCoin | python | def create_spam_block(self, hashPrevBlock, stakingPrevOuts, height, fStakeDoubleSpent=False, fZPoS=False, spendingPrevOuts={}):
' creates a block to spam the network with\n :param hashPrevBlock: (hex string) hash of previous block\n stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)\n map outpoints (to be used as staking inputs) to amount, block_time, nStakeModifier, hashStake\n height: (int) block height\n fStakeDoubleSpent: (bool) spend the coinstake input inside the block\n fZPoS: (bool) stake the block with zerocoin\n spendingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)\n map outpoints (to be used as tx inputs) to amount, block_time, nStakeModifier, hashStake\n :return block: (CBlock) generated block\n '
if (len(spendingPrevOuts) == 0):
spendingPrevOuts = dict(stakingPrevOuts)
current_time = int(time.time())
nTime = (current_time & 4294967280)
coinbase = create_coinbase(height)
coinbase.vout[0].nValue = 0
coinbase.vout[0].scriptPubKey = b
coinbase.nTime = nTime
coinbase.rehash()
block = create_block(int(hashPrevBlock, 16), coinbase, nTime)
if (not block.solve_stake(stakingPrevOuts)):
raise Exception('Not able to solve for any prev_outpoint')
signed_stake_tx = self.sign_stake_tx(block, stakingPrevOuts[block.prevoutStake][0], fZPoS)
block.vtx.append(signed_stake_tx)
if ((not fZPoS) and (not fStakeDoubleSpent)):
del spendingPrevOuts[block.prevoutStake]
if (len(spendingPrevOuts) > 0):
del spendingPrevOuts[choice(list(spendingPrevOuts))]
for outPoint in spendingPrevOuts:
value_out = int((spendingPrevOuts[outPoint][0] - (self.DEFAULT_FEE * COIN)))
tx = create_transaction(outPoint, b, value_out, nTime, scriptPubKey=CScript([self.block_sig_key.get_pubkey(), OP_CHECKSIG]))
signed_tx_hex = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
signed_tx = CTransaction()
signed_tx.deserialize(BytesIO(hex_str_to_bytes(signed_tx_hex)))
block.vtx.append(signed_tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.sign_block(self.block_sig_key)
return block |
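A sketch of how a subclass's run_test might assemble and submit one such block; the RPC flow is illustrative and submitblock is assumed to be available on the node.

utxos = self.node.listunspent()
height = self.node.getblockcount()
staking_prevouts = self.get_prevouts(utxos, height)
block = self.create_spam_block(self.node.getbestblockhash(),
                               staking_prevouts, height + 1)
self.node.submitblock(bytes_to_hex_str(block.serialize()))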
def spend_utxo(self, utxo, address_list):
' spend amount from previously unspent output to a provided address\n :param utxo: (JSON) returned from listunspent used as input\n addresslist: (string) destination address\n :return: txhash: (string) tx hash if successful, empty string otherwise\n '
try:
inputs = [{'txid': utxo['txid'], 'vout': utxo['vout']}]
out_amount = ((float(utxo['amount']) - self.DEFAULT_FEE) / len(address_list))
outputs = {}
for address in address_list:
outputs[address] = out_amount
spendingTx = self.node.createrawtransaction(inputs, outputs)
spendingTx_signed = self.node.signrawtransaction(spendingTx)
if spendingTx_signed['complete']:
txhash = self.node.sendrawtransaction(spendingTx_signed['hex'])
return txhash
else:
self.log.warning(('Error: %s' % str(spendingTx_signed['errors'])))
return ''
except JSONRPCException as e:
self.log.error(('JSONRPCException: %s' % str(e)))
return '' | -1,279,210,864,629,148,000 | spend amount from previously unspent output to a provided address
:param utxo: (JSON) returned from listunspent used as input
addresslist: (string) destination address
:return: txhash: (string) tx hash if successful, empty string otherwise | test/functional/fake_stake/base_test.py | spend_utxo | tdpsdevextreme/TradePlusCoin | python | def spend_utxo(self, utxo, address_list):
' spend amount from previously unspent output to a provided address\n :param utxo: (JSON) returned from listunspent used as input\n addresslist: (string) destination address\n :return: txhash: (string) tx hash if successful, empty string otherwise\n '
try:
inputs = [{'txid': utxo['txid'], 'vout': utxo['vout']}]
out_amount = ((float(utxo['amount']) - self.DEFAULT_FEE) / len(address_list))
outputs = {}
for address in address_list:
outputs[address] = out_amount
spendingTx = self.node.createrawtransaction(inputs, outputs)
spendingTx_signed = self.node.signrawtransaction(spendingTx)
if spendingTx_signed['complete']:
txhash = self.node.sendrawtransaction(spendingTx_signed['hex'])
return txhash
else:
self.log.warning(('Error: %s' % str(spendingTx_signed['errors'])))
return
except JSONRPCException as e:
self.log.error(('JSONRPCException: %s' % str(e)))
return |
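A usage sketch inside a test: one wallet UTXO split evenly (minus the fee) across two fresh addresses.

utxo = self.node.listunspent()[0]
dests = [self.node.getnewaddress(), self.node.getnewaddress()]
txid = self.spend_utxo(utxo, dests)
assert txid != ''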
def spend_utxos(self, utxo_list, address_list=[]):
' spend utxos to provided list of addresses or 10 new generate ones.\n :param utxo_list: (JSON list) returned from listunspent used as input\n address_list: (string list) [optional] recipient TradePlus_Coin addresses. if not set,\n 10 new addresses will be generated from the wallet for each tx.\n :return: txHashes (string list) tx hashes\n '
txHashes = []
if (address_list == []):
for i in range(10):
address_list.append(self.node.getnewaddress())
for utxo in utxo_list:
try:
txHash = self.spend_utxo(utxo, address_list)
if (txHash != ''):
txHashes.append(txHash)
except JSONRPCException as e:
self.log.error(('JSONRPCException: %s' % str(e)))
continue
return txHashes | -6,621,179,609,126,178,000 | spend utxos to provided list of addresses or 10 new generate ones.
:param utxo_list: (JSON list) returned from listunspent used as input
address_list: (string list) [optional] recipient TradePlus_Coin addresses. if not set,
10 new addresses will be generated from the wallet for each tx.
:return: txHashes (string list) tx hashes | test/functional/fake_stake/base_test.py | spend_utxos | tdpsdevextreme/TradePlusCoin | python | def spend_utxos(self, utxo_list, address_list=[]):
' spend utxos to provided list of addresses or 10 new generate ones.\n :param utxo_list: (JSON list) returned from listunspent used as input\n address_list: (string list) [optional] recipient TradePlus_Coin addresses. if not set,\n 10 new addresses will be generated from the wallet for each tx.\n :return: txHashes (string list) tx hashes\n '
txHashes = []
if (address_list == []):
for i in range(10):
address_list.append(self.node.getnewaddress())
for utxo in utxo_list:
try:
txHash = self.spend_utxo(utxo, address_list)
if (txHash != ):
txHashes.append(txHash)
except JSONRPCException as e:
self.log.error(('JSONRPCException: %s' % str(e)))
continue
return txHashes |
def stake_amplification_step(self, utxo_list, address_list=[]):
' spends a list of utxos providing the list of new outputs\n :param utxo_list: (JSON list) returned from listunspent used as input\n address_list: (string list) [optional] recipient TradePlus_Coin addresses.\n :return: new_utxos: (JSON list) list of new (valid) inputs after the spends\n '
self.log.info('--> Stake Amplification step started with %d UTXOs', len(utxo_list))
txHashes = self.spend_utxos(utxo_list, address_list)
num_of_txes = len(txHashes)
new_utxos = []
if (num_of_txes > 0):
self.log.info(('Created %d transactions...Mining 2 blocks to include them...' % num_of_txes))
self.node.generate(2)
time.sleep(2)
new_utxos = self.node.listunspent()
self.log.info(('Amplification step produced %d new "Fake Stake" inputs:' % len(new_utxos)))
return new_utxos | 1,172,270,517,256,876,300 | spends a list of utxos providing the list of new outputs
:param utxo_list: (JSON list) returned from listunspent used as input
address_list: (string list) [optional] recipient TradePlus_Coin addresses.
:return: new_utxos: (JSON list) list of new (valid) inputs after the spends | test/functional/fake_stake/base_test.py | stake_amplification_step | tdpsdevextreme/TradePlusCoin | python | def stake_amplification_step(self, utxo_list, address_list=[]):
' spends a list of utxos providing the list of new outputs\n :param utxo_list: (JSON list) returned from listunspent used as input\n address_list: (string list) [optional] recipient TradePlus_Coin addresses.\n :return: new_utxos: (JSON list) list of new (valid) inputs after the spends\n '
self.log.info('--> Stake Amplification step started with %d UTXOs', len(utxo_list))
txHashes = self.spend_utxos(utxo_list, address_list)
num_of_txes = len(txHashes)
new_utxos = []
if (num_of_txes > 0):
self.log.info(('Created %d transactions...Mining 2 blocks to include them...' % num_of_txes))
self.node.generate(2)
time.sleep(2)
new_utxos = self.node.listunspent()
self.log.info(('Amplification step produced %d new "Fake Stake" inputs:' % len(new_utxos)))
return new_utxos |
def stake_amplification(self, utxo_list, iterations, address_list=[]):
' performs the "stake amplification" which gives higher chances at finding fake stakes\n :param utxo_list: (JSON list) returned from listunspent used as input\n iterations: (int) amount of stake amplification steps to perform\n address_list: (string list) [optional] recipient TradePlus_Coin addresses.\n :return: all_inputs: (JSON list) list of all spent inputs\n '
self.log.info('** Stake Amplification started with %d UTXOs', len(utxo_list))
valid_inputs = utxo_list
all_inputs = []
for i in range(iterations):
all_inputs = (all_inputs + valid_inputs)
old_inputs = valid_inputs
valid_inputs = self.stake_amplification_step(old_inputs, address_list)
self.log.info('** Stake Amplification ended with %d "fake" UTXOs', len(all_inputs))
return all_inputs | 5,069,938,516,817,489,000 | performs the "stake amplification" which gives higher chances at finding fake stakes
:param utxo_list: (JSON list) returned from listunspent used as input
iterations: (int) amount of stake amplification steps to perform
address_list: (string list) [optional] recipient TradePlus_Coin addresses.
:return: all_inputs: (JSON list) list of all spent inputs | test/functional/fake_stake/base_test.py | stake_amplification | tdpsdevextreme/TradePlusCoin | python | def stake_amplification(self, utxo_list, iterations, address_list=[]):
' performs the "stake amplification" which gives higher chances at finding fake stakes\n :param utxo_list: (JSON list) returned from listunspent used as input\n iterations: (int) amount of stake amplification steps to perform\n address_list: (string list) [optional] recipient TradePlus_Coin addresses.\n :return: all_inputs: (JSON list) list of all spent inputs\n '
self.log.info('** Stake Amplification started with %d UTXOs', len(utxo_list))
valid_inputs = utxo_list
all_inputs = []
for i in range(iterations):
all_inputs = (all_inputs + valid_inputs)
old_inputs = valid_inputs
valid_inputs = self.stake_amplification_step(old_inputs, address_list)
self.log.info('** Stake Amplification ended with %d "fake" UTXOs', len(all_inputs))
return all_inputs |
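A usage sketch: start from the wallet's current coins and amplify twice; every input spent along the way is collected as a potential fake stake.

utxo_list = self.node.listunspent()
fake_inputs = self.stake_amplification(utxo_list, iterations=2)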
def sign_stake_tx(self, block, stake_in_value, fZPoS=False):
' signs a coinstake transaction\n :param block: (CBlock) block with stake to sign\n stake_in_value: (int) staked amount\n fZPoS: (bool) zerocoin stake\n :return: stake_tx_signed: (CTransaction) signed tx\n '
self.block_sig_key = CECKey()
if fZPoS:
self.log.info('Signing zPoS stake...')
raw_stake = self.node.createrawzerocoinstake(block.prevoutStake)
stake_tx_signed_raw_hex = raw_stake['hex']
stake_pkey = raw_stake['private-key']
self.block_sig_key.set_compressed(True)
self.block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))
else:
self.block_sig_key.set_secretbytes(hash256(pack('<I', 65535)))
pubkey = self.block_sig_key.get_pubkey()
scriptPubKey = CScript([pubkey, OP_CHECKSIG])
outNValue = int((stake_in_value + (2 * COIN)))
stake_tx_unsigned = CTransaction()
stake_tx_unsigned.nTime = block.nTime
stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake))
stake_tx_unsigned.vin[0].nSequence = 4294967295
stake_tx_unsigned.vout.append(CTxOut())
stake_tx_unsigned.vout.append(CTxOut(outNValue, scriptPubKey))
stake_tx_signed_raw_hex = self.node.signrawtransaction(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
stake_tx_signed = CTransaction()
stake_tx_signed.deserialize(BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex)))
return stake_tx_signed | -4,420,679,656,985,043,500 | signs a coinstake transaction
:param block: (CBlock) block with stake to sign
stake_in_value: (int) staked amount
fZPoS: (bool) zerocoin stake
:return: stake_tx_signed: (CTransaction) signed tx | test/functional/fake_stake/base_test.py | sign_stake_tx | tdpsdevextreme/TradePlusCoin | python | def sign_stake_tx(self, block, stake_in_value, fZPoS=False):
' signs a coinstake transaction\n :param block: (CBlock) block with stake to sign\n stake_in_value: (int) staked amount\n fZPoS: (bool) zerocoin stake\n :return: stake_tx_signed: (CTransaction) signed tx\n '
self.block_sig_key = CECKey()
if fZPoS:
self.log.info('Signing zPoS stake...')
raw_stake = self.node.createrawzerocoinstake(block.prevoutStake)
stake_tx_signed_raw_hex = raw_stake['hex']
stake_pkey = raw_stake['private-key']
self.block_sig_key.set_compressed(True)
self.block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))
else:
self.block_sig_key.set_secretbytes(hash256(pack('<I', 65535)))
pubkey = self.block_sig_key.get_pubkey()
scriptPubKey = CScript([pubkey, OP_CHECKSIG])
outNValue = int((stake_in_value + (2 * COIN)))
stake_tx_unsigned = CTransaction()
stake_tx_unsigned.nTime = block.nTime
stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake))
stake_tx_unsigned.vin[0].nSequence = 4294967295
stake_tx_unsigned.vout.append(CTxOut())
stake_tx_unsigned.vout.append(CTxOut(outNValue, scriptPubKey))
stake_tx_signed_raw_hex = self.node.signrawtransaction(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
stake_tx_signed = CTransaction()
stake_tx_signed.deserialize(BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex)))
return stake_tx_signed |
def get_prevouts(self, utxo_list, blockHeight, zpos=False):
' get prevouts (map) for each utxo in a list\n :param utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input\n <if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input\n blockHeight: (int) height of the previous block\n zpos: (bool) type of utxo_list\n :return: stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)\n map outpoints to amount, block_time, nStakeModifier, hashStake\n '
zerocoinDenomList = [1, 5, 10, 50, 100, 500, 1000, 5000]
stakingPrevOuts = {}
for utxo in utxo_list:
if zpos:
checkpointHeight = (blockHeight - 200)
checkpointBlock = self.node.getblock(self.node.getblockhash(checkpointHeight), True)
checkpoint = int(checkpointBlock['acc_checkpoint'], 16)
pos = zerocoinDenomList.index(utxo['denomination'])
checksum = ((checkpoint >> (32 * ((len(zerocoinDenomList) - 1) - pos))) & 4294967295)
checksumBlock = self.node.getchecksumblock(hex(checksum), utxo['denomination'], True)
txBlockhash = checksumBlock['hash']
txBlocktime = checksumBlock['time']
else:
utxo_tx = self.node.getrawtransaction(utxo['txid'], 1)
txBlocktime = utxo_tx['blocktime']
txBlockhash = utxo_tx['blockhash']
stakeModifier = int(self.node.getblock(txBlockhash)['modifier'], 16)
utxo_to_stakingPrevOuts(utxo, stakingPrevOuts, txBlocktime, stakeModifier, zpos)
return stakingPrevOuts | -6,867,077,410,439,967,000 | get prevouts (map) for each utxo in a list
:param utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input
<if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input
blockHeight: (int) height of the previous block
zpos: (bool) type of utxo_list
:return: stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints to amount, block_time, nStakeModifier, hashStake | test/functional/fake_stake/base_test.py | get_prevouts | tdpsdevextreme/TradePlusCoin | python | def get_prevouts(self, utxo_list, blockHeight, zpos=False):
' get prevouts (map) for each utxo in a list\n :param utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input\n <if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input\n blockHeight: (int) height of the previous block\n zpos: (bool) type of utxo_list\n :return: stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)\n map outpoints to amount, block_time, nStakeModifier, hashStake\n '
zerocoinDenomList = [1, 5, 10, 50, 100, 500, 1000, 5000]
stakingPrevOuts = {}
for utxo in utxo_list:
if zpos:
checkpointHeight = (blockHeight - 200)
checkpointBlock = self.node.getblock(self.node.getblockhash(checkpointHeight), True)
checkpoint = int(checkpointBlock['acc_checkpoint'], 16)
pos = zerocoinDenomList.index(utxo['denomination'])
checksum = ((checkpoint >> (32 * ((len(zerocoinDenomList) - 1) - pos))) & 4294967295)
checksumBlock = self.node.getchecksumblock(hex(checksum), utxo['denomination'], True)
txBlockhash = checksumBlock['hash']
txBlocktime = checksumBlock['time']
else:
utxo_tx = self.node.getrawtransaction(utxo['txid'], 1)
txBlocktime = utxo_tx['blocktime']
txBlockhash = utxo_tx['blockhash']
stakeModifier = int(self.node.getblock(txBlockhash)['modifier'], 16)
utxo_to_stakingPrevOuts(utxo, stakingPrevOuts, txBlocktime, stakeModifier, zpos)
return stakingPrevOuts |
def log_data_dir_size(self):
" Prints the size of the '/regtest/blocks' directory.\n :param:\n :return:\n "
init_size = dir_size((self.node.datadir + '/regtest/blocks'))
self.log.info(('Size of data dir: %s kilobytes' % str(init_size))) | 4,494,430,405,223,330,000 | Prints the size of the '/regtest/blocks' directory.
:param:
:return: | test/functional/fake_stake/base_test.py | log_data_dir_size | tdpsdevextreme/TradePlusCoin | python | def log_data_dir_size(self):
" Prints the size of the '/regtest/blocks' directory.\n :param:\n :return:\n "
init_size = dir_size((self.node.datadir + '/regtest/blocks'))
self.log.info(('Size of data dir: %s kilobytes' % str(init_size))) |
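dir_size is a helper from the test framework; a minimal stand-in consistent with the kilobyte log line above:

import os

def dir_size(path: str) -> int:
    # Recursively sum file sizes under path, reported in kilobytes.
    total = 0
    for root, _, files in os.walk(path):
        for name in files:
            total += os.path.getsize(os.path.join(root, name))
    return total // 1024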