code | apis | extract_api
---|---|---
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from shadon.tsetsHttp import testsHttp
from shadon.testsConfig import testsConfig
import os
class testsToken():
def __init__(self):
self.url = '/oauth/authorizationServer/accessToken'
self.mytestsConfig = testsConfig()
self.mytestsConfig.getConfig()
self.path = os.path.dirname(__file__) + "/../config/" + self.mytestsConfig.env + "/"
self.grant='client_credentials'
pass
def setGrant(self,grant):
global localgrant
localgrant = grant
if os.path.exists(self.path + 'token.txt'):
os.remove(self.path + 'token.txt')
pass
def getToken(self):
global apiToken
if os.path.exists(self.path+ 'token.txt') != True:
self.setToken(localgrant)
file = open(self.path + 'token.txt', 'r')
value = file.read()
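# token.txt holds the str()-serialized token dict; eval() turns it back into a dict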
apiToken = eval(value)
file.close()
return apiToken
def setToken(self,grant):
myhttp = testsHttp()
myhttp.set_url(self.url)
self.data = {"grant_type": "client_credentials", "client_id": self.mytestsConfig.client_id,"client_secret": self.mytestsConfig.client_secret}
if grant == 'password':
self.mytestsConfig.grant_type = self.mytestsConfig.getFile('password', 'grant_type')
self.mytestsConfig.username = self.mytestsConfig.getFile('password', 'username')
self.mytestsConfig.password = self.mytestsConfig.getFile('password', 'password')
self.data = {"grant_type": "password", "client_id": self.mytestsConfig.client_id,"client_secret": self.mytestsConfig.client_secret,"username":self.mytestsConfig.username,"password":self.mytestsConfig.password}
myhttp.set_data(self.data)
tokenInfo =myhttp.post().json()
# Create the directory if it does not exist
if os.path.exists(self.path) != True:
os.makedirs(self.path)
# Write the token data to the file
file = open(self.path+'token.txt','w')
file.write(str(tokenInfo))
file.close()
pass
if __name__ == "__main__":
shadon = testsToken()
shadon.setToken('<PASSWORD>')
print(shadon.getToken())
|
[
"os.remove",
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"shadon.testsConfig.testsConfig",
"shadon.tsetsHttp.testsHttp"
] |
[((273, 286), 'shadon.testsConfig.testsConfig', 'testsConfig', ([], {}), '()\n', (284, 286), False, 'from shadon.testsConfig import testsConfig\n'), ((1024, 1035), 'shadon.tsetsHttp.testsHttp', 'testsHttp', ([], {}), '()\n', (1033, 1035), False, 'from shadon.tsetsHttp import testsHttp\n'), ((567, 606), 'os.path.exists', 'os.path.exists', (["(self.path + 'token.txt')"], {}), "(self.path + 'token.txt')\n", (581, 606), False, 'import os\n'), ((628, 662), 'os.remove', 'os.remove', (["(self.path + 'token.txt')"], {}), "(self.path + 'token.txt')\n", (637, 662), False, 'import os\n'), ((736, 775), 'os.path.exists', 'os.path.exists', (["(self.path + 'token.txt')"], {}), "(self.path + 'token.txt')\n", (750, 775), False, 'import os\n'), ((1864, 1889), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (1878, 1889), False, 'import os\n'), ((1911, 1933), 'os.makedirs', 'os.makedirs', (['self.path'], {}), '(self.path)\n', (1922, 1933), False, 'import os\n'), ((346, 371), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (361, 371), False, 'import os\n')]
|
from parameterized import parameterized
from rest_framework import status
from rest_framework.reverse import reverse
from api.applications.enums import (
ApplicationExportType,
ApplicationExportLicenceOfficialType,
GoodsTypeCategory,
)
from api.applications.models import (
StandardApplication,
OpenApplication,
HmrcQuery,
BaseApplication,
ExhibitionClearanceApplication,
GiftingClearanceApplication,
F680ClearanceApplication,
)
from api.cases.enums import CaseTypeEnum, CaseTypeReferenceEnum
from lite_content.lite_api import strings
from api.staticdata.trade_control.enums import TradeControlActivity, TradeControlProductCategory
from test_helpers.clients import DataTestClient
class DraftTests(DataTestClient):
url = reverse("applications:applications")
def test_create_draft_standard_individual_export_application_successful(self):
"""
Ensure we can create a new standard individual export application draft
"""
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.SIEL,
"export_type": ApplicationExportType.TEMPORARY,
"have_you_been_informed": ApplicationExportLicenceOfficialType.YES,
"reference_number_on_information_form": "123",
}
response = self.client.post(self.url, data, **self.exporter_headers)
response_data = response.json()
standard_application = StandardApplication.objects.get()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response_data["id"], str(standard_application.id))
self.assertEqual(StandardApplication.objects.count(), 1)
def test_create_draft_exhibition_clearance_application_successful(self):
"""
Ensure we can create a new Exhibition Clearance draft object
"""
self.assertEqual(ExhibitionClearanceApplication.objects.count(), 0)
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.EXHC,
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ExhibitionClearanceApplication.objects.count(), 1)
def test_create_draft_gifting_clearance_application_successful(self):
"""
Ensure we can create a new Gifting Clearance draft object
"""
self.assertEqual(GiftingClearanceApplication.objects.count(), 0)
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.GIFT,
}
response = self.client.post(self.url, data, **self.exporter_headers)
application = GiftingClearanceApplication.objects.get()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(GiftingClearanceApplication.objects.count(), 1)
self.assertEqual(application.name, data["name"])
self.assertEqual(application.case_type.id, CaseTypeEnum.GIFTING.id)
def test_create_draft_f680_clearance_application_successful(self):
"""
Ensure we can create a new F680 Clearance draft object
"""
self.assertEqual(F680ClearanceApplication.objects.count(), 0)
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.F680,
}
response = self.client.post(self.url, data, **self.exporter_headers)
application = F680ClearanceApplication.objects.get()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(F680ClearanceApplication.objects.count(), 1)
self.assertEqual(application.name, data["name"])
self.assertEqual(application.case_type.id, CaseTypeEnum.F680.id)
def test_create_draft_open_application_successful(self):
"""
Ensure we can create a new open application draft object
"""
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.OIEL,
"export_type": ApplicationExportType.TEMPORARY,
"goodstype_category": GoodsTypeCategory.MILITARY,
"contains_firearm_goods": True,
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(OpenApplication.objects.count(), 1)
def test_create_draft_hmrc_query_successful(self):
"""
Ensure we can create a new HMRC query draft object
"""
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.CRE,
"organisation": self.organisation.id,
}
response = self.client.post(self.url, data, **self.hmrc_exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(HmrcQuery.objects.count(), 1)
def test_create_draft_hmrc_query_failure(self):
"""
Ensure that a normal exporter cannot create an HMRC query
"""
data = {
"application_type": CaseTypeReferenceEnum.CRE,
"organisation": self.organisation.id,
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(HmrcQuery.objects.count(), 0)
@parameterized.expand(
[
[{}],
[{"application_type": CaseTypeReferenceEnum.SIEL, "export_type": ApplicationExportType.TEMPORARY}],
[{"name": "Test", "export_type": ApplicationExportType.TEMPORARY}],
[{"name": "Test", "application_type": CaseTypeReferenceEnum.SIEL}],
[{"application_type": CaseTypeReferenceEnum.EXHC}],
[{"name": "Test"}],
]
)
def test_create_draft_failure(self, data):
"""
Ensure we cannot create a new draft object with POST data that is missing required properties
Applications require: application_type, export_type & name
Exhibition clearances require: application_type & name
Above is a mixture of invalid combinations for these cases
"""
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(BaseApplication.objects.count(), 0)
def test_create_no_application_type_failure(self):
"""
Ensure that we cannot create a new application without
providing a application_type.
"""
data = {}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json()["errors"]["application_type"][0], strings.Applications.Generic.SELECT_A_LICENCE_TYPE
)
@parameterized.expand(
[(CaseTypeEnum.SICL.reference, StandardApplication), (CaseTypeEnum.OICL.reference, OpenApplication)]
)
def test_trade_control_application(self, case_type, model):
data = {
"name": "Test",
"application_type": case_type,
"trade_control_activity": TradeControlActivity.OTHER,
"trade_control_activity_other": "other activity type",
"trade_control_product_categories": [key for key, _ in TradeControlProductCategory.choices],
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
application_id = response.json()["id"]
application = model.objects.get(id=application_id)
self.assertEqual(application.trade_control_activity, data["trade_control_activity"])
self.assertEqual(application.trade_control_activity_other, data["trade_control_activity_other"])
self.assertEqual(
set(application.trade_control_product_categories), set(data["trade_control_product_categories"])
)
@parameterized.expand(
[
(
CaseTypeEnum.SICL.reference,
"trade_control_activity",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_ERROR,
),
(
CaseTypeEnum.SICL.reference,
"trade_control_activity_other",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_OTHER_ERROR,
),
(
CaseTypeEnum.SICL.reference,
"trade_control_product_categories",
strings.Applications.Generic.TRADE_CONTROl_PRODUCT_CATEGORY_ERROR,
),
(
CaseTypeEnum.OICL.reference,
"trade_control_activity",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_ERROR,
),
(
CaseTypeEnum.OICL.reference,
"trade_control_activity_other",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_OTHER_ERROR,
),
(
CaseTypeEnum.OICL.reference,
"trade_control_product_categories",
strings.Applications.Generic.TRADE_CONTROl_PRODUCT_CATEGORY_ERROR,
),
]
)
def test_trade_control_application_failure(self, case_type, missing_field, expected_error):
data = {
"name": "Test",
"application_type": case_type,
"trade_control_activity": TradeControlActivity.OTHER,
"trade_control_activity_other": "other activity type",
"trade_control_product_categories": [key for key, _ in TradeControlProductCategory.choices],
}
data.pop(missing_field)
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
errors = response.json()["errors"]
self.assertEqual(errors[missing_field], [expected_error])
|
[
"api.applications.models.StandardApplication.objects.get",
"api.applications.models.GiftingClearanceApplication.objects.count",
"api.applications.models.F680ClearanceApplication.objects.get",
"api.applications.models.F680ClearanceApplication.objects.count",
"api.applications.models.StandardApplication.objects.count",
"rest_framework.reverse.reverse",
"parameterized.parameterized.expand",
"api.applications.models.BaseApplication.objects.count",
"api.applications.models.ExhibitionClearanceApplication.objects.count",
"api.applications.models.HmrcQuery.objects.count",
"api.applications.models.GiftingClearanceApplication.objects.get",
"api.applications.models.OpenApplication.objects.count"
] |
[((767, 803), 'rest_framework.reverse.reverse', 'reverse', (['"""applications:applications"""'], {}), "('applications:applications')\n", (774, 803), False, 'from rest_framework.reverse import reverse\n'), ((5485, 5839), 'parameterized.parameterized.expand', 'parameterized.expand', (["[[{}], [{'application_type': CaseTypeReferenceEnum.SIEL, 'export_type':\n ApplicationExportType.TEMPORARY}], [{'name': 'Test', 'export_type':\n ApplicationExportType.TEMPORARY}], [{'name': 'Test', 'application_type':\n CaseTypeReferenceEnum.SIEL}], [{'application_type':\n CaseTypeReferenceEnum.EXHC}], [{'name': 'Test'}]]"], {}), "([[{}], [{'application_type': CaseTypeReferenceEnum.\n SIEL, 'export_type': ApplicationExportType.TEMPORARY}], [{'name':\n 'Test', 'export_type': ApplicationExportType.TEMPORARY}], [{'name':\n 'Test', 'application_type': CaseTypeReferenceEnum.SIEL}], [{\n 'application_type': CaseTypeReferenceEnum.EXHC}], [{'name': 'Test'}]])\n", (5505, 5839), False, 'from parameterized import parameterized\n'), ((7013, 7140), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(CaseTypeEnum.SICL.reference, StandardApplication), (CaseTypeEnum.OICL.\n reference, OpenApplication)]'], {}), '([(CaseTypeEnum.SICL.reference, StandardApplication), (\n CaseTypeEnum.OICL.reference, OpenApplication)])\n', (7033, 7140), False, 'from parameterized import parameterized\n'), ((8157, 8989), 'parameterized.parameterized.expand', 'parameterized.expand', (["[(CaseTypeEnum.SICL.reference, 'trade_control_activity', strings.\n Applications.Generic.TRADE_CONTROL_ACTIVITY_ERROR), (CaseTypeEnum.SICL.\n reference, 'trade_control_activity_other', strings.Applications.Generic\n .TRADE_CONTROL_ACTIVITY_OTHER_ERROR), (CaseTypeEnum.SICL.reference,\n 'trade_control_product_categories', strings.Applications.Generic.\n TRADE_CONTROl_PRODUCT_CATEGORY_ERROR), (CaseTypeEnum.OICL.reference,\n 'trade_control_activity', strings.Applications.Generic.\n TRADE_CONTROL_ACTIVITY_ERROR), (CaseTypeEnum.OICL.reference,\n 'trade_control_activity_other', strings.Applications.Generic.\n TRADE_CONTROL_ACTIVITY_OTHER_ERROR), (CaseTypeEnum.OICL.reference,\n 'trade_control_product_categories', strings.Applications.Generic.\n TRADE_CONTROl_PRODUCT_CATEGORY_ERROR)]"], {}), "([(CaseTypeEnum.SICL.reference,\n 'trade_control_activity', strings.Applications.Generic.\n TRADE_CONTROL_ACTIVITY_ERROR), (CaseTypeEnum.SICL.reference,\n 'trade_control_activity_other', strings.Applications.Generic.\n TRADE_CONTROL_ACTIVITY_OTHER_ERROR), (CaseTypeEnum.SICL.reference,\n 'trade_control_product_categories', strings.Applications.Generic.\n TRADE_CONTROl_PRODUCT_CATEGORY_ERROR), (CaseTypeEnum.OICL.reference,\n 'trade_control_activity', strings.Applications.Generic.\n TRADE_CONTROL_ACTIVITY_ERROR), (CaseTypeEnum.OICL.reference,\n 'trade_control_activity_other', strings.Applications.Generic.\n TRADE_CONTROL_ACTIVITY_OTHER_ERROR), (CaseTypeEnum.OICL.reference,\n 'trade_control_product_categories', strings.Applications.Generic.\n TRADE_CONTROl_PRODUCT_CATEGORY_ERROR)])\n", (8177, 8989), False, 'from parameterized import parameterized\n'), ((1455, 1488), 'api.applications.models.StandardApplication.objects.get', 'StandardApplication.objects.get', ([], {}), '()\n', (1486, 1488), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((2750, 2791), 'api.applications.models.GiftingClearanceApplication.objects.get', 
'GiftingClearanceApplication.objects.get', ([], {}), '()\n', (2789, 2791), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((3522, 3560), 'api.applications.models.F680ClearanceApplication.objects.get', 'F680ClearanceApplication.objects.get', ([], {}), '()\n', (3558, 3560), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((1663, 1698), 'api.applications.models.StandardApplication.objects.count', 'StandardApplication.objects.count', ([], {}), '()\n', (1696, 1698), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((1899, 1945), 'api.applications.models.ExhibitionClearanceApplication.objects.count', 'ExhibitionClearanceApplication.objects.count', ([], {}), '()\n', (1943, 1945), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((2242, 2288), 'api.applications.models.ExhibitionClearanceApplication.objects.count', 'ExhibitionClearanceApplication.objects.count', ([], {}), '()\n', (2286, 2288), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((2486, 2529), 'api.applications.models.GiftingClearanceApplication.objects.count', 'GiftingClearanceApplication.objects.count', ([], {}), '()\n', (2527, 2529), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((2890, 2933), 'api.applications.models.GiftingClearanceApplication.objects.count', 'GiftingClearanceApplication.objects.count', ([], {}), '()\n', (2931, 2933), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((3261, 3301), 'api.applications.models.F680ClearanceApplication.objects.count', 'F680ClearanceApplication.objects.count', ([], {}), '()\n', (3299, 3301), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((3659, 3699), 'api.applications.models.F680ClearanceApplication.objects.count', 'F680ClearanceApplication.objects.count', ([], {}), '()\n', (3697, 3699), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((4442, 4473), 'api.applications.models.OpenApplication.objects.count', 'OpenApplication.objects.count', ([], {}), '()\n', (4471, 4473), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((4962, 4987), 'api.applications.models.HmrcQuery.objects.count', 
'HmrcQuery.objects.count', ([], {}), '()\n', (4985, 4987), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((5449, 5474), 'api.applications.models.HmrcQuery.objects.count', 'HmrcQuery.objects.count', ([], {}), '()\n', (5472, 5474), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n'), ((6468, 6499), 'api.applications.models.BaseApplication.objects.count', 'BaseApplication.objects.count', ([], {}), '()\n', (6497, 6499), False, 'from api.applications.models import StandardApplication, OpenApplication, HmrcQuery, BaseApplication, ExhibitionClearanceApplication, GiftingClearanceApplication, F680ClearanceApplication\n')]
|
# Copyright 2018 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pathlib import Path
import pytest
import htmap
TIMEOUT = 300
@pytest.mark.timeout(TIMEOUT)
def test_rerun(mapped_doubler):
m = mapped_doubler.map([1])
m.wait()
m.rerun()
assert list(m) == [2]
@pytest.mark.timeout(TIMEOUT)
def test_load_then_rerun(mapped_doubler):
m = mapped_doubler.map([1], tag="load-then-rerun")
m.wait()
loaded = htmap.load("load-then-rerun")
loaded.rerun()
assert list(loaded) == [2]
@pytest.mark.timeout(TIMEOUT)
def test_rerun_out_of_range_component_raises(mapped_doubler):
m = mapped_doubler.map([1], tag="load-then-rerun")
m.wait()
with pytest.raises(htmap.exceptions.CannotRerunComponents):
m.rerun([5])
@pytest.fixture(scope="function")
def sleepy_doubler_that_writes_a_file():
@htmap.mapped
def sleepy_double(x):
time.sleep(1)
r = x * 2
p = Path("foo")
p.write_text("hi")
htmap.transfer_output_files(p)
return r
return sleepy_double
@pytest.mark.timeout(TIMEOUT)
def test_rerun_removes_current_output_file(sleepy_doubler_that_writes_a_file):
m = sleepy_doubler_that_writes_a_file.map([1], tag="load-then-rerun")
m.wait()
assert m.get(0) == 2
m.rerun()
with pytest.raises(htmap.exceptions.OutputNotFound):
m[0]
@pytest.mark.timeout(TIMEOUT)
def test_rerun_removes_current_user_output_file(sleepy_doubler_that_writes_a_file):
m = sleepy_doubler_that_writes_a_file.map([1], tag="load-then-rerun")
m.wait()
assert (m.output_files.get(0) / "foo").read_text() == "hi"
m.rerun()
with pytest.raises(FileNotFoundError):
(m.output_files[0] / "foo").read_text()
|
[
"htmap.load",
"pytest.fixture",
"time.sleep",
"pytest.raises",
"pathlib.Path",
"htmap.transfer_output_files",
"pytest.mark.timeout"
] |
[((731, 759), 'pytest.mark.timeout', 'pytest.mark.timeout', (['TIMEOUT'], {}), '(TIMEOUT)\n', (750, 759), False, 'import pytest\n'), ((882, 910), 'pytest.mark.timeout', 'pytest.mark.timeout', (['TIMEOUT'], {}), '(TIMEOUT)\n', (901, 910), False, 'import pytest\n'), ((1119, 1147), 'pytest.mark.timeout', 'pytest.mark.timeout', (['TIMEOUT'], {}), '(TIMEOUT)\n', (1138, 1147), False, 'import pytest\n'), ((1367, 1399), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1381, 1399), False, 'import pytest\n'), ((1661, 1689), 'pytest.mark.timeout', 'pytest.mark.timeout', (['TIMEOUT'], {}), '(TIMEOUT)\n', (1680, 1689), False, 'import pytest\n'), ((1972, 2000), 'pytest.mark.timeout', 'pytest.mark.timeout', (['TIMEOUT'], {}), '(TIMEOUT)\n', (1991, 2000), False, 'import pytest\n'), ((1035, 1064), 'htmap.load', 'htmap.load', (['"""load-then-rerun"""'], {}), "('load-then-rerun')\n", (1045, 1064), False, 'import htmap\n'), ((1288, 1341), 'pytest.raises', 'pytest.raises', (['htmap.exceptions.CannotRerunComponents'], {}), '(htmap.exceptions.CannotRerunComponents)\n', (1301, 1341), False, 'import pytest\n'), ((1493, 1506), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1503, 1506), False, 'import time\n'), ((1537, 1548), 'pathlib.Path', 'Path', (['"""foo"""'], {}), "('foo')\n", (1541, 1548), False, 'from pathlib import Path\n'), ((1584, 1614), 'htmap.transfer_output_files', 'htmap.transfer_output_files', (['p'], {}), '(p)\n', (1611, 1614), False, 'import htmap\n'), ((1908, 1954), 'pytest.raises', 'pytest.raises', (['htmap.exceptions.OutputNotFound'], {}), '(htmap.exceptions.OutputNotFound)\n', (1921, 1954), False, 'import pytest\n'), ((2262, 2294), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (2275, 2294), False, 'import pytest\n')]
|
#!/usr/bin/env python
from django.conf.urls import patterns, include, url
urlpatterns = patterns(
'',
url(r'^$', 'pyhn.apps.news.views.index.index', name='index'),
url(
r'^social/', include('social.apps.django_app.urls', namespace='social')
),
url(r'^news/', include('pyhn.apps.news.urls', namespace='news')),
url(r'^accounts/', include('pyhn.apps.account.urls', namespace='account')),
url(
r'^user/(?P<user_id>\d+)/', 'pyhn.apps.account.views.user_profile',
name='profile'
),
)
|
[
"django.conf.urls.include",
"django.conf.urls.url"
] |
[((114, 173), 'django.conf.urls.url', 'url', (['"""^$"""', '"""pyhn.apps.news.views.index.index"""'], {'name': '"""index"""'}), "('^$', 'pyhn.apps.news.views.index.index', name='index')\n", (117, 173), False, 'from django.conf.urls import patterns, include, url\n'), ((426, 517), 'django.conf.urls.url', 'url', (['"""^user/(?P<user_id>\\\\d+)/"""', '"""pyhn.apps.account.views.user_profile"""'], {'name': '"""profile"""'}), "('^user/(?P<user_id>\\\\d+)/', 'pyhn.apps.account.views.user_profile',\n name='profile')\n", (429, 517), False, 'from django.conf.urls import patterns, include, url\n'), ((206, 264), 'django.conf.urls.include', 'include', (['"""social.apps.django_app.urls"""'], {'namespace': '"""social"""'}), "('social.apps.django_app.urls', namespace='social')\n", (213, 264), False, 'from django.conf.urls import patterns, include, url\n'), ((291, 339), 'django.conf.urls.include', 'include', (['"""pyhn.apps.news.urls"""'], {'namespace': '"""news"""'}), "('pyhn.apps.news.urls', namespace='news')\n", (298, 339), False, 'from django.conf.urls import patterns, include, url\n'), ((365, 419), 'django.conf.urls.include', 'include', (['"""pyhn.apps.account.urls"""'], {'namespace': '"""account"""'}), "('pyhn.apps.account.urls', namespace='account')\n", (372, 419), False, 'from django.conf.urls import patterns, include, url\n')]
|
import random
from collections import Counter
from datetime import datetime
from typing import List, Tuple, Union
import discord
from discord import Embed
from discord.ext import commands
from crimsobot.models.currency_account import CurrencyAccount
from crimsobot.models.guess_statistic import GuessStatistic
from crimsobot.utils import tools as c
DiscordUser = Union[discord.User, discord.Member]
def get_crimsoball_answer(ctx: commands.Context) -> str: # function to give first answer a ctx to work with
# don't know if this is any better than just putting it
# inside of the crimsoball command
answer_list = [
'{} haha ping'.format(ctx.message.author.mention),
'ye!',
'**no**',
'what do you think?',
'*perhaps*',
'OMAN',
"i can't answer this, you need an adult",
'absolutely!\n\n\n`not`',
'of course!',
'according to quantum superposition, the answer was both yes and no before you asked.',
"is the sky blue?\n\n(is it? i don't know. i don't have eyes.)",
"i can't be bothered with this right now.",
'funny you should ask--',
'fine, sure, whatever',
'<:xok:551174281367650356>',
'ask seannerz. ping him now and ask.',
'ehhhh sure',
'hmmmm. no.',
'uhhhhhhhhh',
'<:uhhhh:495249068789071882>',
'eat glass!',
'it is important that you stop bothering me.',
'you CANNOT be serious',
'sure? how would i know?',
'what heck',
'random_response', # leave this alone
]
return random.choice(answer_list)
def emojistring() -> str:
emojis = []
for line in open(c.clib_path_join('games', 'emojilist.txt'), encoding='utf-8', errors='ignore'):
line = line.replace('\n', '')
emojis.append(line)
emoji_string = random.sample(''.join(emojis), random.randint(3, 5))
return ' '.join(emoji_string)
def tally(ballots: List[str]) -> Tuple[str, int]:
counter = Counter(sorted(ballots))
winner = counter.most_common(1)[0]
return winner
def winner_list(winners: List[str]) -> str:
if len(winners) > 1:
winners_ = ', '.join(winners[:-1])
winners_ = winners_ + ' & ' + winners[-1] # winner, winner & winner
else:
winners_ = winners[0]
return winners_
def get_story() -> str:
story = open(
c.clib_path_join('games', 'madlibs.txt'),
encoding='utf-8',
errors='ignore'
).readlines()
story = [line[:-1] for line in story]
story = [line.replace('\\n', '\n') for line in story]
return random.choice(story)
def get_keys(format_string: str) -> List[str]:
"""format_string is a format string with embedded dictionary keys.
Return a set containing all the keys from the format string."""
keys = []
end = 0
repetitions = format_string.count('{')
for _ in range(repetitions):
start = format_string.find('{', end) + 1 # pass the '{'
end = format_string.find('}', start)
key = format_string[start:end]
keys.append(key) # may add duplicates
# find indices of marked tags (to be used more than once)
ind = [i for i, s in enumerate(keys) if '#' in s]
# isolate the marked tags and keep one instance each
mults = []
for ele in ind:
mults.append(keys[ele])
mults = list(set(mults))
# delete all marked tags from original list
for ele in sorted(ind, reverse=True):
del keys[ele]
# ...and add back one instance each
keys = keys + mults
return keys
async def win(discord_user: DiscordUser, amount: float) -> None:
account = await CurrencyAccount.get_by_discord_user(discord_user) # type: CurrencyAccount
account.add_to_balance(amount)
await account.save()
async def daily(discord_user: DiscordUser, lucky_number: int) -> Embed:
# fetch account
account = await CurrencyAccount.get_by_discord_user(discord_user) # type: CurrencyAccount
# get current time
now = datetime.utcnow()
# arbitrary "last date collected" and reset time (midnight UTC)
reset = datetime(1969, 7, 20, 0, 0, 0) # ymd required but will not be used
last = account.ran_daily_at
# check if dates are same; if so, gotta wait
if last and last.strftime('%Y-%m-%d') == now.strftime('%Y-%m-%d'):
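# (reset - now) is a negative timedelta; normalization makes .seconds the time remaining until the next midnight UTC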
hours = (reset - now).seconds / 3600
minutes = (hours - int(hours)) * 60
title = 'Patience...'
award_string = 'Daily award resets at midnight UTC, {}h{}m from now.'.format(int(hours), int(minutes + 1))
thumb = 'clock'
color = 'orange'
# if no wait, then check if winner or loser
else:
winning_number = random.randint(1, 100)
if winning_number == lucky_number:
daily_award = 500
title = 'JACKPOT!'
wrong = '' # they're not wrong!
thumb = 'moneymouth'
color = 'green'
else:
daily_award = 10
title_choices = [
'*heck*',
'*frick*',
'*womp womp*',
'**😩**',
'Aw shucks.',
'Why even bother?',
]
title = random.choice(title_choices)
wrong = 'The winning number this time was **{}**, but no worries:'.format(winning_number)
thumb = 'crimsoCOIN'
color = 'yellow'
# update daily then save
account.ran_daily_at = now
await account.save()
# update their balance now (will reopen and reclose user)
await win(discord_user, daily_award)
award_string = '{} You have been awarded your daily **\u20A2{:.2f}**!'.format(wrong, daily_award)
thumb = thumb
color = color
# the embed to return
embed = c.crimbed(
title=title,
descr=award_string,
thumb_name=thumb,
color_name=color,
)
return embed
async def check_balance(discord_user: DiscordUser) -> float:
account = await CurrencyAccount.get_by_discord_user(discord_user) # type: CurrencyAccount
return account.get_balance()
def guess_economy(n: int) -> Tuple[float, float]:
""" input: integer
output: float, float"""
# winnings for each n=0,...,20
winnings = [0, 7, 2, 4, 7, 11, 15, 20, 25, 30, 36, 42, 49, 56, 64, 72, 80, 95, 120, 150, 200]
# variables for cost function
const = 0.0095 # dampener multiplier
sweet = 8 # sweet spot for guess
favor = 1.3 # favor to player (against house) at sweet spot
# conditionals
if n > 2:
cost = winnings[n] / n - (-const * (n - sweet) ** 2 + favor)
else:
cost = 0.00
return winnings[n], cost
async def guess_luck(discord_user: DiscordUser, n: int, won: bool) -> None:
stats = await GuessStatistic.get_by_discord_user(discord_user) # type: GuessStatistic
stats.plays += 1
stats.add_to_expected_wins(n)
if won:
stats.wins += 1
await stats.save()
# async def guess_luck_balance(discord_user: DiscordUser) -> Tuple[float, int]:
# stats = await GuessStatistic.get_by_discord_user(discord_user) # type: GuessStatistic
# return stats.luck_index, stats.plays
async def guess_stat_embed(user: DiscordUser) -> Embed:
"""Return a big ol' embed of Guessmoji! stats"""
s = await GuessStatistic.get_by_discord_user(user)
if s.plays == 0:
embed = c.crimbed(
title='HOW—',
descr="You haven't played GUESSMOJI! yet!",
thumb_name='weary',
footer='Play >guess [n] today!',
)
else:
embed = c.crimbed(
title='GUESSMOJI! stats for {}'.format(user),
descr=None,
thumb_name='crimsoCOIN',
footer='Stat tracking as of {d.year}-{d.month:02d}-{d.day:02d}'.format(d=s.created_at),
)
ess = '' if s.plays == 1 else 's'
ess2 = '' if s.wins == 1 else 's'
# list of tuples (name, value) for embed.add_field
field_list = [
(
'Gameplay',
'**{}** game{ess} played, **{}** win{ess2}'.format(s.plays, s.wins, ess=ess, ess2=ess2)
),
(
'Luck index (expected: 100)',
'**{:.3f}**'.format(100 * s.luck_index)
),
]
for field in field_list:
embed.add_field(name=field[0], value=field[1], inline=False)
return embed
def guesslist() -> str:
output = [' n · cost · payout',
'·························']
for i in range(2, 21):
spc = '\u2002' if i < 10 else ''
w, c = guess_economy(i)
output.append('{}{:>d} · \u20A2{:>5.2f} · \u20A2{:>6.2f}'.format(spc, i, c, w))
return '\n'.join(output)
|
[
"random.randint",
"random.choice",
"crimsobot.utils.tools.clib_path_join",
"datetime.datetime",
"crimsobot.models.currency_account.CurrencyAccount.get_by_discord_user",
"datetime.datetime.utcnow",
"crimsobot.models.guess_statistic.GuessStatistic.get_by_discord_user",
"crimsobot.utils.tools.crimbed"
] |
[((1712, 1738), 'random.choice', 'random.choice', (['answer_list'], {}), '(answer_list)\n', (1725, 1738), False, 'import random\n'), ((2734, 2754), 'random.choice', 'random.choice', (['story'], {}), '(story)\n', (2747, 2754), False, 'import random\n'), ((4153, 4170), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4168, 4170), False, 'from datetime import datetime\n'), ((4252, 4282), 'datetime.datetime', 'datetime', (['(1969)', '(7)', '(20)', '(0)', '(0)', '(0)'], {}), '(1969, 7, 20, 0, 0, 0)\n', (4260, 4282), False, 'from datetime import datetime\n'), ((5953, 6031), 'crimsobot.utils.tools.crimbed', 'c.crimbed', ([], {'title': 'title', 'descr': 'award_string', 'thumb_name': 'thumb', 'color_name': 'color'}), '(title=title, descr=award_string, thumb_name=thumb, color_name=color)\n', (5962, 6031), True, 'from crimsobot.utils import tools as c\n'), ((1804, 1846), 'crimsobot.utils.tools.clib_path_join', 'c.clib_path_join', (['"""games"""', '"""emojilist.txt"""'], {}), "('games', 'emojilist.txt')\n", (1820, 1846), True, 'from crimsobot.utils import tools as c\n'), ((2001, 2021), 'random.randint', 'random.randint', (['(3)', '(5)'], {}), '(3, 5)\n', (2015, 2021), False, 'import random\n'), ((3795, 3844), 'crimsobot.models.currency_account.CurrencyAccount.get_by_discord_user', 'CurrencyAccount.get_by_discord_user', (['discord_user'], {}), '(discord_user)\n', (3830, 3844), False, 'from crimsobot.models.currency_account import CurrencyAccount\n'), ((4044, 4093), 'crimsobot.models.currency_account.CurrencyAccount.get_by_discord_user', 'CurrencyAccount.get_by_discord_user', (['discord_user'], {}), '(discord_user)\n', (4079, 4093), False, 'from crimsobot.models.currency_account import CurrencyAccount\n'), ((4841, 4863), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (4855, 4863), False, 'import random\n'), ((6171, 6220), 'crimsobot.models.currency_account.CurrencyAccount.get_by_discord_user', 'CurrencyAccount.get_by_discord_user', (['discord_user'], {}), '(discord_user)\n', (6206, 6220), False, 'from crimsobot.models.currency_account import CurrencyAccount\n'), ((6959, 7007), 'crimsobot.models.guess_statistic.GuessStatistic.get_by_discord_user', 'GuessStatistic.get_by_discord_user', (['discord_user'], {}), '(discord_user)\n', (6993, 7007), False, 'from crimsobot.models.guess_statistic import GuessStatistic\n'), ((7492, 7532), 'crimsobot.models.guess_statistic.GuessStatistic.get_by_discord_user', 'GuessStatistic.get_by_discord_user', (['user'], {}), '(user)\n', (7526, 7532), False, 'from crimsobot.models.guess_statistic import GuessStatistic\n'), ((7571, 7695), 'crimsobot.utils.tools.crimbed', 'c.crimbed', ([], {'title': '"""HOW—"""', 'descr': '"""You haven\'t played GUESSMOJI! yet!"""', 'thumb_name': '"""weary"""', 'footer': '"""Play >guess [n] today!"""'}), '(title=\'HOW—\', descr="You haven\'t played GUESSMOJI! yet!",\n thumb_name=\'weary\', footer=\'Play >guess [n] today!\')\n', (7580, 7695), True, 'from crimsobot.utils import tools as c\n'), ((5360, 5388), 'random.choice', 'random.choice', (['title_choices'], {}), '(title_choices)\n', (5373, 5388), False, 'import random\n'), ((2511, 2551), 'crimsobot.utils.tools.clib_path_join', 'c.clib_path_join', (['"""games"""', '"""madlibs.txt"""'], {}), "('games', 'madlibs.txt')\n", (2527, 2551), True, 'from crimsobot.utils import tools as c\n')]
|
from ..const.const import (
MONTH_TO_NUMBER,
SENSOR_LOCATIONS_TO_URL,
_LOGGER,
)
from datetime import datetime, date
from bs4 import BeautifulSoup
import urllib.request
import urllib.error
class VenloAfval(object):
def get_date_from_afvaltype(self, tableRows, afvaltype):
try:
for row in tableRows:
garbageDate = row.find("td")
garbageType = row.find("span")
if garbageDate and garbageType:
garbageDate = row.find("td").string
garbageType = row.find("span").string
#Does the afvaltype match...
if garbageType == afvaltype:
day = garbageDate.split()[1]
month = MONTH_TO_NUMBER[garbageDate.split()[2]]
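# if the scraped month has already passed this year, the pickup must be next year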
year = str(
datetime.today().year
if datetime.today().month <= int(month)
else datetime.today().year + 1
)
garbageDate = year + "-" + month + "-" + day
if datetime.strptime(garbageDate, '%Y-%m-%d').date() >= date.today():
return garbageDate
# if nothing was found
return ""
except Exception as exc:
_LOGGER.error("Error occurred while splitting data: %r", exc)
return ""
def get_data(self, city, postcode, street_number):
_LOGGER.debug("Updating Waste collection dates")
try:
url = SENSOR_LOCATIONS_TO_URL["venlo"][0].format(
postcode, street_number
)
req = urllib.request.Request(url=url)
f = urllib.request.urlopen(req)
html = f.read().decode("utf-8")
soup = BeautifulSoup(html, "html.parser")
html = soup.find("div", {"class": "trash-removal-calendar"})
tableRows = html.findAll("tr")
# Place all possible values in the dictionary even if they are not necessary
waste_dict = {}
# GFT
waste_dict["gft"] = self.get_date_from_afvaltype(tableRows, "GFT")
# Restafval
waste_dict["restafval"] = self.get_date_from_afvaltype(tableRows, "Restafval/PMD")
# PMD
waste_dict["pbd"] = self.get_date_from_afvaltype(tableRows, "Restafval/PMD")
return waste_dict
except urllib.error.URLError as exc:
_LOGGER.error("Error occurred while fetching data: %r", exc.reason)
return False
|
[
"bs4.BeautifulSoup",
"datetime.datetime.strptime",
"datetime.datetime.today",
"datetime.date.today"
] |
[((1852, 1886), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1865, 1886), False, 'from bs4 import BeautifulSoup\n'), ((1204, 1216), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1214, 1216), False, 'from datetime import datetime, date\n'), ((879, 895), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (893, 895), False, 'from datetime import datetime, date\n'), ((1151, 1193), 'datetime.datetime.strptime', 'datetime.strptime', (['garbageDate', '"""%Y-%m-%d"""'], {}), "(garbageDate, '%Y-%m-%d')\n", (1168, 1193), False, 'from datetime import datetime, date\n'), ((932, 948), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (946, 948), False, 'from datetime import datetime, date\n'), ((1002, 1018), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1016, 1018), False, 'from datetime import datetime, date\n')]
|
"""
================================
Neuropathic pain - Neurofeedback
================================
"""
import logging
from typing import Literal, TypeVar
from bci_framework.extensions.stimuli_delivery import StimuliAPI, Feedback, DeliveryInstance
from bci_framework.extensions.stimuli_delivery.utils import Widgets as w
from bci_framework.extensions import properties as prop
from browser import document, html, timer
Ts = TypeVar('Time in seconds')
Tm = TypeVar('Time in milliseconds')
TM = TypeVar('Time in minutes')
bands = {
'alpha': [[1, 5], 'increase'],
'beta': [[5, 10], 'decrease'],
'teta': [[10, 15], 'decrease'],
}
########################################################################
class NPNeurofeedback(StimuliAPI):
""""""
# ----------------------------------------------------------------------
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_stylesheet('styles.css')
self.show_cross()
self.show_synchronizer()
self.feedback = Feedback(self, 'PowerBandNeuroFeedback')
self.feedback.on_feedback(self.on_input_feedback)
self.bci_stimuli <= html.DIV(id='stimuli')
self.dashboard <= w.label(
'NeuropathicPain - Neurofeedback', 'headline4'
)
self.dashboard <= html.BR()
self.dashboard <= w.subject_information(
paradigm='NeuropathicPain - Neurofeedback'
)
self.dashboard <= w.slider(
label='Baseline acquisition:',
min=0,
value=0.1,
max=5,
step=0.1,
unit='m',
id='baseline_duration',
)
self.dashboard <= w.slider(
label='Sesion duration:',
min=5,
value=10,
max=30,
step=0.1,
unit='m',
id='sesion_duration',
)
self.dashboard <= w.slider(
label='Window analysis:',
min=0.5,
max=2,
value=1,
step=0.1,
unit='s',
id='window_analysis',
)
self.dashboard <= w.slider(
label='Sliding data:',
min=0.1,
max=2,
value=1,
unit='s',
step=0.1,
id='sliding_data',
)
self.dashboard <= w.select(
'Analysis Function',
[['Fourier', 'fourier'], ['Welch', 'welch']],
value='fourier',
id='method',
)
self.dashboard <= w.switch(
label='Record EEG',
checked=False,
id='record',
)
self.dashboard <= w.toggle_button(
[
('Start session', self.start),
('Stop session', self.stop_session),
],
id='run',
)
# self.dashboard <= w.slider(
# label='Test feedback:',
# min=-1,
# max=1,
# value=0,
# step=0.1,
# id='test',
# on_change=self.test_feedback,
# )
# # ----------------------------------------------------------------------
# @DeliveryInstance.both
# def test_feedback(self, value):
# """Test the feedback stimuli."""
# self.on_input_feedback(
# **{
# 'feedback': value,
# }
# )
# ----------------------------------------------------------------------
def on_input_feedback(self, **feedback: dict[str, [str, int]]) -> None:
"""Asynchronous method to receive the feedback process value.
`feedback` is a dictionary with the keys:
* `feedback`: The feedback value, an `int` between -1 and 1.
* `baseline`: The baseline value freezed.
"""
f = feedback['feedback']
# logging.warning(f'FEEDBACK: {f}')
plot = self.BandFeedback.neurofeedback(f)
# document.select_one('#stimuli').clear()
# self.update_plot(plot)
# @DeliveryInstance.remote
# def update_plot(self, plot):
document.select_one('#stimuli').style = {
'background-image': f'url(data:image/png;base64,{plot})',
}
# ----------------------------------------------------------------------
def start(self) -> None:
"""Start the session.
A session comprises a baseline calculation and a neurofeedback trial.
"""
if w.get_value('record'):
self.start_record()
self.build_trials()
self.show_counter(5)
timer.set_timeout(self.start_session, 5000)
# ----------------------------------------------------------------------
def start_session(self) -> None:
"""Execute the session pipeline."""
logging.warning('START_SESSION')
self.run_pipeline(
self.pipeline_trial, self.trials, callback='stop_session'
)
# ----------------------------------------------------------------------
def stop_session(self) -> None:
"""Stop pipeline execution."""
document.select_one('#stimuli').style = {'display': 'none'}
self.stop_analyser()
w.get_value('run').off()
if w.get_value('record'):
timer.set_timeout(self.stop_record, 2000)
# ----------------------------------------------------------------------
def build_trials(self) -> None:
"""Define the session and single session pipeline."""
baseline_duration = w.get_value('baseline_duration') * 60
sesion_duration = w.get_value('sesion_duration') * 60
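# number of sliding-window packages that fit inside the baseline period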
baseline_packages = baseline_duration // w.get_value('sliding_data')
logging.warning(f'BP: {baseline_packages}')
self.trials = [
{
'method': w.get_value('method'),
'window_analysis': w.get_value('window_analysis'),
'sliding_data': w.get_value('sliding_data') * prop.SAMPLE_RATE,
'baseline_packages': baseline_packages,
},
]
self.pipeline_trial = [
['stop_analyser', 100],
['configure_analyser', 1000],
['baseline', baseline_duration * 1000],
['session', sesion_duration * 1000],
['stop_analyser', 1000],
]
# ----------------------------------------------------------------------
def configure_analyser(
self,
method,
window_analysis: Ts,
sliding_data: int,
baseline_packages: int,
) -> None:
"""Send the configuration values to the generator."""
data = {
'status': 'on',
'method': method,
'window_analysis': window_analysis,
'sliding_data': sliding_data,
'baseline_packages': baseline_packages,
'channels': list(prop.CHANNELS.values()),
'target_channels': list(prop.CHANNELS.values()),
'sample_rate': int(prop.SAMPLE_RATE),
'bands': bands,
}
logging.warning(f'CONFIG: {data}')
self.feedback.write(data)
# ----------------------------------------------------------------------
def baseline(self) -> None:
"""Acquire data to use in the zero location."""
self.show_cross()
self.send_marker('Start baseline')
document.select_one('#stimuli').style = {'display': 'none'}
# ----------------------------------------------------------------------
def session(self) -> None:
"""Neurofeedback activity."""
self.hide_cross()
self.send_marker('End baseline')
self.feedback.write({'command': 'freeze_baseline'}) # zero location
document.select_one('#stimuli').style = {'display': 'block'}
# ----------------------------------------------------------------------
def stop_analyser(self) -> None:
"""Stop feedback values generation."""
self.feedback.write(
{
'status': 'off',
}
)
if __name__ == '__main__':
NPNeurofeedback(python=('feedback.py', 'BandFeedback'))
|
[
"browser.html.BR",
"bci_framework.extensions.stimuli_delivery.utils.Widgets.slider",
"bci_framework.extensions.properties.CHANNELS.values",
"bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value",
"logging.warning",
"bci_framework.extensions.stimuli_delivery.utils.Widgets.label",
"bci_framework.extensions.stimuli_delivery.utils.Widgets.toggle_button",
"browser.document.select_one",
"bci_framework.extensions.stimuli_delivery.utils.Widgets.select",
"browser.html.DIV",
"bci_framework.extensions.stimuli_delivery.utils.Widgets.subject_information",
"bci_framework.extensions.stimuli_delivery.utils.Widgets.switch",
"bci_framework.extensions.stimuli_delivery.Feedback",
"typing.TypeVar",
"browser.timer.set_timeout"
] |
[((434, 460), 'typing.TypeVar', 'TypeVar', (['"""Time in seconds"""'], {}), "('Time in seconds')\n", (441, 460), False, 'from typing import Literal, TypeVar\n'), ((466, 497), 'typing.TypeVar', 'TypeVar', (['"""Time in milliseconds"""'], {}), "('Time in milliseconds')\n", (473, 497), False, 'from typing import Literal, TypeVar\n'), ((503, 529), 'typing.TypeVar', 'TypeVar', (['"""Time in minutes"""'], {}), "('Time in minutes')\n", (510, 529), False, 'from typing import Literal, TypeVar\n'), ((1062, 1102), 'bci_framework.extensions.stimuli_delivery.Feedback', 'Feedback', (['self', '"""PowerBandNeuroFeedback"""'], {}), "(self, 'PowerBandNeuroFeedback')\n", (1070, 1102), False, 'from bci_framework.extensions.stimuli_delivery import StimuliAPI, Feedback, DeliveryInstance\n'), ((4543, 4564), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""record"""'], {}), "('record')\n", (4554, 4564), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((4664, 4707), 'browser.timer.set_timeout', 'timer.set_timeout', (['self.start_session', '(5000)'], {}), '(self.start_session, 5000)\n', (4681, 4707), False, 'from browser import document, html, timer\n'), ((4875, 4907), 'logging.warning', 'logging.warning', (['"""START_SESSION"""'], {}), "('START_SESSION')\n", (4890, 4907), False, 'import logging\n'), ((5309, 5330), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""record"""'], {}), "('record')\n", (5320, 5330), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((5777, 5820), 'logging.warning', 'logging.warning', (['f"""BP: {baseline_packages}"""'], {}), "(f'BP: {baseline_packages}')\n", (5792, 5820), False, 'import logging\n'), ((7156, 7190), 'logging.warning', 'logging.warning', (['f"""CONFIG: {data}"""'], {}), "(f'CONFIG: {data}')\n", (7171, 7190), False, 'import logging\n'), ((1190, 1212), 'browser.html.DIV', 'html.DIV', ([], {'id': '"""stimuli"""'}), "(id='stimuli')\n", (1198, 1212), False, 'from browser import document, html, timer\n'), ((1240, 1295), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.label', 'w.label', (['"""NeuropathicPain - Neurofeedback"""', '"""headline4"""'], {}), "('NeuropathicPain - Neurofeedback', 'headline4')\n", (1247, 1295), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((1344, 1353), 'browser.html.BR', 'html.BR', ([], {}), '()\n', (1351, 1353), False, 'from browser import document, html, timer\n'), ((1381, 1446), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.subject_information', 'w.subject_information', ([], {'paradigm': '"""NeuropathicPain - Neurofeedback"""'}), "(paradigm='NeuropathicPain - Neurofeedback')\n", (1402, 1446), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((1496, 1608), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.slider', 'w.slider', ([], {'label': '"""Baseline acquisition:"""', 'min': '(0)', 'value': '(0.1)', 'max': '(5)', 'step': '(0.1)', 'unit': '"""m"""', 'id': '"""baseline_duration"""'}), "(label='Baseline acquisition:', min=0, value=0.1, max=5, step=0.1,\n unit='m', id='baseline_duration')\n", (1504, 1608), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((1726, 1832), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.slider', 'w.slider', ([], {'label': '"""Sesion duration:"""', 'min': '(5)', 'value': '(10)', 'max': '(30)', 'step': '(0.1)', 'unit': '"""m"""', 
'id': '"""sesion_duration"""'}), "(label='Sesion duration:', min=5, value=10, max=30, step=0.1, unit=\n 'm', id='sesion_duration')\n", (1734, 1832), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((1950, 2056), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.slider', 'w.slider', ([], {'label': '"""Window analysis:"""', 'min': '(0.5)', 'max': '(2)', 'value': '(1)', 'step': '(0.1)', 'unit': '"""s"""', 'id': '"""window_analysis"""'}), "(label='Window analysis:', min=0.5, max=2, value=1, step=0.1, unit=\n 's', id='window_analysis')\n", (1958, 2056), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((2174, 2273), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.slider', 'w.slider', ([], {'label': '"""Sliding data:"""', 'min': '(0.1)', 'max': '(2)', 'value': '(1)', 'unit': '"""s"""', 'step': '(0.1)', 'id': '"""sliding_data"""'}), "(label='Sliding data:', min=0.1, max=2, value=1, unit='s', step=0.1,\n id='sliding_data')\n", (2182, 2273), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((2392, 2501), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.select', 'w.select', (['"""Analysis Function"""', "[['Fourier', 'fourier'], ['Welch', 'welch']]"], {'value': '"""fourier"""', 'id': '"""method"""'}), "('Analysis Function', [['Fourier', 'fourier'], ['Welch', 'welch']],\n value='fourier', id='method')\n", (2400, 2501), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((2584, 2640), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.switch', 'w.switch', ([], {'label': '"""Record EEG"""', 'checked': '(False)', 'id': '"""record"""'}), "(label='Record EEG', checked=False, id='record')\n", (2592, 2640), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((2715, 2815), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.toggle_button', 'w.toggle_button', (["[('Start session', self.start), ('Stop session', self.stop_session)]"], {'id': '"""run"""'}), "([('Start session', self.start), ('Stop session', self.\n stop_session)], id='run')\n", (2730, 2815), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((4182, 4213), 'browser.document.select_one', 'document.select_one', (['"""#stimuli"""'], {}), "('#stimuli')\n", (4201, 4213), False, 'from browser import document, html, timer\n'), ((5176, 5207), 'browser.document.select_one', 'document.select_one', (['"""#stimuli"""'], {}), "('#stimuli')\n", (5195, 5207), False, 'from browser import document, html, timer\n'), ((5344, 5385), 'browser.timer.set_timeout', 'timer.set_timeout', (['self.stop_record', '(2000)'], {}), '(self.stop_record, 2000)\n', (5361, 5385), False, 'from browser import document, html, timer\n'), ((5591, 5623), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""baseline_duration"""'], {}), "('baseline_duration')\n", (5602, 5623), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((5655, 5685), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""sesion_duration"""'], {}), "('sesion_duration')\n", (5666, 5685), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((5740, 5767), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""sliding_data"""'], {}), "('sliding_data')\n", (5751, 5767), True, 'from 
bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((7469, 7500), 'browser.document.select_one', 'document.select_one', (['"""#stimuli"""'], {}), "('#stimuli')\n", (7488, 7500), False, 'from browser import document, html, timer\n'), ((7837, 7868), 'browser.document.select_one', 'document.select_one', (['"""#stimuli"""'], {}), "('#stimuli')\n", (7856, 7868), False, 'from browser import document, html, timer\n'), ((5273, 5291), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""run"""'], {}), "('run')\n", (5284, 5291), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((5886, 5907), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""method"""'], {}), "('method')\n", (5897, 5907), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((5944, 5974), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""window_analysis"""'], {}), "('window_analysis')\n", (5955, 5974), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n'), ((6974, 6996), 'bci_framework.extensions.properties.CHANNELS.values', 'prop.CHANNELS.values', ([], {}), '()\n', (6994, 6996), True, 'from bci_framework.extensions import properties as prop\n'), ((7035, 7057), 'bci_framework.extensions.properties.CHANNELS.values', 'prop.CHANNELS.values', ([], {}), '()\n', (7055, 7057), True, 'from bci_framework.extensions import properties as prop\n'), ((6008, 6035), 'bci_framework.extensions.stimuli_delivery.utils.Widgets.get_value', 'w.get_value', (['"""sliding_data"""'], {}), "('sliding_data')\n", (6019, 6035), True, 'from bci_framework.extensions.stimuli_delivery.utils import Widgets as w\n')]
|
"""Provide unit tests for `~python_venv.env`:py:mod:."""
import unittest
import parameterized # https://pypi.org/project/parameterized/
from python_venv import const, env, reqs
class TestEnv_000_General(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_PV_ENV_000_symbols_exist(self):
_ = const.PYTHON
_ = const.CONDA
_ = const.PYENV
_ = const.VENV_DIR
_ = const.DEV_SUFFIX
_ = const.DIST_DIR_PLACEHOLDER
_ = const.ENV_DIR_PLACEHOLDER
_ = const.ENV_TYPES_NAMED
class TestEnv_010_BaseVirtualEnvironment(unittest.TestCase):
def setUp(self):
self.saved_requirements = reqs.REQUIREMENTS
def tearDown(self):
reqs.REQUIREMENTS = self.saved_requirements
def test_PV_ENV_BAS_000_instantiate_empty(self):
with self.assertRaises(TypeError) as raised:
env.BaseVirtualEnvironment()
msg = raised.exception.args[0]
self.assertTrue(
msg.startswith("__init__() missing 1 required positional argument")
)
def test_PV_ENV_BAS_001_instantiate(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
self.assertEqual(x.req_scheme, "dummy_req_scheme")
@parameterized.parameterized.expand(
[
("dry_run", {"dry_run": True}, "dry_run", True),
("force", {"force": True}, "force", True),
(
"message_prefix",
{"message_prefix": "dummy_message_prefix"},
"message_prefix",
"dummy_message_prefix",
),
("python", {"python": "dummy_python"}, "python", "dummy_python"),
("basename", {"basename": "dummy_basename"}, "_basename", "dummy_basename"),
("env_name", {"env_name": "dummy_env_name"}, "_env_name", "dummy_env_name"),
(
"env_prefix",
{"env_prefix": "dummy_env_prefix"},
"_env_prefix",
"dummy_env_prefix",
),
]
)
def test_PV_ENV_BAS_002_instantiate_kwargs(self, name, kwargs, attr, value):
x = env.BaseVirtualEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(getattr(x, attr), value)
def test_PV_ENV_BAS_010_requirements(self):
dummy_requirements = {"dummy_req_source": ["dummy_value"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.BaseVirtualEnvironment("dummy_req_scheme")
self.assertListEqual(x.requirements.requirements, [dummy_requirements])
def test_PV_ENV_BAS_020_package_name(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
self.assertEqual(x.package_name, "python_venv")
@parameterized.parameterized.expand(
[
("default", None, "python-venv"),
("specified", "dummy-package", "dummy-package"),
]
)
def test_PV_ENV_BAS_030_basename(self, name, basename, expected):
kwargs = {} if basename is None else {"basename": basename}
x = env.BaseVirtualEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(x.basename, expected)
def test_PV_ENV_BAS_040_abstract_env_name(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_name
@parameterized.parameterized.expand(
[
("default", None, ""),
("specified", "dummy-prefix", "dummy-prefix"),
]
)
def test_PV_ENV_BAS_045_env_prefix(self, name, env_prefix, expected):
kwargs = {} if env_prefix is None else {"env_prefix": env_prefix}
x = env.BaseVirtualEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(x.env_prefix, expected)
def test_PV_ENV_BAS_050_abstract_env_dir(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_dir
def test_PV_ENV_BAS_051_abstract_env_bin_dir(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_bin_dir
def test_PV_ENV_BAS_052_abstract_env_python(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_python
def test_PV_ENV_BAS_055_abstract_abs_env_dir(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.abs_env_dir
def test_PV_ENV_BAS_060_abstract_env_description(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_description
def test_PV_ENV_BAS_100_abstract_create(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.create()
def test_PV_ENV_BAS_200_abstract_remove(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.remove()
|
[
"parameterized.parameterized.expand",
"python_venv.env.BaseVirtualEnvironment"
] |
[((1274, 1850), 'parameterized.parameterized.expand', 'parameterized.parameterized.expand', (["[('dry_run', {'dry_run': True}, 'dry_run', True), ('force', {'force': True},\n 'force', True), ('message_prefix', {'message_prefix':\n 'dummy_message_prefix'}, 'message_prefix', 'dummy_message_prefix'), (\n 'python', {'python': 'dummy_python'}, 'python', 'dummy_python'), (\n 'basename', {'basename': 'dummy_basename'}, '_basename',\n 'dummy_basename'), ('env_name', {'env_name': 'dummy_env_name'},\n '_env_name', 'dummy_env_name'), ('env_prefix', {'env_prefix':\n 'dummy_env_prefix'}, '_env_prefix', 'dummy_env_prefix')]"], {}), "([('dry_run', {'dry_run': True},\n 'dry_run', True), ('force', {'force': True}, 'force', True), (\n 'message_prefix', {'message_prefix': 'dummy_message_prefix'},\n 'message_prefix', 'dummy_message_prefix'), ('python', {'python':\n 'dummy_python'}, 'python', 'dummy_python'), ('basename', {'basename':\n 'dummy_basename'}, '_basename', 'dummy_basename'), ('env_name', {\n 'env_name': 'dummy_env_name'}, '_env_name', 'dummy_env_name'), (\n 'env_prefix', {'env_prefix': 'dummy_env_prefix'}, '_env_prefix',\n 'dummy_env_prefix')])\n", (1308, 1850), False, 'import parameterized\n'), ((2779, 2903), 'parameterized.parameterized.expand', 'parameterized.parameterized.expand', (["[('default', None, 'python-venv'), ('specified', 'dummy-package',\n 'dummy-package')]"], {}), "([('default', None, 'python-venv'), (\n 'specified', 'dummy-package', 'dummy-package')])\n", (2813, 2903), False, 'import parameterized\n'), ((3397, 3507), 'parameterized.parameterized.expand', 'parameterized.parameterized.expand', (["[('default', None, ''), ('specified', 'dummy-prefix', 'dummy-prefix')]"], {}), "([('default', None, ''), ('specified',\n 'dummy-prefix', 'dummy-prefix')])\n", (3431, 3507), False, 'import parameterized\n'), ((1162, 1208), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (1188, 1208), False, 'from python_venv import const, env, reqs\n'), ((2176, 2232), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme', **kwargs)\n", (2202, 2232), False, 'from python_venv import const, env, reqs\n'), ((2482, 2528), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (2508, 2528), False, 'from python_venv import const, env, reqs\n'), ((2670, 2716), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (2696, 2716), False, 'from python_venv import const, env, reqs\n'), ((3098, 3154), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme', **kwargs)\n", (3124, 3154), False, 'from python_venv import const, env, reqs\n'), ((3268, 3314), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (3294, 3314), False, 'from python_venv import const, env, reqs\n'), ((3713, 3769), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme', **kwargs)\n", (3739, 3769), False, 'from python_venv import const, env, reqs\n'), ((3884, 3930), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (3910, 3930), False, 'from 
python_venv import const, env, reqs\n'), ((4075, 4121), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (4101, 4121), False, 'from python_venv import const, env, reqs\n'), ((4269, 4315), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (4295, 4315), False, 'from python_venv import const, env, reqs\n'), ((4463, 4509), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (4489, 4509), False, 'from python_venv import const, env, reqs\n'), ((4662, 4708), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (4688, 4708), False, 'from python_venv import const, env, reqs\n'), ((4856, 4902), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (4882, 4902), False, 'from python_venv import const, env, reqs\n'), ((5043, 5089), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', (['"""dummy_req_scheme"""'], {}), "('dummy_req_scheme')\n", (5069, 5089), False, 'from python_venv import const, env, reqs\n'), ((919, 947), 'python_venv.env.BaseVirtualEnvironment', 'env.BaseVirtualEnvironment', ([], {}), '()\n', (945, 947), False, 'from python_venv import const, env, reqs\n')]
|
from django.core.management.base import BaseCommand
from ncharts.models import VariableTimes
from ncharts.models import ClientState
from ncharts import views as nc_views
from django.contrib.sessions.models import Session
class Command(BaseCommand):
def handle(self, **options):
sessions = Session.objects.all()
print("#sessions=%d" % len(sessions))
clnts = ClientState.objects.all()
print("#clients=%d" % len(clnts))
clnts_active = set()
ndeleted = 0
for sess in sessions:
sess_dict = sess.get_decoded()
for sess_key in sess_dict:
for clnt in clnts:
dset = clnt.dataset
project = dset.project
cid_name = nc_views.client_id_name(
project.name, dset.name)
if cid_name == sess_key and sess_dict[cid_name] == clnt.pk:
clnts_active.add(clnt.pk)
break
dtimes_active = set()
for clnt in clnts:
if clnt.pk in clnts_active:
print("client found in session: pk=%d, dataset=%s" % \
(clnt.pk, clnt.dataset))
# dtimes = clnt.data_times.all()
# for dt in dtimes:
# print("client pk=%d, active data_time, type(dt)=%s, dt.pk=%d" % \
# (clnt.pk, type(dt), dt.pk))
dtimes_active.update(clnt.data_times.all())
else:
print("client not found in session: pk=%d, dataset=%s, deleting" % \
(clnt.pk, clnt.dataset))
clnt.delete()
ndeleted += 1
print("#clients deleted=%d" % (ndeleted))
vtimes = VariableTimes.objects.all()
print("#vtimes=%d" % len(vtimes))
ndeleted = 0
for vt in vtimes:
# print("type vt=%s" % type(vt))
if vt not in dtimes_active:
print("VariableTime not found in a client: pk=%d, deleting" % \
vt.pk)
vt.delete()
ndeleted += 1
print("#vtimes deleted=%d" % (ndeleted))
|
[
"ncharts.views.client_id_name",
"django.contrib.sessions.models.Session.objects.all",
"ncharts.models.VariableTimes.objects.all",
"ncharts.models.ClientState.objects.all"
] |
[((306, 327), 'django.contrib.sessions.models.Session.objects.all', 'Session.objects.all', ([], {}), '()\n', (325, 327), False, 'from django.contrib.sessions.models import Session\n'), ((391, 416), 'ncharts.models.ClientState.objects.all', 'ClientState.objects.all', ([], {}), '()\n', (414, 416), False, 'from ncharts.models import ClientState\n'), ((1790, 1817), 'ncharts.models.VariableTimes.objects.all', 'VariableTimes.objects.all', ([], {}), '()\n', (1815, 1817), False, 'from ncharts.models import VariableTimes\n'), ((773, 821), 'ncharts.views.client_id_name', 'nc_views.client_id_name', (['project.name', 'dset.name'], {}), '(project.name, dset.name)\n', (796, 821), True, 'from ncharts import views as nc_views\n')]
|
import os
import fnmatch
def finder(path, ext):
"""Returns files from path by extension"""
l = []
if not ext.startswith('*.'):
ext = '*.{0}'.format(ext)
for path, dirs, files in os.walk(os.path.abspath(path)):
for f in fnmatch.filter(files, ext):
l.append(os.path.join(path, f))
return l
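
# Illustrative usage (added sketch, not part of the original snippet): list all
# ".py" files under the current working directory.
if __name__ == '__main__':
    for match in finder('.', 'py'):
        print(match)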
|
[
"fnmatch.filter",
"os.path.abspath",
"os.path.join"
] |
[((221, 242), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (236, 242), False, 'import os\n'), ((263, 289), 'fnmatch.filter', 'fnmatch.filter', (['files', 'ext'], {}), '(files, ext)\n', (277, 289), False, 'import fnmatch\n'), ((313, 334), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (325, 334), False, 'import os\n')]
|
from datetime import datetime
from airflow.operators.dummy_operator import DummyOperator
from marquez_airflow import DAG
DAG_NAME = 'test_dag'
default_args = {
'depends_on_past': False,
'start_date': datetime(2019, 2, 1),
}
dag = DAG(DAG_NAME, schedule_interval='0 0 * * *',
catchup=False,
default_args=default_args, description="My awesome DAG")
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag)
run_this_2.set_upstream(run_this_1)
|
[
"marquez_airflow.DAG",
"airflow.operators.dummy_operator.DummyOperator",
"datetime.datetime"
] |
[((243, 364), 'marquez_airflow.DAG', 'DAG', (['DAG_NAME'], {'schedule_interval': '"""0 0 * * *"""', 'catchup': '(False)', 'default_args': 'default_args', 'description': '"""My awesome DAG"""'}), "(DAG_NAME, schedule_interval='0 0 * * *', catchup=False, default_args=\n default_args, description='My awesome DAG')\n", (246, 364), False, 'from marquez_airflow import DAG\n'), ((394, 438), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""run_this_1"""', 'dag': 'dag'}), "(task_id='run_this_1', dag=dag)\n", (407, 438), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((452, 496), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""run_this_2"""', 'dag': 'dag'}), "(task_id='run_this_2', dag=dag)\n", (465, 496), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((212, 232), 'datetime.datetime', 'datetime', (['(2019)', '(2)', '(1)'], {}), '(2019, 2, 1)\n', (220, 232), False, 'from datetime import datetime\n')]
|
# encoding: utf-8
from miniworld.model.spatial.MovementPattern.RandomWalk import RandomWalk
from .AbstractNode import AbstractNode
__author__ = "<NAME>"
__email__ = "uni at lamp<EMAIL>"
class DefaultNode(AbstractNode):
"""
Attributes
----------
crnt_movement_pattern : AbstractMovementPattern
dict_of_movement_pattern : dict<String, AbstractMovementPattern>
"""
def __init__(self, node_id):
super(DefaultNode, self).__init__(node_id)
self.crnt_movement_pattern = RandomWalk()
self.dict_of_movement_pattern["RandomWalk"] = self.crnt_movement_pattern
def __check_conditions(self):
pass
|
[
"miniworld.model.spatial.MovementPattern.RandomWalk.RandomWalk"
] |
[((544, 556), 'miniworld.model.spatial.MovementPattern.RandomWalk.RandomWalk', 'RandomWalk', ([], {}), '()\n', (554, 556), False, 'from miniworld.model.spatial.MovementPattern.RandomWalk import RandomWalk\n')]
|
from django.apps import apps
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
class EventManager(models.Manager):
def current_event(self):
return self.filter(published=True).order_by("-start_time").first()
def current_event_id(self):
e = self.current_event()
if e:
return e.id
return None
def current_registration_open(self):
e = self.current_event()
if e:
return e.registration_open
return False
def current_submission_open(self):
e = self.current_event()
if e:
return e.submission_open
return False
def all_but_current(self):
return self.exclude(id=self.current_event_id())
def create_event(self, title, **kwargs):
event = self.model(title=title, **kwargs)
event.save()
return event
@python_2_unicode_compatible
class Event(models.Model):
title = models.CharField(verbose_name=_("Event title"), max_length=256, unique=True)
slug = models.SlugField(verbose_name=_("Short name for URLs"), unique=True)
description = models.TextField(verbose_name=_("Description"))
location = models.TextField(verbose_name=_("Location"), blank=True)
full_day = models.BooleanField(verbose_name=_("Full day event"), default=False)
start_time = models.DateTimeField(verbose_name=_("Start time"))
end_time = models.DateTimeField(verbose_name=_("End time"))
published = models.BooleanField(verbose_name=_("Published"), default=True)
registration_open = models.BooleanField(
verbose_name=_("Registration Open"), default=False
)
submission_open = models.BooleanField(
verbose_name=_("Submission Open"), default=False
)
voting_open = models.BooleanField(
verbose_name=_("Voting Open"),
default=False,
help_text=_("Attendees can vote for their preferred sessions"),
)
sessions_published = models.BooleanField(
verbose_name=_("Grid Published"), default=False
)
talkformat = models.ManyToManyField(
"talk.TalkFormat", verbose_name=_("Talk Formats")
)
objects = EventManager()
class Meta:
verbose_name = _("Event")
verbose_name_plural = _("Events")
def registration_count(self):
"""Returns the count of registered attendees."""
a = apps.get_app_config("attendee").get_model("Attendee")
return a.objects.filter(event=self).count()
registration_count.short_description = _("Registration Count")
@property
def feedback_open(self):
"""
:return: True if the event has started and feedback is allowed
"""
return (
self.published
and self.start_time is not None
and self.start_time <= timezone.now()
)
def is_started(self):
return self.start_time < timezone.now()
def is_running(self):
return self.start_time < timezone.now() < self.end_time
def has_ended(self):
return self.end_time < timezone.now()
def is_raffle_available(self):
return not self.has_ended()
def get_absolute_url(self):
return reverse("session_list", kwargs={"event": self.slug})
def save(
self, force_insert=False, force_update=False, using=None, update_fields=None
):
if not self.slug:
self.slug = slugify(self.title)
super().save(force_insert, force_update, using, update_fields)
def __str__(self):
return self.title
|
[
"django.utils.timezone.now",
"django.utils.text.slugify",
"django.urls.reverse",
"django.apps.apps.get_app_config",
"django.utils.translation.ugettext_lazy"
] |
[((2711, 2734), 'django.utils.translation.ugettext_lazy', '_', (['"""Registration Count"""'], {}), "('Registration Count')\n", (2712, 2734), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2404, 2414), 'django.utils.translation.ugettext_lazy', '_', (['"""Event"""'], {}), "('Event')\n", (2405, 2414), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2445, 2456), 'django.utils.translation.ugettext_lazy', '_', (['"""Events"""'], {}), "('Events')\n", (2446, 2456), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3380, 3432), 'django.urls.reverse', 'reverse', (['"""session_list"""'], {'kwargs': "{'event': self.slug}"}), "('session_list', kwargs={'event': self.slug})\n", (3387, 3432), False, 'from django.urls import reverse\n'), ((1166, 1182), 'django.utils.translation.ugettext_lazy', '_', (['"""Event title"""'], {}), "('Event title')\n", (1167, 1182), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1254, 1278), 'django.utils.translation.ugettext_lazy', '_', (['"""Short name for URLs"""'], {}), "('Short name for URLs')\n", (1255, 1278), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1341, 1357), 'django.utils.translation.ugettext_lazy', '_', (['"""Description"""'], {}), "('Description')\n", (1342, 1357), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1404, 1417), 'django.utils.translation.ugettext_lazy', '_', (['"""Location"""'], {}), "('Location')\n", (1405, 1417), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1479, 1498), 'django.utils.translation.ugettext_lazy', '_', (['"""Full day event"""'], {}), "('Full day event')\n", (1480, 1498), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1566, 1581), 'django.utils.translation.ugettext_lazy', '_', (['"""Start time"""'], {}), "('Start time')\n", (1567, 1581), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1632, 1645), 'django.utils.translation.ugettext_lazy', '_', (['"""End time"""'], {}), "('End time')\n", (1633, 1645), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1696, 1710), 'django.utils.translation.ugettext_lazy', '_', (['"""Published"""'], {}), "('Published')\n", (1697, 1710), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1792, 1814), 'django.utils.translation.ugettext_lazy', '_', (['"""Registration Open"""'], {}), "('Registration Open')\n", (1793, 1814), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1900, 1920), 'django.utils.translation.ugettext_lazy', '_', (['"""Submission Open"""'], {}), "('Submission Open')\n", (1901, 1920), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2002, 2018), 'django.utils.translation.ugettext_lazy', '_', (['"""Voting Open"""'], {}), "('Voting Open')\n", (2003, 2018), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2061, 2113), 'django.utils.translation.ugettext_lazy', '_', (['"""Attendees can vote for their preferred sessions"""'], {}), "('Attendees can vote for their preferred sessions')\n", (2062, 2113), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2188, 2207), 'django.utils.translation.ugettext_lazy', '_', (['"""Grid Published"""'], {}), "('Grid Published')\n", (2189, 2207), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2310, 2327), 'django.utils.translation.ugettext_lazy', '_', (['"""Talk Formats"""'], {}), "('Talk Formats')\n", (2311, 2327), True, 
'from django.utils.translation import ugettext_lazy as _\n'), ((3082, 3096), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3094, 3096), False, 'from django.utils import timezone\n'), ((3157, 3171), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3169, 3171), False, 'from django.utils import timezone\n'), ((3245, 3259), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3257, 3259), False, 'from django.utils import timezone\n'), ((3590, 3609), 'django.utils.text.slugify', 'slugify', (['self.title'], {}), '(self.title)\n', (3597, 3609), False, 'from django.utils.text import slugify\n'), ((2561, 2592), 'django.apps.apps.get_app_config', 'apps.get_app_config', (['"""attendee"""'], {}), "('attendee')\n", (2580, 2592), False, 'from django.apps import apps\n'), ((2997, 3011), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3009, 3011), False, 'from django.utils import timezone\n')]
|
"""
===================
Setup Scene Example
===================
This script provides examples for loading, using and saving an object library based on a YAML file.
We will step through the individual commands and generate thumbnails, VHACD meshes, and URDF files for all objects.
After that, we will compute the stable poses of each object.
We randomly sample scenes exploiting the stable poses of the objects.
We can interact with the object instances and move them, and put them into a simulator so they attain a resting
pose again.
"""
import argparse
import os
import burg_toolkit as burg
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--lib', type=str,
default='/home/rudorfem/datasets/object_libraries/test_library/test_library.yaml',
help='path to object library file')
parser.add_argument('--skip', action='store_true', default=False,
help='use this option to skip through the user interactions')
parser.add_argument('--override', action='store_true', default=False,
help='if activated, all object data will be regenerated even if it already exists')
return parser.parse_args()
def wait_for_user(skip=False):
if skip:
return
print('press any key to continue')
input()
def main(object_library_fn, skip, override):
object_library = burg.ObjectLibrary.from_yaml(object_library_fn)
library_dir = os.path.dirname(object_library_fn)
print(object_library) # prints short version
object_library.print_details() # gives more detailed output about contained objects
print('*************************')
print('next action: generate thumbnail files for all objects')
wait_for_user(skip)
thumbnail_dir = os.path.join(library_dir, 'thumbnails')
object_library.generate_thumbnails(thumbnail_dir, override=override)
print(f'thumbnails created in {thumbnail_dir}')
print('*************************')
print('next action: generate vhacd meshes')
wait_for_user(skip)
vhacd_dir = os.path.join(library_dir, 'vhacd')
object_library.generate_vhacd_files(vhacd_dir, override=override)
print(f'vhacd files created in {vhacd_dir}')
print('*************************')
print('next action: generate urdf files')
wait_for_user(skip)
urdf_dir = os.path.join(library_dir, 'urdf')
object_library.generate_urdf_files(urdf_dir, use_vhacd=True, override=override)
print(f'urdf files created in {urdf_dir}')
print('*************************')
print('next action: compute stable poses for objects and verify with vhacd in simulation')
wait_for_user(skip)
object_library.compute_stable_poses(verify_in_sim=True, override=override)
print('stable poses computed.')
print('*************************')
print('all information in object library should be completed now:')
object_library.print_details()
print('*************************')
new_lib_fn = f'{object_library_fn[:-5]}_roundtrip.yaml'
print(f'next action: save object library to {new_lib_fn}')
wait_for_user(skip)
object_library.to_yaml(new_lib_fn)
print('object library saved.')
print('*************************')
print('next action: sampling scenes with object instances in stable poses, and visualise.')
print('note: you need to close the open3d window to continue. (not the simulation window later on, though!)')
dim = (1, 0.5)
n_instances = 5
print(f'{n_instances} instances will be placed in ground area of {dim}')
wait_for_user(skip)
scene = burg.sampling.sample_scene(
object_library,
ground_area=dim,
instances_per_scene=n_instances,
instances_per_object=1
)
burg.visualization.show_geometries([scene])
print('*************************')
print('next action: simulate this scene to make sure it is at rest, then visualise again.')
wait_for_user(skip)
sim = burg.scene_sim.SceneSimulator(verbose=True) # verbose shows the simulator GUI, slower than real-time
sim.simulate_scene(scene) # the poses of all instances in the scene are automatically updated by the simulator
sim.dismiss() # can also reuse, then the window stays open
burg.visualization.show_geometries([scene])
print('*************************')
print('next action: manually change the pose of an object instance, visualise, simulate, visualise.')
wait_for_user(skip)
instance = scene.objects[0]
# we lift it up a bit to avoid any collisions with other objects
instance.pose[2, 3] = instance.pose[2, 3] + 0.2
burg.visualization.show_geometries([scene])
sim = burg.scene_sim.SceneSimulator(verbose=True)
sim.simulate_scene(scene)
burg.visualization.show_geometries([scene])
sim.dismiss()
print('*************************')
print('that was all, thank you and good bye.')
if __name__ == "__main__":
args = parse_args()
main(args.lib, args.skip, args.override)
|
[
"burg_toolkit.ObjectLibrary.from_yaml",
"argparse.ArgumentParser",
"burg_toolkit.visualization.show_geometries",
"os.path.dirname",
"burg_toolkit.scene_sim.SceneSimulator",
"burg_toolkit.sampling.sample_scene",
"os.path.join"
] |
[((622, 666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (645, 666), False, 'import argparse\n'), ((1426, 1473), 'burg_toolkit.ObjectLibrary.from_yaml', 'burg.ObjectLibrary.from_yaml', (['object_library_fn'], {}), '(object_library_fn)\n', (1454, 1473), True, 'import burg_toolkit as burg\n'), ((1492, 1526), 'os.path.dirname', 'os.path.dirname', (['object_library_fn'], {}), '(object_library_fn)\n', (1507, 1526), False, 'import os\n'), ((1818, 1857), 'os.path.join', 'os.path.join', (['library_dir', '"""thumbnails"""'], {}), "(library_dir, 'thumbnails')\n", (1830, 1857), False, 'import os\n'), ((2111, 2145), 'os.path.join', 'os.path.join', (['library_dir', '"""vhacd"""'], {}), "(library_dir, 'vhacd')\n", (2123, 2145), False, 'import os\n'), ((2390, 2423), 'os.path.join', 'os.path.join', (['library_dir', '"""urdf"""'], {}), "(library_dir, 'urdf')\n", (2402, 2423), False, 'import os\n'), ((3639, 3759), 'burg_toolkit.sampling.sample_scene', 'burg.sampling.sample_scene', (['object_library'], {'ground_area': 'dim', 'instances_per_scene': 'n_instances', 'instances_per_object': '(1)'}), '(object_library, ground_area=dim,\n instances_per_scene=n_instances, instances_per_object=1)\n', (3665, 3759), True, 'import burg_toolkit as burg\n'), ((3798, 3841), 'burg_toolkit.visualization.show_geometries', 'burg.visualization.show_geometries', (['[scene]'], {}), '([scene])\n', (3832, 3841), True, 'import burg_toolkit as burg\n'), ((4012, 4055), 'burg_toolkit.scene_sim.SceneSimulator', 'burg.scene_sim.SceneSimulator', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4041, 4055), True, 'import burg_toolkit as burg\n'), ((4298, 4341), 'burg_toolkit.visualization.show_geometries', 'burg.visualization.show_geometries', (['[scene]'], {}), '([scene])\n', (4332, 4341), True, 'import burg_toolkit as burg\n'), ((4669, 4712), 'burg_toolkit.visualization.show_geometries', 'burg.visualization.show_geometries', (['[scene]'], {}), '([scene])\n', (4703, 4712), True, 'import burg_toolkit as burg\n'), ((4723, 4766), 'burg_toolkit.scene_sim.SceneSimulator', 'burg.scene_sim.SceneSimulator', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4752, 4766), True, 'import burg_toolkit as burg\n'), ((4801, 4844), 'burg_toolkit.visualization.show_geometries', 'burg.visualization.show_geometries', (['[scene]'], {}), '([scene])\n', (4835, 4844), True, 'import burg_toolkit as burg\n')]
|
"""Requests Table."""
import json
import logging
from datetime import datetime
import cherrypy
from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from lzproduction.utils.collections import subdict
from ..utils import db_session
from ..statuses import LOCALSTATUS
from .SQLTableBase import SQLTableBase
from .JSONTableEncoder import JSONTableEncoder
from .Users import Users
from .ParametricJobs import ParametricJobs
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@cherrypy.expose
class Requests(SQLTableBase):
"""Requests SQL Table."""
__tablename__ = 'requests'
id = Column(Integer, primary_key=True) # pylint: disable=invalid-name
requester_id = Column(Integer, ForeignKey('users.id'), nullable=False)
request_date = Column(String(250), nullable=False)
source = Column(String(250), nullable=False)
detector = Column(String(250), nullable=False)
sim_lead = Column(String(250), nullable=False)
status = Column(Enum(LOCALSTATUS), nullable=False, default=LOCALSTATUS.Requested)
description = Column(String(250), nullable=False)
timestamp = Column(TIMESTAMP, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow)
parametricjobs = relationship("ParametricJobs", back_populates="request")
def submit(self):
"""Submit Request."""
with db_session() as session:
parametricjobs = session.query(ParametricJobs).filter_by(request_id=self.id).all()
session.expunge_all()
session.merge(self).status = LOCALSTATUS.Submitting
logger.info("Submitting request %s", self.id)
submitted_jobs = []
try:
for job in parametricjobs:
job.submit()
submitted_jobs.append(job)
except:
logger.exception("Exception while submitting request %s", self.id)
logger.info("Resetting associated ParametricJobs")
for job in submitted_jobs:
job.reset()
def delete_parametric_jobs(self, session):
"""Delete associated ParametricJob jobs."""
logger.info("Deleting ParametricJobs for Request id: %s", self.id)
parametric_jobs = session.query(ParametricJobs)\
.filter_by(request_id=self.id)
for job in parametric_jobs.all():
job.delete_dirac_jobs(session)
parametric_jobs.delete(synchronize_session=False)
def update_status(self):
"""Update request status."""
with db_session() as session:
parametricjobs = session.query(ParametricJobs).filter_by(request_id=self.id).all()
session.expunge_all()
statuses = []
for job in parametricjobs:
try:
statuses.append(job.update_status())
except:
logger.exception("Exception updating ParametricJob %s", job.id)
status = max(statuses or [self.status])
if status != self.status:
with db_session(reraise=False) as session:
session.merge(self).status = status
logger.info("Request %s moved to state %s", self.id, status.name)
@staticmethod
def GET(reqid=None): # pylint: disable=invalid-name
"""REST Get method."""
logger.debug("In GET: reqid = %s", reqid)
requester = cherrypy.request.verified_user
with db_session() as session:
user_requests = session.query(Requests).filter_by(requester_id=requester.id)
# Get all requests.
if reqid is None:
if requester.admin:
all_requests = session.query(Requests, Users)\
.join(Users, Requests.requester_id == Users.id)\
.all()
# could make a specialised encoder for this.
return json.dumps({'data': [dict(request, requester=user.name, status=request.status.name)
for request, user in all_requests]},
cls=JSONTableEncoder)
return json.dumps({'data': user_requests.all()}, cls=JSONTableEncoder)
# Get specific request.
if requester.admin:
user_requests = session.query(Requests)
request = user_requests.filter_by(id=reqid).first()
return json.dumps({'data': request}, cls=JSONTableEncoder)
@staticmethod
def DELETE(reqid): # pylint: disable=invalid-name
"""REST Delete method."""
logger.debug("In DELETE: reqid = %s", reqid)
if cherrypy.request.verified_user.admin:
with db_session() as session:
logger.info("Deleting Request id: %s", reqid)
try:
request = session.query(Requests).filter_by(id=reqid).one()
except NoResultFound:
logger.warning("No Request found with id: %s", reqid)
except MultipleResultsFound:
logger.error("Multiple Requests found with id: %s!", reqid)
else:
request.delete_parametric_jobs(session)
session.delete(request)
return Requests.GET()
@staticmethod
def PUT(reqid, **kwargs): # pylint: disable=invalid-name
"""REST Put method."""
logger.debug("In PUT: reqid = %s, kwargs = %s", reqid, kwargs)
requester = cherrypy.request.verified_user
status_update = kwargs.pop('status', None)
with db_session() as session:
query = session.query(Requests).filter_by(id=reqid)
if requester.admin and status_update == 'Approved':
query.update(subdict(kwargs, ('description',
'sim_lead',
'detector',
'source'), status=LOCALSTATUS.Approved))
return Requests.GET()
if not requester.admin:
query = query.filter_by(requester_id=requester.id)
query.update(subdict(kwargs, ('description', 'sim_lead', 'detector', 'source')))
return Requests.GET()
@staticmethod
def POST(**kwargs): # pylint: disable=invalid-name
"""REST Post method."""
logger.debug("In POST: kwargs = %s", kwargs)
selected_macros = kwargs.pop('selected_macros', [])
if not isinstance(selected_macros, list):
selected_macros = [selected_macros]
with db_session() as session:
request = Requests(**subdict(kwargs, Requests.columns,
requester_id=cherrypy.request.verified_user.id,
request_date=datetime.now().strftime('%d/%m/%Y'),
status=LOCALSTATUS.Requested))
session.add(request)
session.flush()
session.refresh(request)
parametricjobs = []
if 'app' in kwargs:
for macro in selected_macros:
path, njobs, nevents, seed = macro.split()
parametricjobs.append(subdict(kwargs, ParametricJobs.columns,
request_id=request.id,
status=LOCALSTATUS.Requested,
macro=path,
njobs=njobs,
nevents=nevents,
seed=seed))
elif kwargs.viewkeys() & {'reduction_lfn_inputdir',
'der_lfn_inputdir',
'lzap_lfn_inputdir'}:
parametricjobs.append(subdict(kwargs, ParametricJobs.columns,
request_id=request.id,
status=LOCALSTATUS.Requested))
if parametricjobs:
session.bulk_insert_mappings(ParametricJobs, parametricjobs)
else:
logger.warning("No ParametricJobs added to the DB.")
return Requests.GET()
|
[
"sqlalchemy.Enum",
"sqlalchemy.ForeignKey",
"json.dumps",
"lzproduction.utils.collections.subdict",
"sqlalchemy.orm.relationship",
"sqlalchemy.Column",
"sqlalchemy.String",
"datetime.datetime.now",
"logging.getLogger"
] |
[((495, 522), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (512, 522), False, 'import logging\n'), ((675, 708), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (681, 708), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((1178, 1267), 'sqlalchemy.Column', 'Column', (['TIMESTAMP'], {'nullable': '(False)', 'default': 'datetime.utcnow', 'onupdate': 'datetime.utcnow'}), '(TIMESTAMP, nullable=False, default=datetime.utcnow, onupdate=\n datetime.utcnow)\n', (1184, 1267), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((1284, 1340), 'sqlalchemy.orm.relationship', 'relationship', (['"""ParametricJobs"""'], {'back_populates': '"""request"""'}), "('ParametricJobs', back_populates='request')\n", (1296, 1340), False, 'from sqlalchemy.orm import relationship\n'), ((776, 798), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (786, 798), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((842, 853), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (848, 853), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((891, 902), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (897, 902), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((942, 953), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (948, 953), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((993, 1004), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (999, 1004), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((1042, 1059), 'sqlalchemy.Enum', 'Enum', (['LOCALSTATUS'], {}), '(LOCALSTATUS)\n', (1046, 1059), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((1133, 1144), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (1139, 1144), False, 'from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum\n'), ((4486, 4537), 'json.dumps', 'json.dumps', (["{'data': request}"], {'cls': 'JSONTableEncoder'}), "({'data': request}, cls=JSONTableEncoder)\n", (4496, 4537), False, 'import json\n'), ((6230, 6296), 'lzproduction.utils.collections.subdict', 'subdict', (['kwargs', "('description', 'sim_lead', 'detector', 'source')"], {}), "(kwargs, ('description', 'sim_lead', 'detector', 'source'))\n", (6237, 6296), False, 'from lzproduction.utils.collections import subdict\n'), ((5828, 5928), 'lzproduction.utils.collections.subdict', 'subdict', (['kwargs', "('description', 'sim_lead', 'detector', 'source')"], {'status': 'LOCALSTATUS.Approved'}), "(kwargs, ('description', 'sim_lead', 'detector', 'source'), status=\n LOCALSTATUS.Approved)\n", (5835, 5928), False, 'from lzproduction.utils.collections import subdict\n'), ((7319, 7469), 'lzproduction.utils.collections.subdict', 'subdict', (['kwargs', 'ParametricJobs.columns'], {'request_id': 'request.id', 'status': 'LOCALSTATUS.Requested', 'macro': 'path', 'njobs': 'njobs', 'nevents': 'nevents', 'seed': 'seed'}), '(kwargs, ParametricJobs.columns, request_id=request.id, status=\n LOCALSTATUS.Requested, macro=path, njobs=njobs, nevents=nevents, seed=seed)\n', (7326, 7469), False, 'from lzproduction.utils.collections import subdict\n'), ((7986, 8083), 'lzproduction.utils.collections.subdict', 'subdict', (['kwargs', 
'ParametricJobs.columns'], {'request_id': 'request.id', 'status': 'LOCALSTATUS.Requested'}), '(kwargs, ParametricJobs.columns, request_id=request.id, status=\n LOCALSTATUS.Requested)\n', (7993, 8083), False, 'from lzproduction.utils.collections import subdict\n'), ((6896, 6910), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6908, 6910), False, 'from datetime import datetime\n')]
|
import binascii
import numpy as np
import copy
from scapy.all import TCP, UDP, IP, IPv6, ARP, raw
def get_packet_matrix(packet):
"""
    Transform packet content into a 1D array of bytes
Parameters
----------
packet : an IP packet
Returns
-------
    1D ndarray of packet bytes
"""
hexst = binascii.hexlify(raw(packet))
fh = np.array([int(hexst[i:i+2],16) for i in range(0, len(hexst), 2)])
fh = np.uint8(fh)
return fh.reshape(-1)
def santize_packet_zeros(packet):
"""
    This method sanitizes a packet by anonymizing IP and MAC addresses
Parameters
----------
packet : a packet
Returns
-------
sanitized packet
"""
pkt = copy.deepcopy(packet)
ipv4='0.0.0.0'
ipv6='0000:00::00'
mac='00:00:00:00:00:00'
if pkt.haslayer(IPv6):
pkt[IPv6].src = ipv6
pkt[IPv6].dst = ipv6
if pkt.haslayer(TCP):
pkt[TCP].sport = 0
pkt[TCP].dport = 0
elif pkt.haslayer(UDP):
pkt[UDP].sport = 0
pkt[UDP].dport = 0
elif pkt.haslayer(IP) :
pkt[IP].src = ipv4
pkt[IP].dst = ipv4
if pkt.haslayer(TCP):
pkt[TCP].sport = 0
pkt[TCP].dport = 0
elif pkt.haslayer(UDP):
pkt[UDP].sport = 0
pkt[UDP].dport = 0
elif pkt.haslayer(ARP):
pkt[ARP].hwsrc = mac
pkt[ARP].hwdst = mac
pkt[ARP].psrc = ipv4
pkt[ARP].pdst = ipv4
else:
pass
return pkt
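
# Illustrative usage (added sketch, not part of the original snippet): build a
# small TCP/IP packet, anonymize it, and flatten it to a byte vector.
if __name__ == '__main__':
    example = IP(src='192.168.0.1', dst='10.0.0.1') / TCP(sport=1234, dport=80)
    print(get_packet_matrix(santize_packet_zeros(example)))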
|
[
"copy.deepcopy",
"numpy.uint8",
"scapy.all.raw"
] |
[((444, 456), 'numpy.uint8', 'np.uint8', (['fh'], {}), '(fh)\n', (452, 456), True, 'import numpy as np\n'), ((718, 739), 'copy.deepcopy', 'copy.deepcopy', (['packet'], {}), '(packet)\n', (731, 739), False, 'import copy\n'), ((342, 353), 'scapy.all.raw', 'raw', (['packet'], {}), '(packet)\n', (345, 353), False, 'from scapy.all import TCP, UDP, IP, IPv6, ARP, raw\n')]
|
######################################################
#
# PyRAI2MD 2 module for thermostat in NVT ensemble
#
# Author <NAME>
# Sep 7 2021
#
######################################################
import numpy as np
def NoseHoover(traj):
""" Velocity scaling function in NVT ensemble (Nose Hoover thermostat)
Parameters: Type:
traj class trajectory class
Attribute: Type:
natom int number of atoms
temp float temperature
kinetic float kinetic energy
Vs list additional velocity information
kb float Boltzmann's constant
fs_to_au float unit conversion fs to au of time
"""
natom = traj.natom
kinetic = traj.kinetic
temp = traj.temp
size = traj.size
Vs = traj.Vs
kb = 3.16881 * 10**-6
fs_to_au = 2.4188843265857 * 10**-2
if len(Vs) == 0:
freq = 1 / (22 / fs_to_au) ## 22 fs to au Hz
Q1 = 3 * natom * temp * kb / freq**2
Q2 = temp * kb / freq**2
traj.Vs = [Q1, Q2, 0, 0]
else:
Q1, Q2, V1, V2 = Vs
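        # Added descriptive note (not in the original snippet): this block is one
        # integration step of a two-thermostat Nose-Hoover chain. The thermostat
        # "forces" are G1 = (2*KE - 3*N*kB*T)/Q1 and G2 = (Q1*V1**2 - kB*T)/Q2;
        # V1 and V2 are advanced in quarter steps, and the atomic velocities and
        # kinetic energy are rescaled by s = exp(-V1*size/2) and s**2.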
G2 = (Q1 * V1**2 - temp * kb) / Q2
V2 += G2 * size / 4
V1 *= np.exp(-V2 * size / 8)
G1 = (2 * kinetic - 3 * natom * temp * kb) / Q1
V1 += G1 * size / 4
V1 *= np.exp(-V2 * size / 8)
s = np.exp(-V1 * size / 2)
traj.kinetic *= s**2
traj.velo *= s
V1 *= np.exp(-V2 * size / 8)
G1 = (2 * kinetic - 3 * natom * temp * kb) / Q1
V1 += G1 * size / 4
V1 *= np.exp(-V2 * size / 8)
G2 = (Q1 * V1**2 - temp * kb) / Q2
V2 += G2 * size / 4
traj.Vs = [Q1, Q2, V1, V2]
return traj
|
[
"numpy.exp"
] |
[((1333, 1355), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1339, 1355), True, 'import numpy as np\n'), ((1454, 1476), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1460, 1476), True, 'import numpy as np\n'), ((1489, 1511), 'numpy.exp', 'np.exp', (['(-V1 * size / 2)'], {}), '(-V1 * size / 2)\n', (1495, 1511), True, 'import numpy as np\n'), ((1583, 1605), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1589, 1605), True, 'import numpy as np\n'), ((1704, 1726), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1710, 1726), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/4/5 15:18
# @Author : <NAME>
# @Email : <EMAIL>
# @File : setup.py.py
import setuptools
__version__ = None
exec(open('liyi_cute/__init__.py').read())
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
tests_requires = [
"pytest"
]
install_requires = [
"marshmallow >= 3.15.0",
]
extras_requires = {
'test': tests_requires
}
package_data = {
"liyi_cute": ["*.py","*.so"]
}
setuptools.setup(
name="liyi-cute",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license='Apache 2.0',
description="A text processing tools",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/daiyizheng/liyi-cute",
keywords="Text processing tools, including named entity recognition, "
"relation extraction, event extraction, and some statistical "
"and visualization functions",
packages=setuptools.find_packages(),
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development",
],
install_requires= install_requires,
tests_require=tests_requires,
python_requires='>=3.8',
package_data=package_data,
project_urls={
'Bug Reports': 'https://github.com/daiyizheng/liyi-cute/issues',
'Source': 'https://github.com/daiyizheng/liyi-cute',
}
)
|
[
"setuptools.find_packages"
] |
[((1107, 1133), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1131, 1133), False, 'import setuptools\n')]
|
import sys
import logging
from optparse import OptionParser
log = logging.getLogger('aws-cloudfront-extension.check_issue')
log_formatter = logging.Formatter(
'[%(asctime)s %(name)s][%(levelname)s] %(message)s')
log_stream_handler = logging.StreamHandler(sys.stdout)
log_stream_handler.setFormatter(log_formatter)
log.addHandler(log_stream_handler)
log.setLevel(logging.INFO)
REPRO_STEP = "### Reproduction Steps"
EXPECTED_RESULT = "### What did you expect to happen?"
ACTUAL_RESULT = "### What actually happened?"
def parse_opt():
parser = OptionParser(
usage="Usage: python check_issue.py [options]\n\t Check compliance for the content")
parser.add_option("-b", "--body",
dest="body",
help="The issue content body")
option, args = parser.parse_args()
return parser, option, args
def check_issue():
parser, option, args = parse_opt()
if not option.body:
log.error('Missing arguments: -b or --body')
parser.print_help()
sys.exit(1)
issue_content = get_issue_from_file(option.body)
log.info('Issue content: ' + issue_content)
index_repro = issue_content.find(REPRO_STEP)
index_expected = issue_content.find(EXPECTED_RESULT)
index_actual = issue_content.find(ACTUAL_RESULT)
if index_repro == -1 or index_expected == -1 or index_actual == -1:
log.error('Please fill in the information by using the template')
sys.exit(1)
repro_content = issue_content[index_repro + len(REPRO_STEP): index_expected]
expected_content = issue_content[
index_expected + len(EXPECTED_RESULT): index_actual]
log.info('Reproduce steps: ' + repro_content)
log.info('Expected result: ' + expected_content)
if len(repro_content.strip()) == 0 or len(expected_content.strip()) == 0:
log.error(
'Empty reproduce steps or expected result, please fill in these fields')
sys.exit(1)
log.info('Check issue compliance succeed')
def get_issue_from_file(file_name):
f = open(file_name, "r")
process_lines = f.readlines()
file_content = ''
for line in process_lines:
file_content += line
f.close()
return file_content
if __name__ == '__main__':
check_issue()
|
[
"optparse.OptionParser",
"logging.StreamHandler",
"logging.getLogger",
"logging.Formatter",
"sys.exit"
] |
[((67, 124), 'logging.getLogger', 'logging.getLogger', (['"""aws-cloudfront-extension.check_issue"""'], {}), "('aws-cloudfront-extension.check_issue')\n", (84, 124), False, 'import logging\n'), ((141, 211), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s %(name)s][%(levelname)s] %(message)s"""'], {}), "('[%(asctime)s %(name)s][%(levelname)s] %(message)s')\n", (158, 211), False, 'import logging\n'), ((238, 271), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (259, 271), False, 'import logging\n'), ((553, 662), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""Usage: python check_issue.py [options]\n\t Check compliance for the content"""'}), '(usage=\n """Usage: python check_issue.py [options]\n\t Check compliance for the content"""\n )\n', (565, 662), False, 'from optparse import OptionParser\n'), ((1031, 1042), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1039, 1042), False, 'import sys\n'), ((1459, 1470), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1467, 1470), False, 'import sys\n'), ((1946, 1957), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1954, 1957), False, 'import sys\n')]
|
import json
from common.mapr_logger.log import Log
class NodeLabels(object):
MAPR_LABEL = "mapr.com/usenode"
EXCLUSIVE_LABEL = "mapr.com/exclusivecluster"
def __init__(self, k8s):
self.k8s = k8s
self._node_count = 0
self._items = None
self._json = None
def _get_json(self):
Log.info("Retrieving node information...", stdout=True)
result, status = self.k8s.run_get("nodes -o=json")
if status != 0:
return None
self._json = json.loads(result)
if self._json is None:
Log.error("No JSON was returned from get nodes command")
return
self._items = self._json.get("items")
if self._items is None:
            Log.error("No items dictionary in get nodes JSON")
return
self._node_count = len(self._items)
def get_mapr_use_node_labels(self, label):
nodes_set = 0
nodes_not_set = set()
for node in self._items:
node_name = node["metadata"]["name"]
mapr_usenode = node["metadata"]["labels"].get(label)
if mapr_usenode is not None:
nodes_set += 1
Log.info("Node: {0} has {1} label set to: {2}".format(node_name, label, mapr_usenode))
else:
nodes_not_set.add(node_name)
Log.info("Node: {0} does not have {1} label set".format(node_name, label))
Log.info("{0} node(s) found, {1} node(s) tagged with the MapR usage tag {2} while {3} node(s) not"
.format(self._node_count, nodes_set, label, len(nodes_not_set)), stdout=True)
return nodes_not_set
def process_labels(self):
self._get_json()
nodes_not_set = self.get_mapr_use_node_labels(NodeLabels.MAPR_LABEL)
if nodes_not_set is not None and len(nodes_not_set) > 0:
Log.info("Setting MapR usage tag {0} for {1} nodes...".format(NodeLabels.MAPR_LABEL, len(nodes_not_set)),
stdout=True)
for node_not_set in nodes_not_set:
self.k8s.run_label_mapr_node(node_not_set, NodeLabels.MAPR_LABEL, True)
nodes_not_set = self.get_mapr_use_node_labels(NodeLabels.EXCLUSIVE_LABEL)
if nodes_not_set is not None and len(nodes_not_set) > 0:
Log.info("Setting MapR usage tag {0} for {1} nodes...".format(NodeLabels.EXCLUSIVE_LABEL, len(nodes_not_set)),
stdout=True)
for node_not_set in nodes_not_set:
self.k8s.run_label_mapr_node(node_not_set, NodeLabels.EXCLUSIVE_LABEL, "None")
|
[
"common.mapr_logger.log.Log.info",
"common.mapr_logger.log.Log.error",
"json.loads"
] |
[((335, 390), 'common.mapr_logger.log.Log.info', 'Log.info', (['"""Retrieving node information..."""'], {'stdout': '(True)'}), "('Retrieving node information...', stdout=True)\n", (343, 390), False, 'from common.mapr_logger.log import Log\n'), ((520, 538), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (530, 538), False, 'import json\n'), ((582, 638), 'common.mapr_logger.log.Log.error', 'Log.error', (['"""No JSON was returned from get nodes command"""'], {}), "('No JSON was returned from get nodes command')\n", (591, 638), False, 'from common.mapr_logger.log import Log\n'), ((749, 798), 'common.mapr_logger.log.Log.error', 'Log.error', (['"""No items dictonary in get nodes JSON"""'], {}), "('No items dictonary in get nodes JSON')\n", (758, 798), False, 'from common.mapr_logger.log import Log\n')]
|
from django.conf.urls import url
from . import views
app_name = 'minicms'
urlpatterns = [
url(r'^$', views.homepage, name='homepage'),
url(r'^(?P<path>.+)/$', views.page, name='page'),
]
|
[
"django.conf.urls.url"
] |
[((95, 137), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.homepage'], {'name': '"""homepage"""'}), "('^$', views.homepage, name='homepage')\n", (98, 137), False, 'from django.conf.urls import url\n'), ((144, 191), 'django.conf.urls.url', 'url', (['"""^(?P<path>.+)/$"""', 'views.page'], {'name': '"""page"""'}), "('^(?P<path>.+)/$', views.page, name='page')\n", (147, 191), False, 'from django.conf.urls import url\n')]
|
# -*- coding: utf-8 -*-
import torch
class Agent:
def __init__(self, env, cfg):
# if gpu is to be used
self.device = torch.device(
f"cuda:{cfg.gpu}" if torch.cuda.is_available() else "cpu"
)
self.env = env
self.cfg = cfg
self.n_states = self.env.observation_space.shape[0]
self.n_actions = self.env.action_space.n
def select_action(self, state):
pass
def optimize_agent(self, memory):
pass
def update_agent(self):
pass
def get_model(self):
return None
def reset_agent4test(self, **kwargs):
pass
|
[
"torch.cuda.is_available"
] |
[((187, 212), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (210, 212), False, 'import torch\n')]
|
import torch.nn as nn
import torch
class EventModel(nn.Module):
def __init__(self, input_size, hidden_size, output_size, device):
super(EventModel, self).__init__()
self.i2o = nn.Linear(input_size * 4, output_size, device=device)
self.dropout = nn.Dropout(0.1)
self.relu = nn.ReLU()
def forward(self, verb_vector, subject_vector, object_vector, date_vector):
input_combined = torch.cat((verb_vector, subject_vector, object_vector, date_vector), 1)
output = self.i2o(input_combined)
output = self.relu(output)
output = self.dropout(output)
return output
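
# Illustrative usage (added sketch, not part of the original snippet): feed four
# 16-dimensional feature vectors for a batch of 2 events.
if __name__ == '__main__':
    model = EventModel(input_size=16, hidden_size=32, output_size=8, device='cpu')
    vectors = [torch.randn(2, 16) for _ in range(4)]
    print(model(*vectors).shape)  # expected: torch.Size([2, 8])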
|
[
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.cat",
"torch.nn.Linear"
] |
[((199, 252), 'torch.nn.Linear', 'nn.Linear', (['(input_size * 4)', 'output_size'], {'device': 'device'}), '(input_size * 4, output_size, device=device)\n', (208, 252), True, 'import torch.nn as nn\n'), ((276, 291), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (286, 291), True, 'import torch.nn as nn\n'), ((312, 321), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (319, 321), True, 'import torch.nn as nn\n'), ((428, 499), 'torch.cat', 'torch.cat', (['(verb_vector, subject_vector, object_vector, date_vector)', '(1)'], {}), '((verb_vector, subject_vector, object_vector, date_vector), 1)\n', (437, 499), False, 'import torch\n')]
|
"""TrackML scoring metric"""
__authors__ = ['<NAME>', '<NAME>', '<NAME>',
'<NAME>']
import numpy
import pandas
def _analyze_tracks(truth, submission):
"""Compute the majority particle, hit counts, and weight for each track.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
Returns
-------
pandas.DataFrame
Contains track_id, nhits, major_particle_id, major_particle_nhits,
major_nhits, and major_weight columns.
"""
# true number of hits for each particle_id
particles_nhits = truth['particle_id'].value_counts(sort=False)
total_weight = truth['weight'].sum()
# combined event with minimal reconstructed and truth information
event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']],
submission[['hit_id', 'track_id']],
on=['hit_id'], how='left', validate='one_to_one')
event.drop('hit_id', axis=1, inplace=True)
event.sort_values(by=['track_id', 'particle_id'], inplace=True)
# ASSUMPTIONs: 0 <= track_id, 0 <= particle_id
tracks = []
# running sum for the reconstructed track we are currently in
rec_track_id = -1
rec_nhits = 0
# running sum for the particle we are currently in (in this track_id)
cur_particle_id = -1
cur_nhits = 0
cur_weight = 0
# majority particle with most hits up to now (in this track_id)
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
for hit in event.itertuples(index=False):
# we reached the next track so we need to finish the current one
if (rec_track_id != -1) and (rec_track_id != hit.track_id):
# could be that the current particle is the majority one
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for this track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits,
maj_weight / total_weight))
# setup running values for next track (or first)
if rec_track_id != hit.track_id:
rec_track_id = hit.track_id
rec_nhits = 1
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
maj_particle_id = -1
maj_nhits = 0
            maj_weight = 0
continue
# hit is part of the current reconstructed track
rec_nhits += 1
# reached new particle within the same reconstructed track
if cur_particle_id != hit.particle_id:
# check if last particle has more hits than the majority one
# if yes, set the last particle as the new majority particle
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
            # reset running values for current particle
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
# hit belongs to the same particle within the same reconstructed track
else:
cur_nhits += 1
cur_weight += hit.weight
# last track is not handled inside the loop
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for the last track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight))
cols = ['track_id', 'nhits',
'major_particle_id', 'major_particle_nhits',
'major_nhits', 'major_weight']
return pandas.DataFrame.from_records(tracks, columns=cols)
def score_event(truth, submission):
"""Compute the TrackML event score for a single event.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
"""
tracks = _analyze_tracks(truth, submission)
purity_rec = numpy.true_divide(tracks['major_nhits'], tracks['nhits'])
purity_maj = numpy.true_divide(tracks['major_nhits'], tracks['major_particle_nhits'])
good_track = (0.5 < purity_rec) & (0.5 < purity_maj)
return tracks['major_weight'][good_track].sum()
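# --- Hedged usage sketch (not part of the original metric code) ---
# Minimal example of calling score_event on tiny hand-built tables; the
# hit/particle values below are invented purely for illustration.
if __name__ == '__main__':
    _truth = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'particle_id': [7, 7, 7, 9],
        'weight': [0.25, 0.25, 0.25, 0.25],
    })
    _submission = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'track_id': [1, 1, 1, 2],
    })
    # a perfect hit/track matching scores 1.0
    print(score_event(_truth, _submission))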
|
[
"pandas.merge",
"numpy.true_divide",
"pandas.DataFrame.from_records"
] |
[((911, 1058), 'pandas.merge', 'pandas.merge', (["truth[['hit_id', 'particle_id', 'weight']]", "submission[['hit_id', 'track_id']]"], {'on': "['hit_id']", 'how': '"""left"""', 'validate': '"""one_to_one"""'}), "(truth[['hit_id', 'particle_id', 'weight']], submission[[\n 'hit_id', 'track_id']], on=['hit_id'], how='left', validate='one_to_one')\n", (923, 1058), False, 'import pandas\n'), ((4016, 4067), 'pandas.DataFrame.from_records', 'pandas.DataFrame.from_records', (['tracks'], {'columns': 'cols'}), '(tracks, columns=cols)\n', (4045, 4067), False, 'import pandas\n'), ((4488, 4545), 'numpy.true_divide', 'numpy.true_divide', (["tracks['major_nhits']", "tracks['nhits']"], {}), "(tracks['major_nhits'], tracks['nhits'])\n", (4505, 4545), False, 'import numpy\n'), ((4563, 4635), 'numpy.true_divide', 'numpy.true_divide', (["tracks['major_nhits']", "tracks['major_particle_nhits']"], {}), "(tracks['major_nhits'], tracks['major_particle_nhits'])\n", (4580, 4635), False, 'import numpy\n')]
|
"""
Define an Exporter Plugin class providing
additional options to xmodule lib ExportManager
"""
import datetime
from lxml import etree
from xmodule.modulestore import xml_exporter
from .. import app_settings
from . import resolvers
class PluggableCourseExportManager(xml_exporter.CourseExportManager):
"""
Export format-agnostic block/module course export manager.
Course export plugins should register themselves in the namespace
`openedx.exporters.course` and inherit from this class.
"""
@property
def name(self):
raise NotImplementedError
@property
def http_content_type(self):
raise NotImplementedError
@property
def filename_extension(self):
raise NotImplementedError
def process_root(self, root, export_fs):
"""
Perform any additional tasks to the root node.
"""
super(PluggableCourseExportManager, self).process_root(root, export_fs)
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
"""
Process additional content, like static assets.
"""
super(PluggableCourseExportManager, self).process_extra(root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs)
def post_process(self, root, export_fs):
"""
Perform any final processing after the other export tasks are done.
This is where most plugin export managers will do their work, after
export to XML. XModules and XBlocks provide XML serialization directly
or via mixin, and it's much more work to directly serialize to some other
format than to post-process XML output to another format
"""
def export(self):
"""
Perform the export given the parameters handed to this class at init.
"""
super(PluggableCourseExportManager, self).export()
def _load_export_xsl(self):
"""
Get the XSL stylesheet for post_processing.
"""
try:
return self.DEFAULT_XSL_STYLESHEET
except AttributeError:
raise # do something more intelligent here, not all exporter plugins may use XSL
def _do_xsl_transform(self, root, export_fs):
"""
Perform XSLT transform of export output using XSL stylesheet.
"""
parser = etree.XMLParser(recover=True) # use a forgiving parser, OLX is messy
parser.resolvers.add(resolvers.ExportFSResolver(export_fs))
parser.resolvers.add(resolvers.PyLocalXSLResolver())
parser.resolvers.add(resolvers.AssetURLResolver(export_fs))
xsl_sheet = bytes(self._load_export_xsl(), 'utf-8')
xslt_root = etree.XML(xsl_sheet, parser)
transform = etree.XSLT(xslt_root)
dt = datetime.datetime.now()
result_tree = transform(root, baseURL="'{}'".format(app_settings.LMS_ROOT_URL), curDateTime="'{}'".format(dt))
print((str(result_tree)))
return result_tree
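# --- Hedged illustration (not part of this module) ---
# A minimal sketch of what a concrete exporter plugin registered under
# `openedx.exporters.course` might look like; the class name, content type
# and identity stylesheet below are invented for illustration only.
class ExampleXslExportManager(PluggableCourseExportManager):
    """Toy exporter that runs the exported OLX through an identity XSL transform."""

    DEFAULT_XSL_STYLESHEET = """<xsl:stylesheet version="1.0"
        xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
      <xsl:param name="baseURL"/>
      <xsl:param name="curDateTime"/>
      <xsl:template match="@*|node()">
        <xsl:copy><xsl:apply-templates select="@*|node()"/></xsl:copy>
      </xsl:template>
    </xsl:stylesheet>"""

    @property
    def name(self):
        return "example_xsl"

    @property
    def http_content_type(self):
        return "application/xml"

    @property
    def filename_extension(self):
        return "xml"

    def post_process(self, root, export_fs):
        # Hand the exported XML to the XSLT helper defined on the base class.
        return self._do_xsl_transform(root, export_fs)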
|
[
"lxml.etree.XSLT",
"lxml.etree.XML",
"lxml.etree.XMLParser",
"datetime.datetime.now"
] |
[((2377, 2406), 'lxml.etree.XMLParser', 'etree.XMLParser', ([], {'recover': '(True)'}), '(recover=True)\n', (2392, 2406), False, 'from lxml import etree\n'), ((2724, 2752), 'lxml.etree.XML', 'etree.XML', (['xsl_sheet', 'parser'], {}), '(xsl_sheet, parser)\n', (2733, 2752), False, 'from lxml import etree\n'), ((2773, 2794), 'lxml.etree.XSLT', 'etree.XSLT', (['xslt_root'], {}), '(xslt_root)\n', (2783, 2794), False, 'from lxml import etree\n'), ((2808, 2831), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2829, 2831), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2016-2019 by the contributors (see AUTHORS file).
:license: BSD-2-Clause, see LICENSE for details.
"""
from collections import namedtuple
from sphinxcontrib.confluencebuilder.translator import ConfluenceTranslator
from sphinxcontrib_confluencebuilder_util import ConfluenceTestUtil as _
from sphinxcontrib_confluencebuilder_util import EXT_NAME
import os
import unittest
Reporter = namedtuple('Reporter', 'warning')
class DummyDocument(dict):
def __init__(self, source, warn=False):
self['source'] = source
self.reporter = Reporter(warn)
class TestConfluenceTranslator(unittest.TestCase):
@classmethod
def setUpClass(self):
self.config = _.prepareConfiguration()
self.test_dir = os.path.dirname(os.path.realpath(__file__))
def test_docname_and_docparent(self):
mock_ds = os.path.join(self.test_dir, 'dataset-common')
doc_dir, doctree_dir = _.prepareDirectories('config-dummy')
        mock_docpath = os.path.join(mock_ds, 'foo', 'bar', 'baz.rst')
doc = DummyDocument(mock_docpath)
# prepare a dummy application; no need to actually build
with _.prepareSphinx(mock_ds, doc_dir, doctree_dir, self.config) as app:
translator = ConfluenceTranslator(doc, app.builder)
self.assertEqual(translator.docname, 'foo/bar/baz')
self.assertEqual(translator.docparent, 'foo/bar/')
|
[
"sphinxcontrib_confluencebuilder_util.ConfluenceTestUtil.prepareSphinx",
"os.path.realpath",
"sphinxcontrib_confluencebuilder_util.ConfluenceTestUtil.prepareConfiguration",
"sphinxcontrib.confluencebuilder.translator.ConfluenceTranslator",
"collections.namedtuple",
"sphinxcontrib_confluencebuilder_util.ConfluenceTestUtil.prepareDirectories",
"os.path.join"
] |
[((442, 475), 'collections.namedtuple', 'namedtuple', (['"""Reporter"""', '"""warning"""'], {}), "('Reporter', 'warning')\n", (452, 475), False, 'from collections import namedtuple\n'), ((736, 760), 'sphinxcontrib_confluencebuilder_util.ConfluenceTestUtil.prepareConfiguration', '_.prepareConfiguration', ([], {}), '()\n', (758, 760), True, 'from sphinxcontrib_confluencebuilder_util import ConfluenceTestUtil as _\n'), ((890, 935), 'os.path.join', 'os.path.join', (['self.test_dir', '"""dataset-common"""'], {}), "(self.test_dir, 'dataset-common')\n", (902, 935), False, 'import os\n'), ((967, 1003), 'sphinxcontrib_confluencebuilder_util.ConfluenceTestUtil.prepareDirectories', '_.prepareDirectories', (['"""config-dummy"""'], {}), "('config-dummy')\n", (987, 1003), True, 'from sphinxcontrib_confluencebuilder_util import ConfluenceTestUtil as _\n'), ((1027, 1073), 'os.path.join', 'os.path.join', (['mock_ds', '"""foo"""', '"""bar"""', '"""baz.rst"""'], {}), "(mock_ds, 'foo', 'bar', 'baz.rst')\n", (1039, 1073), False, 'import os\n'), ((801, 827), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (817, 827), False, 'import os\n'), ((1196, 1255), 'sphinxcontrib_confluencebuilder_util.ConfluenceTestUtil.prepareSphinx', '_.prepareSphinx', (['mock_ds', 'doc_dir', 'doctree_dir', 'self.config'], {}), '(mock_ds, doc_dir, doctree_dir, self.config)\n', (1211, 1255), True, 'from sphinxcontrib_confluencebuilder_util import ConfluenceTestUtil as _\n'), ((1289, 1327), 'sphinxcontrib.confluencebuilder.translator.ConfluenceTranslator', 'ConfluenceTranslator', (['doc', 'app.builder'], {}), '(doc, app.builder)\n', (1309, 1327), False, 'from sphinxcontrib.confluencebuilder.translator import ConfluenceTranslator\n')]
|
"""
Functions for creating and deleting the ModMon database.
"""
import argparse
import sys
from sqlalchemy import create_engine
from sqlalchemy.exc import ProgrammingError
from .schema import Base
from .connect import get_database_config, DATABASE_NAME, ENGINE
from ..config import config
from ..utils.utils import ask_for_confirmation
ADMIN_CONNECTION_STRING, _ = get_database_config(config["database-admin"])
def create_database(db_name=DATABASE_NAME, force=False):
"""Create the ModMon database.
Parameters
----------
db_name : str, optional
        Name of the database to create, by default modmon.db.connect.DATABASE_NAME
force : bool, optional
If True delete any pre-existing database and create a new one, by default False
"""
engine = create_engine(ADMIN_CONNECTION_STRING)
conn = engine.connect()
conn.execute("commit")
try:
conn.execute(f'CREATE DATABASE "{db_name}"')
except ProgrammingError as e:
if f'database "{db_name}" already exists' in str(e):
if force:
print("Deleting pre-existing database.")
delete_database(db_name=db_name, force=force)
print("Creating new database.")
create_database(db_name=db_name, force=force)
else:
print(f'Database "{db_name}" already exists.')
else:
raise
def delete_database(db_name=DATABASE_NAME, force=False):
"""Delete the ModMon database.
Parameters
----------
db_name : str, optional
        Name of the database to delete, by default modmon.db.connect.DATABASE_NAME
force : bool, optional
Unless True ask the user for confirmation before deleting, by default False
"""
if not force:
confirmed = ask_for_confirmation(
"WARNING: This will delete all data currently in the database."
)
if not confirmed:
print("Aborting create.")
return
engine = create_engine(ADMIN_CONNECTION_STRING)
conn = engine.connect()
conn.execute("commit")
try:
conn.execute(f'DROP DATABASE "{db_name}"')
except ProgrammingError as e:
if f'database "{db_name}" does not exist' in str(e):
print(f'There is no database called "{db_name}".')
else:
raise
def create_schema(force=False, checkfirst=True):
"""Create the tables and schema on the ModMon database.
Parameters
----------
force : bool, optional
Unless True ask for confirmation before taking potentially destructive action if
checkfirst is False, by default False
checkfirst : bool, optional
If True don't recreate tables already present in the database, by default True
"""
if not checkfirst and not force:
confirmed = ask_for_confirmation(
"WARNING: This will delete all data currently in the database."
)
if not confirmed:
print("Aborting create.")
return
Base.metadata.create_all(ENGINE, checkfirst=checkfirst)
def delete_schema(force=False, checkfirst=True):
"""Delete all tables and data stored in the ModMon database.
Parameters
----------
force : bool, optional
Unless True ask the user for confirmation before proceeding, by default False
checkfirst : bool, optional
If True only issue DROPs for tables confirmed to be present, by default True
"""
if not force:
confirmed = ask_for_confirmation(
"WARNING: This will delete ALL tables and data in the database."
)
if not confirmed:
print("Aborting delete.")
return
Base.metadata.drop_all(ENGINE, checkfirst=checkfirst)
def main():
"""Delete and re-create the model monitoring database.
To be used from command-line as modmon_db_create
"""
parser = argparse.ArgumentParser(
description="Create the model monitoring database (ModMon)."
)
parser.add_argument(
"--force",
help="Delete and recreate the database without asking for confirmation if set",
action="store_true",
)
args = parser.parse_args()
if not args.force:
confirmed = ask_for_confirmation(
"WARNING: This will delete all data in any pre-existing ModMon database."
)
if not confirmed:
print("Aborting create.")
sys.exit(0)
create_database(force=True)
create_schema(force=True, checkfirst=False)
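# --- Hedged usage sketch (not part of the module) ---
# The same flow can also be driven from Python rather than the
# modmon_db_create console script; the wrapper name below is invented for
# illustration and is not exported anywhere.
def _rebuild_modmon_db():
    """Drop and recreate the ModMon database and schema without prompting."""
    create_database(force=True)
    create_schema(force=True, checkfirst=False)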
|
[
"sqlalchemy.create_engine",
"argparse.ArgumentParser",
"sys.exit"
] |
[((777, 815), 'sqlalchemy.create_engine', 'create_engine', (['ADMIN_CONNECTION_STRING'], {}), '(ADMIN_CONNECTION_STRING)\n', (790, 815), False, 'from sqlalchemy import create_engine\n'), ((1979, 2017), 'sqlalchemy.create_engine', 'create_engine', (['ADMIN_CONNECTION_STRING'], {}), '(ADMIN_CONNECTION_STRING)\n', (1992, 2017), False, 'from sqlalchemy import create_engine\n'), ((3885, 3975), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create the model monitoring database (ModMon)."""'}), "(description=\n 'Create the model monitoring database (ModMon).')\n", (3908, 3975), False, 'import argparse\n'), ((4421, 4432), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4429, 4432), False, 'import sys\n')]
|
import json
f = open('data/movies.json')
data = json.load(f)
for movie in data[:10]:
print(movie["Title"])
|
[
"json.load"
] |
[((49, 61), 'json.load', 'json.load', (['f'], {}), '(f)\n', (58, 61), False, 'import json\n')]
|
###############################################################################
## The MIT License
##
## SPDX short identifier: MIT
##
## Copyright 2019 Genentech Inc. South San Francisco
##
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
###############################################################################
###############################################################################
## Portions of this software were derived from code originally developed
## by <NAME> and copyrighted by Stanford University and the Authors
###############################################################################
from distutils.core import setup
from distutils.extension import Extension
import os
import sys
import platform
openmm_dir = '@OPENMM_DIR@'
nn_plugin_header_dir = '@NN_PLUGIN_HEADER_DIR@'
nn_plugin_library_dir = '@NN_PLUGIN_LIBRARY_DIR@'
# setup extra compile and link arguments on Mac
extra_compile_args = []
extra_link_args = []
if platform.system() == 'Darwin':
extra_compile_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
extra_link_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7', '-Wl', '-rpath', openmm_dir+'/lib']
extension = Extension(name='_openmm_py',
sources=['PYPluginWrapper.cpp'],
libraries=['OpenMM', 'OpenMMPY'],
include_dirs=[os.path.join(openmm_dir, 'include'), nn_plugin_header_dir],
library_dirs=[os.path.join(openmm_dir, 'lib'), nn_plugin_library_dir],
runtime_library_dirs=[os.path.join(openmm_dir, 'lib')],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
)
setup(name='openmm_py',
version='1.0',
py_modules=['openmm_py'],
ext_modules=[extension],
)
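# --- Hedged note (not part of the original build script) ---
# The @OPENMM_DIR@ style tokens above are placeholders that the plugin's
# CMake configuration step is expected to substitute before this script
# runs; after that the wrapper extension is typically built with something
# like the standard distutils command below (the exact invocation depends
# on the surrounding build system):
#
#     python setup.py build_ext --inplace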
|
[
"platform.system",
"os.path.join",
"distutils.core.setup"
] |
[((2753, 2846), 'distutils.core.setup', 'setup', ([], {'name': '"""openmm_py"""', 'version': '"""1.0"""', 'py_modules': "['openmm_py']", 'ext_modules': '[extension]'}), "(name='openmm_py', version='1.0', py_modules=['openmm_py'],\n ext_modules=[extension])\n", (2758, 2846), False, 'from distutils.core import setup\n'), ((1982, 1999), 'platform.system', 'platform.system', ([], {}), '()\n', (1997, 1999), False, 'import platform\n'), ((2383, 2418), 'os.path.join', 'os.path.join', (['openmm_dir', '"""include"""'], {}), "(openmm_dir, 'include')\n", (2395, 2418), False, 'import os\n'), ((2479, 2510), 'os.path.join', 'os.path.join', (['openmm_dir', '"""lib"""'], {}), "(openmm_dir, 'lib')\n", (2491, 2510), False, 'import os\n'), ((2580, 2611), 'os.path.join', 'os.path.join', (['openmm_dir', '"""lib"""'], {}), "(openmm_dir, 'lib')\n", (2592, 2611), False, 'import os\n')]
|
import re
__all__ = ['UntypedProperty', 'TypedProperty', 'TimecodeProperty']
class UntypedProperty(object):
"""
The Property classes form the basis for the FixedInterfaceObject. They
implement a Python property, and store the data in the instances dataVar.
Docstrings can also be provided to improve help() output.
@param initVal, An initial value for the property if None is not supported.
@param doc str, A docstring for the property that will be printed when
help() is called on the object.
@param dataVar str ['__dict__'] The instance dict attribute that should be
used to hold the properties data. It defaults to the objects __dict__, but
could be something else if de-coupled storage is desired.
@param dataName str ['__<id(self)>'] The key to use when storing a value in
    dataVar. If omitted, this defaults to a prefixed version of the id of the
object, though this may cause serialisation issues - so its recommended that
this is set to something meaningful. Some objects use Metaclasses to take care
of this automatically to avoid the developer having to manually match the
dataName to the actual attribute name.
    @param order int [-1] A UI hint as to the 'natural ordering' for this
property when it's displayed in a list.
"""
def __init__(self, initVal=None, doc=None, dataVar=None, dataName=None, order=-1):
super(UntypedProperty, self).__init__()
self.__doc__ = doc
self.value = initVal
self.dataVar = dataVar if dataVar else '__dict__'
# I don't know how well this will serialize but its to avoid you always
# having to name it twice. Though most Factories take care of this now.
self.dataName = dataName if dataName else "__%s" % id(self)
# This may be used for positioning in the ui, this should be > 0
# as -1 indicates that it is unordered or ordering is not important
self.order = order
def __get__(self, obj, cls):
# Allow access to ourself if we're called on the class
if obj is None:
return self
return getattr(obj, self.dataVar).get(self.dataName, None)
def __set__(self, obj, value):
getattr(obj, self.dataVar)[self.dataName] = value
class TypedProperty(UntypedProperty):
"""
Extends the UntypedProperty to allow strict type checking of values.
@param typ Class, Sets will be conformed to being instances of this type of
None.
@exception ValueError or other as per constructing an instance of the
property's typ from the supplied value. ie: typ(value).
"""
def __init__(self, typ, initVal=None, doc=None, dataVar=None, dataName=None,
order=-1):
super(TypedProperty, self).__init__(initVal, doc, dataVar, dataName, order)
self.__doc__ = "[%s]" % typ.__name__
if doc:
self.__doc__ += " %s" % doc
self.typ = typ
def __set__(self, obj, value):
if not isinstance(value, self.typ) and value is not None:
value = self.typ(value)
super(TypedProperty, self).__set__(obj, value)
class TimecodeProperty(TypedProperty):
"""
A specialised property to hold SMPTE timecode values. Valid formats are:
HH:MM:SS:FF (non-drop)
HH:MM:SS;FF or HH:MM:SS.FF (drop)
Any of the above can be suffixed with a floating point frame rate (R) or
prefixed with a sign.
[+-]HH:MM:SS:FF@R
"""
## A regex that can be used to match timecode values, groups are named
## 'hours', 'minutes', 'seconds', 'frames', 'dropFrame' and 'frameRate'
timecodeRegex = re.compile(r'(?P<sign>[+\-]?)(?P<hours>[0-9]{2}):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2})(?P<dropFrame>[:;.])(?P<frames>[0-9]{2})(?:@(?P<frameRate>[0-9.]+)|$)')
def __init__(self, doc=None, dataVar=None, dataName=None, order=-1):
super(TimecodeProperty, self).__init__(str, None, doc, dataVar,
dataName, order)
def __set__(self, obj, value):
if value is not None:
if not isinstance(value, str):
raise ValueError("Timecodes must be a string (%s)" % type(value))
if not self.timecodeRegex.match(value):
raise ValueError("Invalid timecode format: '%s' (hh:mm:ss:ff or "+
"[+-]hh:mm:ss[:;.]ff@rr[.rr]])" % value)
super(TypedProperty, self).__set__(obj, value)
def getTimecode(self, value):
"""
@return str, The timecode component of @param value, or an empty string if
no valid timecode is found in the input.
"""
if value is None:
return ''
match = self.timecodeRegex.match(value)
if not match:
return ''
sign = match.group('sign')
sign = sign if sign else ''
hh = int(match.group('hours'))
mm = int(match.group('minutes'))
ss = int(match.group('seconds'))
ff = int(match.group('frames'))
df = match.group('dropFrame')
tc = "%s%02d:%02d:%02d%s%02d" % (sign, hh, mm, ss, df, ff)
return tc
def getFrameRate(self, value):
"""
@return float, The frame rate of @param value else 0 if no valid framerate
is encoded in the value.
"""
rate = 0.0
if value is None:
return rate
match = self.timecodeRegex.match(value)
if not match:
return rate
rr = match.group('frameRate')
if rr:
rate = float(rr)
return rate
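# --- Hedged usage sketch (not part of this module) ---
# A minimal class wired up with the descriptors above; the class name,
# attribute names and timecode value are invented for illustration only.
class _ExampleClip(object):
    name = TypedProperty(str, doc="Clip name", dataName="_name", order=1)
    start = TimecodeProperty(doc="Start timecode", dataName="_start", order=2)
if __name__ == '__main__':
    clip = _ExampleClip()
    clip.name = 'shot_010'
    clip.start = '01:02:03:04@24'
    # Accessing the descriptor on the class returns the property object itself,
    # so its helper methods can be used on the stored value.
    print(clip.name,
          _ExampleClip.start.getTimecode(clip.start),   # 01:02:03:04
          _ExampleClip.start.getFrameRate(clip.start))  # 24.0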
|
[
"re.compile"
] |
[((3465, 3638), 're.compile', 're.compile', (['"""(?P<sign>[+\\\\-]?)(?P<hours>[0-9]{2}):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2})(?P<dropFrame>[:;.])(?P<frames>[0-9]{2})(?:@(?P<frameRate>[0-9.]+)|$)"""'], {}), "(\n '(?P<sign>[+\\\\-]?)(?P<hours>[0-9]{2}):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2})(?P<dropFrame>[:;.])(?P<frames>[0-9]{2})(?:@(?P<frameRate>[0-9.]+)|$)'\n )\n", (3475, 3638), False, 'import re\n')]
|
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import csv
import re
from tests.common.types import (
VID,
Rank,
Prop,
Tag,
Edge,
Vertex,
)
class CSVImporter:
_SRC_VID = ':SRC_VID'
_DST_VID = ':DST_VID'
_VID = ':VID'
_RANK = ':RANK'
def __init__(self, filepath):
self._filepath = filepath
self._insert_stmt = ""
self._create_stmt = ""
self._type = None
def __iter__(self):
with open(self._filepath, 'r') as f:
for i, row in enumerate(csv.reader(f)):
if i == 0:
yield self.parse_header(row)
else:
yield self.process(row)
def process(self, row: list):
if isinstance(self._type, Vertex):
return self.build_vertex_insert_stmt(row)
return self.build_edge_insert_stmt(row)
def build_vertex_insert_stmt(self, row: list):
props = []
for p in self._type.tags[0].props:
col = row[p.index]
props.append(self.value(p.ptype, col))
vid = self._type.vid
id_val = self.value(vid.id_type, row[vid.index])
return f'{self._insert_stmt} {id_val}:({",".join(props)});'
def build_edge_insert_stmt(self, row: list):
props = []
for p in self._type.props:
col = row[p.index]
props.append(self.value(p.ptype, col))
src = self._type.src
dst = self._type.dst
src_vid = self.value(src.id_type, row[src.index])
dst_vid = self.value(dst.id_type, row[dst.index])
if self._type.rank is None:
return f'{self._insert_stmt} {src_vid}->{dst_vid}:({",".join(props)});'
rank = row[self._type.rank.index]
return f'{self._insert_stmt} {src_vid}->{dst_vid}@{rank}:({",".join(props)});'
def value(self, ptype: str, col):
return f'"{col}"' if ptype == 'string' else f'{col}'
def parse_header(self, row):
"""
Only parse the scenario that one tag in each file
"""
for col in row:
if self._SRC_VID in col or self._DST_VID in col:
self._type = Edge()
self.parse_edge(row)
break
if self._VID in col:
self._type = Vertex()
self.parse_vertex(row)
break
if self._type is None:
raise ValueError(f'Invalid csv header: {",".join(row)}')
return self._create_stmt
def parse_edge(self, row):
props = []
name = ''
for i, col in enumerate(row):
if col == self._RANK:
self._type.rank = Rank(i)
continue
m = re.search(r':SRC_VID\((.*)\)', col)
if m:
self._type.src = VID(i, m.group(1))
continue
m = re.search(r':DST_VID\((.*)\)', col)
if m:
self._type.dst = VID(i, m.group(1))
continue
m = re.search(r'(\w+)\.(\w+):(\w+)', col)
if not m:
raise ValueError(f'Invalid csv header format {col}')
g1 = m.group(1)
if not name:
name = g1
assert name == g1, f'Different edge type {g1}'
props.append(Prop(i, m.group(2), m.group(3)))
self._type.name = name
self._type.props = props
pdecl = ','.join(p.name for p in props)
self._insert_stmt = f"INSERT EDGE {name}({pdecl}) VALUES"
pdecl = ','.join(f"`{p.name}` {p.ptype}" for p in props)
self._create_stmt = f"CREATE EDGE IF NOT EXISTS `{name}`({pdecl});"
def parse_vertex(self, row):
tag = Tag()
props = []
for i, col in enumerate(row):
m = re.search(r':VID\((.*)\)', col)
if m:
self._type.vid = VID(i, m.group(1))
continue
m = re.search(r'(\w+)\.(\w+):(\w+)', col)
if not m:
raise ValueError(f'Invalid csv header format {col}')
g1 = m.group(1)
if not tag.name:
tag.name = g1
assert tag.name == g1, f'Different tag name {g1}'
props.append(Prop(i, m.group(2), m.group(3)))
tag.props = props
self._type.tags = [tag]
pdecl = ','.join(p.name for p in tag.props)
self._insert_stmt = f"INSERT VERTEX {tag.name}({pdecl}) VALUES"
pdecl = ','.join(f"`{p.name}` {p.ptype}" for p in tag.props)
self._create_stmt = f"CREATE TAG IF NOT EXISTS `{tag.name}`({pdecl});"
if __name__ == '__main__':
for row in CSVImporter('../data/nba/player.csv'):
print(row)
|
[
"csv.reader",
"tests.common.types.Edge",
"tests.common.types.Rank",
"re.search",
"tests.common.types.Tag",
"tests.common.types.Vertex"
] |
[((3852, 3857), 'tests.common.types.Tag', 'Tag', ([], {}), '()\n', (3855, 3857), False, 'from tests.common.types import VID, Rank, Prop, Tag, Edge, Vertex\n'), ((2865, 2901), 're.search', 're.search', (['""":SRC_VID\\\\((.*)\\\\)"""', 'col'], {}), "(':SRC_VID\\\\((.*)\\\\)', col)\n", (2874, 2901), False, 'import re\n'), ((3012, 3048), 're.search', 're.search', (['""":DST_VID\\\\((.*)\\\\)"""', 'col'], {}), "(':DST_VID\\\\((.*)\\\\)', col)\n", (3021, 3048), False, 'import re\n'), ((3159, 3199), 're.search', 're.search', (['"""(\\\\w+)\\\\.(\\\\w+):(\\\\w+)"""', 'col'], {}), "('(\\\\w+)\\\\.(\\\\w+):(\\\\w+)', col)\n", (3168, 3199), False, 'import re\n'), ((3931, 3963), 're.search', 're.search', (['""":VID\\\\((.*)\\\\)"""', 'col'], {}), "(':VID\\\\((.*)\\\\)', col)\n", (3940, 3963), False, 'import re\n'), ((4074, 4114), 're.search', 're.search', (['"""(\\\\w+)\\\\.(\\\\w+):(\\\\w+)"""', 'col'], {}), "('(\\\\w+)\\\\.(\\\\w+):(\\\\w+)', col)\n", (4083, 4114), False, 'import re\n'), ((683, 696), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (693, 696), False, 'import csv\n'), ((2310, 2316), 'tests.common.types.Edge', 'Edge', ([], {}), '()\n', (2314, 2316), False, 'from tests.common.types import VID, Rank, Prop, Tag, Edge, Vertex\n'), ((2438, 2446), 'tests.common.types.Vertex', 'Vertex', ([], {}), '()\n', (2444, 2446), False, 'from tests.common.types import VID, Rank, Prop, Tag, Edge, Vertex\n'), ((2816, 2823), 'tests.common.types.Rank', 'Rank', (['i'], {}), '(i)\n', (2820, 2823), False, 'from tests.common.types import VID, Rank, Prop, Tag, Edge, Vertex\n')]
|
'''
Created on May 8, 2013
Copyright: <NAME>
License: BSD
convenience functions for interactiveBrokers module
'''
from ib.ext.Contract import Contract
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
timeFormat = "%Y%m%d %H:%M:%S"
dateFormat = "%Y%m%d"
def createContract(symbol,secType='STK',exchange='SMART',currency='USD'):
''' create contract object '''
c = Contract()
c.m_symbol = symbol
c.m_secType= secType
c.m_exchange = exchange
c.m_currency = currency
return c
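# --- Hedged usage sketch (not part of the original module) ---
# The symbols and the IDEALPRO FX routing below are illustrative only.
if __name__ == '__main__':
    aapl = createContract('AAPL')
    eurusd = createContract('EUR', secType='CASH', exchange='IDEALPRO', currency='USD')
    print(aapl.m_symbol, aapl.m_secType, eurusd.m_symbol, eurusd.m_currency)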
|
[
"ib.ext.Contract.Contract"
] |
[((424, 434), 'ib.ext.Contract.Contract', 'Contract', ([], {}), '()\n', (432, 434), False, 'from ib.ext.Contract import Contract\n')]
|
from collections import defaultdict
from ..objects import Clause
from ..objects import Statement
from ..structs import KnowledgeMap
from ..objects import Pattern
def parse_clause(words):
assert len(words) == 3
name, relation, node = words
return Clause((name, relation, node))
def parse_chained(words):
chainDict = defaultdict(set)
if len(words) >= 4:
key, *chained = words
clausewords = chained[:3]
chained = chained[3:]
chainDict[key].add(parse_clause(clausewords))
recurse = parse_chained(chained)
for k, v in recurse.items():
if k == 'and':
k = key
for item in v:
chainDict[k].add(item)
elif len(words) > 0 and len(words) < 4:
raise SyntaxError('Clause file not correctly formatted: {}'.format(str(words)))
return chainDict
def parse_chained_clause(words):
assert len(words) >= 3
root = parse_clause(words[:3])
chainDict = parse_chained(words[3:])
return Statement(root, chainDict)
def parse_chained_clauses(sentence):
clauses = sentence.split(',')
return [parse_chained_clause(c.split()) for c in clauses]
def parse_pattern(text):
conditions, outcomes = text.split('then')
conditions = conditions.replace('if', '')
conditions = parse_chained_clauses(conditions)
outcomes = parse_chained_clauses(outcomes)
return Pattern(conditions, outcomes)
def parse_text(text):
kmap = KnowledgeMap()
for sentence in text.split('.'):
sentence = sentence.replace('\n', ' ')
words = sentence.split()
if len(words) == 0:
pass
elif words[0] == 'if':
kmap.teach(parse_pattern(sentence))
elif len(words) >= 3:
kmap.add(parse_chained_clause(words))
else:
raise SyntaxError('Clause file not correctly formatted: {}'.format(str(words)))
return kmap
def parse_file(filename):
with open(filename, 'r') as infile:
return parse_text(infile.read())
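# --- Hedged usage sketch (not part of this module) ---
# parse_text expects '.'-terminated sentences of "name relation node"
# triples, with rules written as "if <clauses> then <clauses>"; the helper
# and the sentences below are invented for illustration only.
def _example_knowledge_map():
    """Build a tiny KnowledgeMap from one fact and one rule."""
    return parse_text("socrates is man. if x is man then x is mortal.")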
|
[
"collections.defaultdict"
] |
[((335, 351), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (346, 351), False, 'from collections import defaultdict\n')]
|
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.response import Response
from visitors.models import Visitor
from visitors.serializers import VisitorSerializer
# Create your views here.
@api_view(['GET', 'POST'])
def visitor_list(request, format=None):
    if request.method == 'GET':
visitors = Visitor.objects.all()
serializer = VisitorSerializer(visitors, many=True)
return Response(serializer.data)
    if request.method == 'POST':
serializer = VisitorSerializer(data=request.data)
if (serializer.is_valid()):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def visitor_detail(request, pk, format=None):
try:
visitor = Visitor.objects.get(pk=pk)
except Visitor.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = VisitorSerializer(visitor)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = VisitorSerializer(visitor, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
visitor.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
[
"visitors.models.Visitor.objects.all",
"rest_framework.response.Response",
"visitors.models.Visitor.objects.get",
"visitors.serializers.VisitorSerializer",
"rest_framework.decorators.api_view"
] |
[((242, 267), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (250, 267), False, 'from rest_framework.decorators import api_view\n'), ((802, 836), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'PUT', 'DELETE']"], {}), "(['GET', 'PUT', 'DELETE'])\n", (810, 836), False, 'from rest_framework.decorators import api_view\n'), ((361, 382), 'visitors.models.Visitor.objects.all', 'Visitor.objects.all', ([], {}), '()\n', (380, 382), False, 'from visitors.models import Visitor\n'), ((404, 442), 'visitors.serializers.VisitorSerializer', 'VisitorSerializer', (['visitors'], {'many': '(True)'}), '(visitors, many=True)\n', (421, 442), False, 'from visitors.serializers import VisitorSerializer\n'), ((458, 483), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (466, 483), False, 'from rest_framework.response import Response\n'), ((540, 576), 'visitors.serializers.VisitorSerializer', 'VisitorSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (557, 576), False, 'from visitors.serializers import VisitorSerializer\n'), ((735, 798), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (743, 798), False, 'from rest_framework.response import Response\n'), ((910, 936), 'visitors.models.Visitor.objects.get', 'Visitor.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (929, 936), False, 'from visitors.models import Visitor\n'), ((1082, 1108), 'visitors.serializers.VisitorSerializer', 'VisitorSerializer', (['visitor'], {}), '(visitor)\n', (1099, 1108), False, 'from visitors.serializers import VisitorSerializer\n'), ((1124, 1149), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1132, 1149), False, 'from rest_framework.response import Response\n'), ((662, 719), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (670, 719), False, 'from rest_framework.response import Response\n'), ((985, 1027), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_404_NOT_FOUND'}), '(status=status.HTTP_404_NOT_FOUND)\n', (993, 1027), False, 'from rest_framework.response import Response\n'), ((1206, 1251), 'visitors.serializers.VisitorSerializer', 'VisitorSerializer', (['visitor'], {'data': 'request.data'}), '(visitor, data=request.data)\n', (1223, 1251), False, 'from visitors.serializers import VisitorSerializer\n'), ((1376, 1439), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (1384, 1439), False, 'from rest_framework.response import Response\n'), ((1335, 1360), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1343, 1360), False, 'from rest_framework.response import Response\n'), ((1518, 1561), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (1526, 1561), False, 'from rest_framework.response import Response\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The module file for saos8_facts
"""
DOCUMENTATION = """
module: saos8_facts
short_description: Get facts about saos8 devices.
description:
- Collects facts from network devices running the saos8 operating system. This module
places the facts gathered in the fact tree keyed by the respective resource name. The
facts module will always collect a base set of facts from the device and can enable
or disable collection of additional facts.
version_added: 1.0.0
author:
- <NAME>
notes:
- Tested against SAOS rel_saos5170_8.6.5_ga076
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected to a given subset. Possible
values for this argument include all, default, config, and neighbors. Can specify
a list of values to include a larger subset. Values can also be used with an
initial C(M(!)) to specify that a specific subset should not be collected.
required: false
default: '!config'
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected to a given subset.
Possible values for this argument include all and the resources like interfaces.
Can specify a list of values to include a larger subset. Values can also be
used with an initial C(M(!)) to specify that a specific subset should not be
collected. Valid subsets are 'all', 'interfaces', 'neighbors'
required: false
"""
EXAMPLES = """
- name: Gather all facts
ciena.saos8.saos8_facts:
gather_subset: all
gather_network_resources: all
- name: collect config and default facts
ciena.saos8.saos8_facts:
gather_subset: config
- name: collect everything exception the config
ciena.saos8.saos8_facts:
gather_subset: '!config'
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version Ansible controller is using
returned: always
type: str
ansible_net_gather_network_resources:
description: The list of fact resource subsets collected from the device
returned: always
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.argspec.facts.facts import (
FactsArgs,
)
from ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.facts.facts import (
Facts,
)
from ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.saos8 import (
saos8_argument_spec,
)
def main():
"""
Main entry point for module execution
:returns: ansible_facts
"""
argument_spec = FactsArgs.argument_spec
argument_spec.update(saos8_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
warnings = []
if module.params["gather_subset"] == "!config":
warnings.append(
"default value for `gather_subset` will be changed to `min` from `!config` v2.11 onwards"
)
result = Facts(module).get_facts()
ansible_facts, additional_warnings = result
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == "__main__":
main()
|
[
"ansible.module_utils.basic.AnsibleModule",
"ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.facts.facts.Facts"
] |
[((3652, 3720), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'argument_spec', 'supports_check_mode': '(True)'}), '(argument_spec=argument_spec, supports_check_mode=True)\n', (3665, 3720), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((3957, 3970), 'ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.facts.facts.Facts', 'Facts', (['module'], {}), '(module)\n', (3962, 3970), False, 'from ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.facts.facts import Facts\n')]
|
import json
import pytest
from freezegun import freeze_time
from sso.core.logging import create_x_access_log
from sso.tests.factories.user import UserFactory
class TestAppAccessLog:
@pytest.mark.django_db
@freeze_time("2017-06-22 15:50:00.000000+00:00")
def test_user_info_is_logged(self, rf, mocker):
mock_logger = mocker.patch("sso.core.logging.logger")
request = rf.get("/whatever/")
create_x_access_log(request, 200)
mock_logger.info.assert_called_once()
assert json.loads(mock_logger.info.call_args[0][0]) == {
"request_id": "",
"request_time": "2017-06-22 15:50:00",
"sso_user_id": None,
"local_user_id": None,
"path": "/whatever/",
"url": {"domain": "testserver"},
"status": 200,
"ip": None,
"message": "",
"service": "staff-sso test",
}
@pytest.mark.django_db
@freeze_time("2017-06-22 15:50:00.000000+00:00")
def test_log_without_user(self, rf, mocker):
mock_logger = mocker.patch("sso.core.logging.logger")
request = rf.get("/whatever/")
user = UserFactory()
request.user = user
create_x_access_log(request, 200, message="test message")
mock_logger.info.assert_called_once()
assert json.loads(mock_logger.info.call_args[0][0]) == {
"request_id": "",
"request_time": "2017-06-22 15:50:00",
"sso_user_id": str(user.user_id),
"local_user_id": user.id,
"path": "/whatever/",
"url": {"domain": "testserver"},
"status": 200,
"ip": None,
"message": "test message",
"service": "staff-sso test",
}
|
[
"sso.core.logging.create_x_access_log",
"freezegun.freeze_time",
"json.loads",
"sso.tests.factories.user.UserFactory"
] |
[((218, 265), 'freezegun.freeze_time', 'freeze_time', (['"""2017-06-22 15:50:00.000000+00:00"""'], {}), "('2017-06-22 15:50:00.000000+00:00')\n", (229, 265), False, 'from freezegun import freeze_time\n'), ((965, 1012), 'freezegun.freeze_time', 'freeze_time', (['"""2017-06-22 15:50:00.000000+00:00"""'], {}), "('2017-06-22 15:50:00.000000+00:00')\n", (976, 1012), False, 'from freezegun import freeze_time\n'), ((429, 462), 'sso.core.logging.create_x_access_log', 'create_x_access_log', (['request', '(200)'], {}), '(request, 200)\n', (448, 462), False, 'from sso.core.logging import create_x_access_log\n'), ((1179, 1192), 'sso.tests.factories.user.UserFactory', 'UserFactory', ([], {}), '()\n', (1190, 1192), False, 'from sso.tests.factories.user import UserFactory\n'), ((1230, 1287), 'sso.core.logging.create_x_access_log', 'create_x_access_log', (['request', '(200)'], {'message': '"""test message"""'}), "(request, 200, message='test message')\n", (1249, 1287), False, 'from sso.core.logging import create_x_access_log\n'), ((525, 569), 'json.loads', 'json.loads', (['mock_logger.info.call_args[0][0]'], {}), '(mock_logger.info.call_args[0][0])\n', (535, 569), False, 'import json\n'), ((1350, 1394), 'json.loads', 'json.loads', (['mock_logger.info.call_args[0][0]'], {}), '(mock_logger.info.call_args[0][0])\n', (1360, 1394), False, 'import json\n')]
|
import parserBOJ
import openpyxl as xl
parse = parserBOJ.Parse()
wb = xl.Workbook()
ws = wb.active
ws.title = "sheet100"
# set column names
col_names = ['문제 번호', '문제 제목', '맞힌 사람', '제출 횟수', '정답률']
for seq, name in enumerate(col_names):
ws.cell(row=1, column=seq+1, value=name)
row_num = 2
# write data rows
for n, rows in enumerate(parse.processParsing(DEBUG=False, MAX_PAGE=150)):
for seq, value in enumerate(rows):
ws.cell(row=row_num+n, column=seq+1, value=value)
wb.save("BOJ_Parsing.xlsx")
wb.close()
|
[
"parserBOJ.Parse",
"openpyxl.Workbook"
] |
[((48, 65), 'parserBOJ.Parse', 'parserBOJ.Parse', ([], {}), '()\n', (63, 65), False, 'import parserBOJ\n'), ((71, 84), 'openpyxl.Workbook', 'xl.Workbook', ([], {}), '()\n', (82, 84), True, 'import openpyxl as xl\n')]
|
import json
class FieldOperation(object):
def __init__(self, d):
if type(d) is str:
d = json.loads(d)
self.from_dict(d)
def from_dict(self, d):
self.__dict__ = {}
for key, value in d.items():
if type(value) is dict:
value = FieldOperation(value)
self.__dict__[key] = value
fieldOperationType = ""
adaptMachineType = ""
cropSeason = ""
startDate = ""
endDate = ""
links = ""
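# --- Hedged usage sketch (not part of the original module) ---
# FieldOperation accepts either a dict or a JSON string; nested dicts become
# nested FieldOperation objects. The field values below are illustrative only.
if __name__ == '__main__':
    op = FieldOperation('{"fieldOperationType": "harvest", "cropSeason": "2020",'
                        ' "links": {"rel": "self"}}')
    print(op.fieldOperationType, op.cropSeason, op.links.rel)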
|
[
"json.loads"
] |
[((120, 133), 'json.loads', 'json.loads', (['d'], {}), '(d)\n', (130, 133), False, 'import json\n')]
|
# import json
# a_dictionary = {"d": 4}
# def add_to_json(channel):
# '''add new channel to the json list file'''
# with open("stored_youtube_channels.json", "r+") as file:
# data = json.load(file)
# data.update(a_dictionary)
# file.seek(0)
# json.dump(data, file)
# # json_tests.py
import json
from datetime import datetime
import os.path
from os import path
from src.config.exc import *
def add_to_channel_file(channel,channel_id,list="default"):
'''
select correct file
read the file
append data to the data from the file
save the file
'''
'''add new channel to the json list file'''
# with open(f"channels_{list}.json", "a+") as file:
# '''
# read the file
# append data to the data from the file
# save the file
# '''
time_obj = datetime.now()
base_list_structure = {
"channels":[
{
"channel_name":channel,
"date_added":time_obj.strftime("%m/%d/%y"),
"channel_id":channel_id
}
]
}
added_structure = {
"channel_name":channel,
"date_added":time_obj.strftime("%m/%d/%y"),
"channel_id":channel_id
}
# try:
# data = json.load(file)
# print(data)
# data.update(list_structure)
#if the file does not exist create it with a structure
if path.exists(f"channels_{list}.json") == False:
print("detected file doesn't exist, creating ..")
with open(f"channels_{list}.json", "w") as file:
json.dump(base_list_structure, file)
print("file created")
else:
with open(f"channels_{list}.json") as file:
data = json.load(file)
# check.check(json.dumps(data, indent=4, sort_keys=True))
# print(data)
val = []
for i in data["channels"]:
val.append(i["channel_name"].lower())
print(val)
if channel.lower() in val:
raise ChannelAlreadyEnteredException
else:
with open(f"channels_{list}.json", "w") as file:
data["channels"].append(added_structure)
json.dump(data, file)
def get_list(list_name):
''' return list of channels and channel ids from a stored list'''
with open(f"channels_{list_name}.json", "r") as file:
data = json.load(file)
print(json.dumps(data, indent=4, sort_keys=True))
channels = []
ids = []
for i in range(len(data["channels"])):
channel = data["channels"][i]["channel_name"]
id = data["channels"][i]["channel_id"]
channels.append(channel)
ids.append(id)
return channels,ids
def get_stored_lists():
'''get the names of every stored list
additional:
get how many channels are in each list
'''
import os
arr = os.listdir('.')
for filename in arr:
if "channels_" in filename:
print(filename)
def get_stored_channels(list_name):
'''return all of the channels in a specified list '''
pass
# channels,channel_ids = get_list("test")
# print(f"channels are {channels}")
# print(f"ids are {channel_ids}")
|
[
"json.dump",
"json.load",
"os.path.exists",
"json.dumps",
"datetime.datetime.now",
"os.listdir"
] |
[((882, 896), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (894, 896), False, 'from datetime import datetime\n'), ((3029, 3044), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3039, 3044), False, 'import os\n'), ((1494, 1530), 'os.path.exists', 'path.exists', (['f"""channels_{list}.json"""'], {}), "(f'channels_{list}.json')\n", (1505, 1530), False, 'from os import path\n'), ((2482, 2497), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2491, 2497), False, 'import json\n'), ((1668, 1704), 'json.dump', 'json.dump', (['base_list_structure', 'file'], {}), '(base_list_structure, file)\n', (1677, 1704), False, 'import json\n'), ((1821, 1836), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1830, 1836), False, 'import json\n'), ((2512, 2554), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)', 'sort_keys': '(True)'}), '(data, indent=4, sort_keys=True)\n', (2522, 2554), False, 'import json\n'), ((2290, 2311), 'json.dump', 'json.dump', (['data', 'file'], {}), '(data, file)\n', (2299, 2311), False, 'import json\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from pandas.api.types import is_scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core.index import _ensure_index_from_sequences
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas.compat import lzip
from pandas.core.dtypes.common import (
is_bool_dtype,
is_numeric_dtype,
is_timedelta64_dtype)
import warnings
import numpy as np
import ray
import itertools
class DataFrame(object):
def __init__(self, df, columns, index=None):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
df ([ObjectID]): The list of ObjectIDs that contain the dataframe
partitions.
columns (pandas.Index): The column names for this dataframe, in
pandas Index object.
index (pandas.Index or list): The row index for this dataframe.
"""
assert(len(df) > 0)
self._df = df
self.columns = columns
# this _index object is a pd.DataFrame
# and we use that DataFrame's Index to index the rows.
self._lengths, self._index = _compute_length_and_index.remote(self._df)
if index is not None:
self.index = index
def __str__(self):
return repr(self)
def __repr__(self):
if sum(self._lengths) < 40:
result = repr(to_pandas(self))
return result
head = repr(to_pandas(self.head(20)))
tail = repr(to_pandas(self.tail(20)))
result = head + "\n...\n" + tail
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._index.index
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._index.index = new_index
index = property(_get_index, _set_index)
def _get__index(self):
"""Get the _index for this DataFrame.
Returns:
The default index.
"""
if isinstance(self._index_cache, ray.local_scheduler.ObjectID):
self._index_cache = ray.get(self._index_cache)
return self._index_cache
def _set__index(self, new__index):
"""Set the _index for this DataFrame.
Args:
new__index: The new default index to set.
"""
self._index_cache = new__index
_index = property(_get__index, _set__index)
def _compute_lengths(self):
"""Updates the stored lengths of DataFrame partions
"""
self._lengths = [_deploy_func.remote(_get_lengths, d)
for d in self._df]
def _get_lengths(self):
"""Gets the lengths for each partition and caches it if it wasn't.
Returns:
A list of integers representing the length of each partition.
"""
if isinstance(self._length_cache, ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
elif isinstance(self._length_cache, list) and \
isinstance(self._length_cache[0],
ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
return self._length_cache
def _set_lengths(self, lengths):
"""Sets the lengths of each partition for this DataFrame.
We use this because we can compute it when creating the DataFrame.
Args:
lengths ([ObjectID or Int]): A list of lengths for each
partition, in order.
"""
self._length_cache = lengths
_lengths = property(_get_lengths, _set_lengths)
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# The number of dimensions is common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ndim, self._df[0]))
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ftypes, self._df[0]))
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
# The dtypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.dtypes, self._df[0]))
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
all_empty = ray.get(self._map_partitions(lambda df: df.empty)._df)
return False not in all_empty
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return np.concatenate(
ray.get(self._map_partitions(lambda df: df.values)._df))
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return (len(self.index), len(self.columns))
def _map_partitions(self, func, index=None):
"""Apply a function on each partition.
Args:
func (callable): The function to Apply.
Returns:
A new DataFrame containing the result of the function.
"""
assert(callable(func))
new_df = [_deploy_func.remote(func, part) for part in self._df]
if index is None:
index = self.index
return DataFrame(new_df, self.columns, index=index)
def _update_inplace(self, df=None, columns=None, index=None):
"""Updates the current DataFrame inplace
"""
        if df:
            assert len(df) > 0
            self._df = df
if columns:
self.columns = columns
if index:
self.index = index
self._lengths, self._index = _compute_length_and_index.remote(self._df)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(prefix) + str(x))
return DataFrame(self._df, new_cols, index=self.index)
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(x) + str(suffix))
return DataFrame(self._df, new_cols, index=self.index)
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
assert(callable(func))
return self._map_partitions(lambda df: df.applymap(lambda x: func(x)))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(self._df, self.columns, index=self.index)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
indices = self.index.unique()
chunksize = int(len(indices) / len(self._df))
partitions = [_shuffle.remote(df, indices, chunksize)
for df in self._df]
partitions = ray.get(partitions)
# Transpose the list of dataframes
# TODO find a better way
shuffle = []
for i in range(len(partitions[0])):
shuffle.append([])
for j in range(len(partitions)):
shuffle[i].append(partitions[j][i])
new_dfs = [_local_groupby.remote(part, axis=axis) for part in shuffle]
return DataFrame(new_dfs, self.columns, index=indices)
def reduce_by_index(self, func, axis=0):
"""Perform a reduction based on the row index.
Args:
func (callable): The function to call on the partition
after the groupby.
Returns:
A new DataFrame with the result of the reduction.
"""
return self.groupby(axis=axis)._map_partitions(
func, index=pd.unique(self.index))
def sum(self, axis=None, skipna=True, level=None, numeric_only=None):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
intermediate_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
sum_of_partitions = self._map_partitions(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only),
index=intermediate_index)
return sum_of_partitions.reduce_by_index(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only))
def abs(self):
"""Apply an absolute value function to all numberic columns.
Returns:
A new DataFrame with the applied absolute value.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
return self._map_partitions(lambda df: df.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return self._map_partitions(lambda df: df.isin(values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return self._map_partitions(lambda df: df.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return self._map_partitions(lambda df: df.isnull)
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
# Each partition should have the same index, so we'll use 0's
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Note: Triggers a shuffle.
Returns:
A new DataFrame transposed from this DataFrame.
"""
temp_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
temp_columns = self.index
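        # Transpose each partition locally, then regroup the pieces by the
        # original column labels so that every output partition holds
        # complete rows of the transposed frame.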
local_transpose = self._map_partitions(
lambda df: df.transpose(*args, **kwargs), index=temp_index)
local_transpose.columns = temp_columns
# Sum will collapse the NAs from the groupby
df = local_transpose.reduce_by_index(
lambda df: df.apply(lambda x: x), axis=1)
# Reassign the columns within partition to self.index.
# We have to use _depoly_func instead of _map_partition due to
# new_labels argument
def _reassign_columns(df, new_labels):
df.columns = new_labels
return df
df._df = [
_deploy_func.remote(
_reassign_columns,
part,
self.index) for part in df._df]
return df
T = property(transpose)
    def dropna(self, axis, how, thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
raise NotImplementedError("Not yet")
if how != 'any' and how != 'all':
raise ValueError("<how> not correctly set.")
def add(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def agg(self, func, axis=0, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def aggregate(self, func, axis=0, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def all(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.all(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
def any(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.any(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
def append(self, other, ignore_index=False, verify_integrity=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_blocks(self, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_matrix(self, columns=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asof(self, where, subset=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def assign(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def astype(self, dtype, copy=True, errors='raise', **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at_time(self, time, asof=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError("""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all().""")
else:
return to_pandas(self).bool()
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None,
**kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_lower(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_upper(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine(self, other, func, fill_value=None, overwrite=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine_first(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def compound(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def consolidate(self, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corr(self, method='pearson', min_periods=1):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corrwith(self, other, axis=0, drop=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def count(self, axis=0, level=None, numeric_only=False):
if axis == 1:
return self.T.count(axis=0,
level=level,
numeric_only=numeric_only)
else:
temp_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
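            # Each partition contributes a Series of per-column counts;
            # summing those Series elementwise gives the total non-NA count
            # for every column.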
collapsed_df = sum(
ray.get(
self._map_partitions(
lambda df: df.count(
axis=axis,
level=level,
numeric_only=numeric_only),
index=temp_index)._df))
return collapsed_df
def cov(self, min_periods=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cummax(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cummin(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def describe(self, percentiles=None, include=None, exclude=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def diff(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def div(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def divide(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def dot(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop_duplicates(self, subset=None, keep='first', inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def duplicated(self, subset=None, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def eq(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
def helper(df, index, other_series):
return df.iloc[index['index_within_partition']] \
.equals(other_series)
results = []
other_partition = None
other_df = None
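        # Walk other's row-partition metadata and compare one row at a time
        # against the matching row of self; each comparison is issued as a
        # remote task and the results are gathered below.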
for i, idx in other._index.iterrows():
if idx['partition'] != other_partition:
other_df = ray.get(other._df[idx['partition']])
other_partition = idx['partition']
# TODO: group series here into full df partitions to reduce
# the number of remote calls to helper
other_series = other_df.iloc[idx['index_within_partition']]
curr_index = self._index.iloc[i]
curr_df = self._df[int(curr_index['partition'])]
results.append(_deploy_func.remote(helper,
curr_df,
curr_index,
other_series))
for r in results:
if not ray.get(r):
return False
return True
def eval(self, expr, inplace=False, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def filter(self, items=None, like=None, regex=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first_valid_index(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def floordiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
    def from_csv(cls, path, header=0, sep=', ', index_col=0,
parse_dates=True, encoding=None, tupleize_cols=None,
infer_datetime_format=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
    def from_dict(cls, data, orient='columns', dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
    def from_items(cls, items, columns=None, orient='columns'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
    def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ge(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
temp_df = self._map_partitions(lambda df: df.get(key, default=default))
return to_pandas(temp_df)
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_dtype_counts(), self._df[0]
)
)
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_ftype_counts(), self._df[0]
)
)
def get_value(self, index, col, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get_values(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def gt(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def head(self, n=5):
"""Get the first n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the first n rows of the dataframe.
"""
sizes = self._lengths
if n >= sum(sizes):
return self
cumulative = np.cumsum(np.array(sizes))
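        # cumulative[i] is the total number of rows in partitions 0..i; every
        # partition that ends strictly before row n is kept whole.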
new_dfs = [self._df[i]
for i in range(len(cumulative))
if cumulative[i] < n]
last_index = len(new_dfs)
# this happens when we only need from the first partition
if last_index == 0:
num_to_transfer = n
else:
num_to_transfer = n - cumulative[last_index - 1]
new_dfs.append(_deploy_func.remote(lambda df: df.head(num_to_transfer),
self._df[last_index]))
index = self._index.head(n).index
return DataFrame(new_dfs, self.columns, index=index)
def hist(self, data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmax(axis=axis, skipna=skipna)))
else:
return self.T.idxmax(axis=1, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmin(axis=axis, skipna=skipna)))
else:
return self.T.idxmin(axis=1, skipna=skipna)
def infer_objects(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
try:
len(value)
except TypeError:
value = [value for _ in range(len(self.index))]
if len(value) != len(self.index):
raise ValueError(
"Column length provided does not match DataFrame length.")
if loc < 0 or loc > len(self.columns):
raise ValueError(
"Location provided must be higher than 0 and lower than the "
"number of columns.")
if not allow_duplicates and column in self.columns:
raise ValueError(
"Column {} already exists in DataFrame.".format(column))
cumulative = np.cumsum(self._lengths)
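        # Slice the new column's values into pieces that line up with the
        # existing row partitioning, one piece per partition.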
partitions = [value[cumulative[i-1]:cumulative[i]]
for i in range(len(cumulative))
if i != 0]
partitions.insert(0, value[:cumulative[0]])
# Because insert is always inplace, we have to create this temp fn.
def _insert(_df, _loc, _column, _part, _allow_duplicates):
_df.insert(_loc, _column, _part, _allow_duplicates)
return _df
self._df = \
[_deploy_func.remote(_insert,
self._df[i],
loc,
column,
partitions[i],
allow_duplicates)
for i in range(len(self._df))]
self.columns = self.columns.insert(loc, column)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
            Generators can't be pickled, so from the remote function
            we expand the generator into a list before getting it.
            This is not ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.iterrows()), part) for part in self._df])
iters = itertools.chain.from_iterable(iters)
series = map(lambda idx_series_tuple: idx_series_tuple[1], iters)
return zip(self.index, series)
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
            Generators can't be pickled, so from the remote function
            we expand the generator into a list before getting it.
            This is not ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
iters = ray.get([_deploy_func.remote(
lambda df: list(df.items()), part) for part in self._df])
def concat_iters(iterables):
for partitions in zip(*iterables):
series = pd.concat([_series for _, _series in partitions])
series.index = self.index
yield (series.name, series)
return concat_iters(iters)
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name='Pandas'):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
            Generators can't be pickled, so from the remote function
            we expand the generator into a list before getting it.
            This is not ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.itertuples(index=index, name=name)),
part) for part in self._df])
iters = itertools.chain.from_iterable(iters)
def _replace_index(row_tuple, idx):
# We need to use try-except here because
# isinstance(row_tuple, namedtuple) won't work.
try:
row_tuple = row_tuple._replace(Index=idx)
except AttributeError: # Tuple not namedtuple
row_tuple = (idx,) + row_tuple[1:]
return row_tuple
if index:
iters = itertools.starmap(_replace_index, zip(iters, self.index))
return iters
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last_valid_index(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def le(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def lookup(self, row_labels, col_labels):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def lt(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mad(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def max(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
        if axis == 1:
return self._map_partitions(
lambda df: df.max(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
            return self.T.max(axis=1, skipna=skipna, level=level,
                              numeric_only=numeric_only, **kwargs)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def median(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def memory_usage(self, index=True, deep=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def min(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
        if axis == 1:
return self._map_partitions(
lambda df: df.min(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
return self.T.min(axis=1, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
def mod(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mode(self, axis=0, numeric_only=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mul(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def multiply(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ne(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def nlargest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def notna(self):
"""Perform notna across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notnull())
def nsmallest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def nunique(self, axis=0, dropna=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pipe(self, func, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot(self, index=None, columns=None, values=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def plot(self, x=None, y=None, kind='line', ax=None, subplots=False,
sharex=None, sharey=False, layout=None, figsize=None,
use_index=True, title=None, grid=None, legend=True, style=None,
logx=False, logy=False, loglog=False, xticks=None, yticks=None,
xlim=None, ylim=None, rot=None, fontsize=None, colormap=None,
table=False, yerr=None, xerr=None, secondary_y=False,
sort_columns=False, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
popped = to_pandas(self._map_partitions(
lambda df: df.pop(item)))
self._df = self._map_partitions(lambda df: df.drop([item], axis=1))._df
self.columns = self.columns.drop(item)
return popped
def pow(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def prod(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def product(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
new_dfs = [_deploy_func.remote(lambda df: df.query(expr, **kwargs),
part) for part in self._df]
if inplace:
self._update_inplace(new_dfs)
else:
return DataFrame(new_dfs, self.columns)
def radd(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rdiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex(self, labels=None, index=None, columns=None, axis=None,
method=None, copy=True, level=None, fill_value=np.nan,
limit=None, tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rename(self, mapper=None, index=None, columns=None, axis=None,
copy=True, inplace=False, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reorder_levels(self, order, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into dataframe columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, pd.PeriodIndex):
values = index.asobject.values
elif isinstance(index, pd.DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
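        # Recompute the index bookkeeping for the (possibly copied) frame and
        # keep only its index; the per-partition lengths returned alongside
        # it are not needed here.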
_, new_index = _compute_length_and_index.remote(new_obj._df)
new_index = ray.get(new_index).index
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, pd.MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, pd.MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, pd.MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
def rfloordiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rmod(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rmul(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, on=None, axis=0, closed=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def round(self, decimals=0, *args, **kwargs):
return self._map_partitions(lambda df: df.round(decimals=decimals,
*args,
**kwargs))
def rpow(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rsub(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rtruediv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def select(self, crit, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def select_dtypes(self, include=None, exclude=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sem(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def set_axis(self, labels, axis=0, inplace=None):
"""Assign desired index to given axis.
Args:
labels (pd.Index or list-like): The Index to assign.
axis (string or int): The axis to reassign.
inplace (bool): Whether to make these modifications inplace.
Returns:
If inplace is False, returns a new DataFrame, otherwise None.
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and \"labels\" as second, is still supported '
'but will be deprecated in a future version of pandas.',
FutureWarning, stacklevel=2)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
'set_axis currently defaults to operating inplace.\nThis '
'will change in a future version of pandas, use '
'inplace=True to avoid this warning.',
FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._index._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""Set the DataFrame index using one or more existing columns.
Args:
keys: column label or list of column labels / arrays.
drop (boolean): Delete columns to be used as the new index.
append (boolean): Whether to append columns to existing index.
inplace (boolean): Modify the DataFrame in place.
verify_integrity (boolean): Check the new index for duplicates.
Otherwise defer the check until necessary. Setting to False
will improve the performance of this method
Returns:
If inplace is set to false returns a new DataFrame, otherwise None.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, pd.MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
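        # Build, for every key, the array of values that will form one level
        # of the new index; columns consumed this way are dropped afterwards
        # when drop is True.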
for col in keys:
if isinstance(col, pd.MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, pd.Series):
level = col._values
names.append(col.name)
elif isinstance(col, pd.Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, pd.Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = _ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def set_value(self, index, col, value, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def shift(self, periods=1, freq=None, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def skew(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def slice_shift(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def squeeze(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def stack(self, level=-1, dropna=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def std(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sub(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def subtract(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def swapaxes(self, axis1, axis2, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def swaplevel(self, i=-2, j=-1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tail(self, n=5):
"""Get the last n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the last n rows of this dataframe.
"""
sizes = self._lengths
if n >= sum(sizes):
return self
cumulative = np.cumsum(np.array(sizes[::-1]))
reverse_dfs = self._df[::-1]
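        # Work backwards from the last partition: cumulative counts are taken
        # over the reversed partition list, and the kept partitions are
        # reversed again before building the result.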
new_dfs = [reverse_dfs[i]
for i in range(len(cumulative))
if cumulative[i] < n]
last_index = len(new_dfs)
# this happens when we only need from the last partition
if last_index == 0:
num_to_transfer = n
else:
num_to_transfer = n - cumulative[last_index - 1]
new_dfs.append(_deploy_func.remote(lambda df: df.tail(num_to_transfer),
reverse_dfs[last_index]))
new_dfs.reverse()
index = self._index.tail(n).index
return DataFrame(new_dfs, self.columns, index=index)
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_clipboard(self, excel=None, sep=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_csv(self, path_or_buf=None, sep=', ', na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_dense(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_dict(self, orient='dict', into=dict):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_feather(self, fname):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail',
private_key=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_hdf(self, path_or_buf, key, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='np.NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, bold_rows=True, classes=None, escape=True,
max_rows=None, max_cols=None, show_dimensions=False,
notebook=False, decimal='.', border=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='np.NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None,
escape=None, encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_panel(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_parquet(self, fname, engine='auto', compression='snappy',
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_period(self, freq=None, axis=0, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_pickle(self, path, compression='infer', protocol=4):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_records(self, index=True, convert_datetime64=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_sparse(self, fill_value=None, kind='block'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding='latin-1', byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='np.NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_xarray(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def transform(self, func, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def truediv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def truncate(self, before=None, after=None, axis=None, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tshift(self, periods=1, freq=None, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tz_convert(self, tz, axis=0, level=None, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def unstack(self, level=-1, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def var(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def xs(self, key, axis=0, level=None, drop_level=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __getitem__(self, key):
"""Get the column specified by key for this DataFrame.
Args:
key : The column name.
Returns:
            A Pandas Series representing the values of the column.
"""
result_column_chunks = self._map_partitions(
lambda df: df.__getitem__(key))
return to_pandas(result_column_chunks)
def __setitem__(self, key, value):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __len__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __unicode__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __invert__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __hash__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __iter__(self):
"""Iterate over the columns
Returns:
An Iterator over the columns of the dataframe.
"""
return iter(self.columns)
def __contains__(self, key):
return key in self.columns
def __nonzero__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __bool__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __abs__(self):
"""Creates a modified DataFrame by elementwise taking the absolute value
Returns:
A modified DataFrame
"""
return self.abs()
def __round__(self, decimals=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __array__(self, dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __array_wrap__(self, result, context=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __getstate__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __setstate__(self, state):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __delitem__(self, key):
"""Delete an item by key. `del a[key]` for example.
        Operation happens in place.
Args:
key: key to delete
"""
def del_helper(df):
df.__delitem__(key)
return df
self._df = self._map_partitions(del_helper)._df
self.columns = self.columns.drop(key)
def __finalize__(self, other, method=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __copy__(self, deep=True):
"""Make a copy using Ray.DataFrame.copy method
Args:
deep: Boolean, deep copy or not.
Currently we do not support deep copy.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""Make a -deep- copy using Ray.DataFrame.copy method
This is equivalent to copy(deep=True).
Args:
memo: No effect. Just to comply with Pandas API.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=True)
def __and__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __or__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __xor__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __lt__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __le__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __gt__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __ge__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __eq__(self, other):
"""Computes the equality of this DataFrame with another
Returns:
True, if the DataFrames are equal. False otherwise.
"""
return self.equals(other)
def __ne__(self, other):
"""Checks that this DataFrame is not equal to another
Returns:
True, if the DataFrames are not equal. False otherwise.
"""
return not self.equals(other)
def __add__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __iadd__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __mul__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __imul__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __pow__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __ipow__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __sub__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __isub__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __neg__(self):
"""Computes an element wise negative DataFrame
Returns:
            A modified DataFrame where every element is negated.
"""
for t in self.dtypes:
if not (is_bool_dtype(t)
or is_numeric_dtype(t)
or is_timedelta64_dtype(t)):
raise TypeError("Unary negative expects numeric dtype, not {}"
.format(t))
return self._map_partitions(lambda df: df.__neg__())
def __floordiv__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __truediv__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __mod__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __sizeof__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def __doc__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def blocks(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def style(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def iat(axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __rsub__(other, axis=None, level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def loc(self):
"""Purely label-location based indexer for selection by label.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
"""
from .indexing import _Loc_Indexer
return _Loc_Indexer(self)
@property
def is_copy(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __itruediv__(other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __div__(other, axis=None, level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at(axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ix(axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
"""
from .indexing import _iLoc_Indexer
return _iLoc_Indexer(self)
def _get_lengths(df):
"""Gets the length of the dataframe.
Args:
df: A remote pd.DataFrame object.
Returns:
Returns an integer length of the dataframe object. If the attempt
fails, returns 0 as the length.
"""
try:
return len(df)
# Because we sometimes have cases where we have summary statistics in our
# DataFrames
except TypeError:
return 0
@ray.remote
def _shuffle(df, indices, chunksize):
"""Shuffle data by sending it through the Ray Store.
Args:
df (pd.DataFrame): The pandas DataFrame to shuffle.
indices ([any]): The list of indices for the DataFrame.
chunksize (int): The number of indices to send.
Returns:
The list of pd.DataFrame objects in order of their assignment. This
order is important because it determines which task will get the data.
"""
i = 0
partition = []
while len(indices) > chunksize:
oids = df.reindex(indices[:chunksize])
partition.append(oids)
indices = indices[chunksize:]
i += 1
else:
oids = df.reindex(indices)
partition.append(oids)
return partition
@ray.remote
def _local_groupby(df_rows, axis=0):
"""Apply a groupby on this partition for the blocks sent to it.
Args:
df_rows ([pd.DataFrame]): A list of dataframes for this partition. Goes
through the Ray object store.
Returns:
A DataFrameGroupBy object from the resulting groupby.
"""
concat_df = pd.concat(df_rows, axis=axis)
return concat_df.groupby(concat_df.index)
@ray.remote
def _deploy_func(func, dataframe, *args):
"""Deploys a function for the _map_partitions call.
Args:
dataframe (pandas.DataFrame): The pandas DataFrame for this partition.
Returns:
A futures object representing the return value of the function
provided.
"""
if len(args) == 0:
return func(dataframe)
else:
return func(dataframe, *args)
def from_pandas(df, npartitions=None, chunksize=None, sort=True):
"""Converts a pandas DataFrame to a Ray DataFrame.
Args:
df (pandas.DataFrame): The pandas DataFrame to convert.
npartitions (int): The number of partitions to split the DataFrame
into. Has priority over chunksize.
chunksize (int): The number of rows to put in each partition.
sort (bool): Whether or not to sort the df as it is being converted.
Returns:
A new Ray DataFrame object.
"""
if sort and not df.index.is_monotonic_increasing:
df = df.sort_index(ascending=True)
if npartitions is not None:
chunksize = int(len(df) / npartitions)
elif chunksize is None:
raise ValueError("The number of partitions or chunksize must be set.")
temp_df = df
dataframes = []
lengths = []
while len(temp_df) > chunksize:
t_df = temp_df[:chunksize]
lengths.append(len(t_df))
# reset_index here because we want a pd.RangeIndex
# within the partitions. It is smaller and sometimes faster.
t_df = t_df.reset_index(drop=True)
top = ray.put(t_df)
dataframes.append(top)
temp_df = temp_df[chunksize:]
else:
temp_df = temp_df.reset_index(drop=True)
dataframes.append(ray.put(temp_df))
lengths.append(len(temp_df))
return DataFrame(dataframes, df.columns, index=df.index)
def to_pandas(df):
"""Converts a Ray DataFrame to a pandas DataFrame/Series.
Args:
df (ray.DataFrame): The Ray DataFrame to convert.
Returns:
A new pandas DataFrame.
"""
pd_df = pd.concat(ray.get(df._df))
pd_df.index = df.index
pd_df.columns = df.columns
return pd_df
@ray.remote(num_return_vals=2)
def _compute_length_and_index(dfs):
"""Create a default index, which is a RangeIndex
Returns:
The pd.RangeIndex object that represents this DataFrame.
"""
lengths = ray.get([_deploy_func.remote(_get_lengths, d)
for d in dfs])
dest_indices = {"partition":
[i for i in range(len(lengths))
for j in range(lengths[i])],
"index_within_partition":
[j for i in range(len(lengths))
for j in range(lengths[i])]}
return lengths, pd.DataFrame(dest_indices)
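# --- Illustrative usage sketch (assumes a local Ray runtime; guarded so nothing runs on import) ---
if __name__ == "__main__":
    ray.init()
    _pdf = pd.DataFrame({"a": list(range(10)), "b": list(range(10))})
    _rdf = from_pandas(_pdf, npartitions=2)  # two partitions stored in the Ray object store
    _back = to_pandas(_rdf)                   # concatenate the partitions back into pandas
    print(_back.equals(_pdf))                 # should print True for a clean round trip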
|
[
"pandas.core.dtypes.cast.maybe_upcast_putmask",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.common.is_numeric_dtype",
"ray.put",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.DataFrame",
"ray.remote",
"numpy.cumsum",
"pandas.concat",
"pandas.compat.lzip",
"pandas.api.types.is_scalar",
"pandas.core.index._ensure_index_from_sequences",
"pandas._libs.lib.maybe_convert_objects",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"ray.get",
"numpy.dtype",
"pandas.unique",
"numpy.array",
"warnings.warn",
"itertools.chain.from_iterable"
] |
[((93466, 93495), 'ray.remote', 'ray.remote', ([], {'num_return_vals': '(2)'}), '(num_return_vals=2)\n', (93476, 93495), False, 'import ray\n'), ((91212, 91241), 'pandas.concat', 'pd.concat', (['df_rows'], {'axis': 'axis'}), '(df_rows, axis=axis)\n', (91221, 91241), True, 'import pandas as pd\n'), ((9073, 9092), 'ray.get', 'ray.get', (['partitions'], {}), '(partitions)\n', (9080, 9092), False, 'import ray\n'), ((36808, 36832), 'numpy.cumsum', 'np.cumsum', (['self._lengths'], {}), '(self._lengths)\n', (36817, 36832), True, 'import numpy as np\n'), ((38467, 38503), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['iters'], {}), '(iters)\n', (38496, 38503), False, 'import itertools\n'), ((40498, 40534), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['iters'], {}), '(iters)\n', (40527, 40534), False, 'import itertools\n'), ((55313, 55352), 'pandas.util._validators.validate_bool_kwarg', 'validate_bool_kwarg', (['inplace', '"""inplace"""'], {}), "(inplace, 'inplace')\n", (55332, 55352), False, 'from pandas.util._validators import validate_bool_kwarg\n'), ((61969, 61986), 'pandas.api.types.is_scalar', 'is_scalar', (['labels'], {}), '(labels)\n', (61978, 61986), False, 'from pandas.api.types import is_scalar\n'), ((63750, 63789), 'pandas.util._validators.validate_bool_kwarg', 'validate_bool_kwarg', (['inplace', '"""inplace"""'], {}), "(inplace, 'inplace')\n", (63769, 63789), False, 'from pandas.util._validators import validate_bool_kwarg\n'), ((65283, 65326), 'pandas.core.index._ensure_index_from_sequences', '_ensure_index_from_sequences', (['arrays', 'names'], {}), '(arrays, names)\n', (65311, 65326), False, 'from pandas.core.index import _ensure_index_from_sequences\n'), ((92858, 92871), 'ray.put', 'ray.put', (['t_df'], {}), '(t_df)\n', (92865, 92871), False, 'import ray\n'), ((93371, 93386), 'ray.get', 'ray.get', (['df._df'], {}), '(df._df)\n', (93378, 93386), False, 'import ray\n'), ((94075, 94101), 'pandas.DataFrame', 'pd.DataFrame', (['dest_indices'], {}), '(dest_indices)\n', (94087, 94101), True, 'import pandas as pd\n'), ((2390, 2416), 'ray.get', 'ray.get', (['self._index_cache'], {}), '(self._index_cache)\n', (2397, 2416), False, 'import ray\n'), ((3230, 3257), 'ray.get', 'ray.get', (['self._length_cache'], {}), '(self._length_cache)\n', (3237, 3257), False, 'import ray\n'), ((32692, 32707), 'numpy.array', 'np.array', (['sizes'], {}), '(sizes)\n', (32700, 32707), True, 'import numpy as np\n'), ((56620, 56638), 'ray.get', 'ray.get', (['new_index'], {}), '(new_index)\n', (56627, 56638), False, 'import ray\n'), ((62000, 62275), 'warnings.warn', 'warnings.warn', (['"""set_axis now takes "labels" as first argument, and "axis" as named parameter. The old form, with "axis" as first parameter and "labels" as second, is still supported but will be deprecated in a future version of pandas."""', 'FutureWarning'], {'stacklevel': '(2)'}), '(\n \'set_axis now takes "labels" as first argument, and "axis" as named parameter. 
The old form, with "axis" as first parameter and "labels" as second, is still supported but will be deprecated in a future version of pandas.\'\n , FutureWarning, stacklevel=2)\n', (62013, 62275), False, 'import warnings\n'), ((62439, 62636), 'warnings.warn', 'warnings.warn', (['"""set_axis currently defaults to operating inplace.\nThis will change in a future version of pandas, use inplace=True to avoid this warning."""', 'FutureWarning'], {'stacklevel': '(2)'}), '(\n """set_axis currently defaults to operating inplace.\nThis will change in a future version of pandas, use inplace=True to avoid this warning."""\n , FutureWarning, stacklevel=2)\n', (62452, 62636), False, 'import warnings\n'), ((69095, 69116), 'numpy.array', 'np.array', (['sizes[::-1]'], {}), '(sizes[::-1])\n', (69103, 69116), True, 'import numpy as np\n'), ((93026, 93042), 'ray.put', 'ray.put', (['temp_df'], {}), '(temp_df)\n', (93033, 93042), False, 'import ray\n'), ((3455, 3482), 'ray.get', 'ray.get', (['self._length_cache'], {}), '(self._length_cache)\n', (3462, 3482), False, 'import ray\n'), ((9896, 9917), 'pandas.unique', 'pd.unique', (['self.index'], {}), '(self.index)\n', (9905, 9917), True, 'import pandas as pd\n'), ((11004, 11017), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (11012, 11017), True, 'import numpy as np\n'), ((26753, 26789), 'ray.get', 'ray.get', (["other._df[idx['partition']]"], {}), "(other._df[idx['partition']])\n", (26760, 26789), False, 'import ray\n'), ((27420, 27430), 'ray.get', 'ray.get', (['r'], {}), '(r)\n', (27427, 27430), False, 'import ray\n'), ((34129, 34142), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (34137, 34142), True, 'import numpy as np\n'), ((34935, 34948), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (34943, 34948), True, 'import numpy as np\n'), ((39205, 39254), 'pandas.concat', 'pd.concat', (['[_series for _, _series in partitions]'], {}), '([_series for _, _series in partitions])\n', (39214, 39254), True, 'import pandas as pd\n'), ((57237, 57279), 'pandas.compat.lzip', 'lzip', (['self.index.levels', 'self.index.labels'], {}), '(self.index.levels, self.index.labels)\n', (57241, 57279), False, 'from pandas.compat import lzip\n'), ((86289, 86305), 'pandas.core.dtypes.common.is_bool_dtype', 'is_bool_dtype', (['t'], {}), '(t)\n', (86302, 86305), False, 'from pandas.core.dtypes.common import is_bool_dtype, is_numeric_dtype, is_timedelta64_dtype\n'), ((86329, 86348), 'pandas.core.dtypes.common.is_numeric_dtype', 'is_numeric_dtype', (['t'], {}), '(t)\n', (86345, 86348), False, 'from pandas.core.dtypes.common import is_bool_dtype, is_numeric_dtype, is_timedelta64_dtype\n'), ((86372, 86395), 'pandas.core.dtypes.common.is_timedelta64_dtype', 'is_timedelta64_dtype', (['t'], {}), '(t)\n', (86392, 86395), False, 'from pandas.core.dtypes.common import is_bool_dtype, is_numeric_dtype, is_timedelta64_dtype\n'), ((55842, 55875), 'pandas._libs.lib.maybe_convert_objects', 'lib.maybe_convert_objects', (['values'], {}), '(values)\n', (55867, 55875), False, 'from pandas._libs import lib\n'), ((56432, 56474), 'pandas.core.dtypes.cast.maybe_upcast_putmask', 'maybe_upcast_putmask', (['values', 'mask', 'np.nan'], {}), '(values, mask, np.nan)\n', (56452, 56474), False, 'from pandas.core.dtypes.cast import maybe_upcast_putmask\n')]
|
import json
import os
import pandas as pd
from src import DATA_FOLDER, UNZIPED_FOLDER_NAME
from src.io import CNAE_JSON_NAME, NATJU_JSON_NAME, QUAL_SOCIO_JSON_NAME, MOTIVOS_JSON_NAME, PAIS_JSON_NAME, \
MUNIC_JSON_NAME
from src.io.get_last_ref_date import main as get_last_ref_date
def main(ref_date=None):
ref_date = ref_date or get_last_ref_date()
path_unziped = os.path.join(DATA_FOLDER, ref_date, UNZIPED_FOLDER_NAME)
list_all_unziped_files = os.listdir(path_unziped)
for file in list_all_unziped_files:
path_file = os.path.join(path_unziped, file)
if "CNAECSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=CNAE_JSON_NAME)
if "NATJUCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=NATJU_JSON_NAME)
if "QUALSCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=QUAL_SOCIO_JSON_NAME)
if "MOTICSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=MOTIVOS_JSON_NAME)
if "PAISCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=PAIS_JSON_NAME)
if "MUNICCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=MUNIC_JSON_NAME)
def create_json(path_file, path_unziped, json_name):
df = pd.read_csv(path_file, sep=';', encoding='cp1252', header=None)
df.sort_values(df.columns[0], inplace=True)
_dict = dict(df.values)
path_json = os.path.join(path_unziped, json_name)
with open(path_json, 'w', encoding='utf-8') as f:
print(f"creating: '{path_json}'", end=' ... ', flush=True)
json.dump(_dict, f, ensure_ascii=False)
print('done!')
return _dict
if __name__ == '__main__':
main()
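# Note: create_json() builds a simple {code: description} mapping from the first two
# columns of each ';'-separated, cp1252-encoded CSV and dumps it inside the unzipped
# folder under the matching *_JSON_NAME constant (illustrative summary of the flow above).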
|
[
"json.dump",
"src.io.get_last_ref_date.main",
"pandas.read_csv",
"os.path.join",
"os.listdir"
] |
[((393, 449), 'os.path.join', 'os.path.join', (['DATA_FOLDER', 'ref_date', 'UNZIPED_FOLDER_NAME'], {}), '(DATA_FOLDER, ref_date, UNZIPED_FOLDER_NAME)\n', (405, 449), False, 'import os\n'), ((480, 504), 'os.listdir', 'os.listdir', (['path_unziped'], {}), '(path_unziped)\n', (490, 504), False, 'import os\n'), ((1510, 1573), 'pandas.read_csv', 'pd.read_csv', (['path_file'], {'sep': '""";"""', 'encoding': '"""cp1252"""', 'header': 'None'}), "(path_file, sep=';', encoding='cp1252', header=None)\n", (1521, 1573), True, 'import pandas as pd\n'), ((1669, 1706), 'os.path.join', 'os.path.join', (['path_unziped', 'json_name'], {}), '(path_unziped, json_name)\n', (1681, 1706), False, 'import os\n'), ((353, 372), 'src.io.get_last_ref_date.main', 'get_last_ref_date', ([], {}), '()\n', (370, 372), True, 'from src.io.get_last_ref_date import main as get_last_ref_date\n'), ((567, 599), 'os.path.join', 'os.path.join', (['path_unziped', 'file'], {}), '(path_unziped, file)\n', (579, 599), False, 'import os\n'), ((1839, 1878), 'json.dump', 'json.dump', (['_dict', 'f'], {'ensure_ascii': '(False)'}), '(_dict, f, ensure_ascii=False)\n', (1848, 1878), False, 'import json\n')]
|
#!env python
import aoc
import collections
import pprint
import re
ISINT = re.compile(r'^-?[0-9]+$')
def parse_data(lines):
return [line.split() for line in lines]
def valueof(v, registers):
if v is None:
return None
if ISINT.match(v):
return int(v)
return registers[v]
def solve1(data):
registers = collections.defaultdict(int)
pc = 0
soundplayed = 0
while 0 <= pc < len(data):
instr = data[pc][0]
v1 = data[pc][1]
v2 = data[pc][2] if len(data[pc]) > 2 else None
pc += 1
if instr == 'snd':
soundplayed = valueof(v1, registers)
elif instr == 'rcv':
if valueof(v1, registers) != 0:
return ('Last sound played', soundplayed)
elif instr == 'set':
registers[v1] = valueof(v2, registers)
elif instr == 'add':
registers[v1] += valueof(v2, registers)
elif instr == 'mul':
registers[v1] *= valueof(v2, registers)
elif instr == 'mod':
registers[v1] = registers[v1] % valueof(v2, registers)
elif instr == 'jgz':
if valueof(v1, registers) > 0:
pc += valueof(v2, registers) - 1
return "terminated"
def program(data, pid, rcvqueue, sndqueue):
    registers = collections.defaultdict(int)  # int factory so unset registers read as 0 (a bare defaultdict() raises KeyError)
registers['p'] = pid
pc = 0
sendcount = 0
terminated = False
while 0 <= pc < len(data) and not terminated:
instr = data[pc][0]
v1 = data[pc][1]
v2 = data[pc][2] if len(data[pc]) > 2 else None
pc += 1
if instr == 'snd':
sndqueue.appendleft(valueof(v1, registers))
sendcount += 1
elif instr == 'rcv':
if len(rcvqueue) == 0:
yield sendcount
try:
registers[v1] = rcvqueue.pop()
except IndexError:
terminated = True
elif instr == 'set':
registers[v1] = valueof(v2, registers)
elif instr == 'add':
registers[v1] += valueof(v2, registers)
elif instr == 'mul':
registers[v1] *= valueof(v2, registers)
elif instr == 'mod':
registers[v1] = registers[v1] % valueof(v2, registers)
elif instr == 'jgz':
if valueof(v1, registers) > 0:
pc += valueof(v2, registers) - 1
yield sendcount
def solve2(data):
queues = [collections.deque(), collections.deque()]
programs = [program(data, 0, queues[0], queues[1]),
program(data, 1, queues[1], queues[0])]
current = 0
returns = [None, None]
while 1:
try:
returns[current] = next(programs[current])
except StopIteration:
return returns
current = (current + 1) % 2
lines = [
'set a 1',
'add a 2',
'mul a a',
'mod a 5',
'snd a',
'set a 0',
'rcv a',
'jgz a -1',
'set a 1',
'jgz a -2',
]
if __name__ == '__main__':
lines = aoc.input_lines(day=18)
data = parse_data(lines)
pprint.pprint(solve1(data))
pprint.pprint(solve2(data))
|
[
"collections.defaultdict",
"collections.deque",
"aoc.input_lines",
"re.compile"
] |
[((78, 102), 're.compile', 're.compile', (['"""^-?[0-9]+$"""'], {}), "('^-?[0-9]+$')\n", (88, 102), False, 'import re\n'), ((345, 373), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (368, 373), False, 'import collections\n'), ((1315, 1340), 'collections.defaultdict', 'collections.defaultdict', ([], {}), '()\n', (1338, 1340), False, 'import collections\n'), ((3017, 3040), 'aoc.input_lines', 'aoc.input_lines', ([], {'day': '(18)'}), '(day=18)\n', (3032, 3040), False, 'import aoc\n'), ((2442, 2461), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2459, 2461), False, 'import collections\n'), ((2463, 2482), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2480, 2482), False, 'import collections\n')]
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_fee import GetTransactionDetailsByTransactionIDResponseItemFee
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_recipients import GetTransactionDetailsByTransactionIDResponseItemRecipients
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_senders import GetTransactionDetailsByTransactionIDResponseItemSenders
from cryptoapis.model.list_transactions_by_address_response_item_blockchain_specific import ListTransactionsByAddressResponseItemBlockchainSpecific
globals()['GetTransactionDetailsByTransactionIDResponseItemFee'] = GetTransactionDetailsByTransactionIDResponseItemFee
globals()['GetTransactionDetailsByTransactionIDResponseItemRecipients'] = GetTransactionDetailsByTransactionIDResponseItemRecipients
globals()['GetTransactionDetailsByTransactionIDResponseItemSenders'] = GetTransactionDetailsByTransactionIDResponseItemSenders
globals()['ListTransactionsByAddressResponseItemBlockchainSpecific'] = ListTransactionsByAddressResponseItemBlockchainSpecific
from cryptoapis.model.list_transactions_by_address_response_item import ListTransactionsByAddressResponseItem
class TestListTransactionsByAddressResponseItem(unittest.TestCase):
"""ListTransactionsByAddressResponseItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testListTransactionsByAddressResponseItem(self):
"""Test ListTransactionsByAddressResponseItem"""
# FIXME: construct object with mandatory attributes with example values
# model = ListTransactionsByAddressResponseItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main"
] |
[((2404, 2419), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2417, 2419), False, 'import unittest\n')]
|
from __future__ import unicode_literals
from django.db import models
class DemographicInformation(models.Model):
session_id = models.CharField(max_length=32, blank=True)
# This could be used to store different platforms such as android,
# ios, web if different identification methods are used for each one.
platform = models.CharField(max_length=256, default='web')
gender = models.CharField(max_length=32)
qiraah = models.CharField(max_length=32, blank=True, null=True)
age = models.CharField(max_length=32)
ethnicity = models.CharField(max_length=32, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
class AnnotatedRecording(models.Model):
file = models.FileField(blank=True, null=True)
surah_num = models.IntegerField(blank=True, null=True)
ayah_num = models.IntegerField(blank=True, null=True)
hash_string = models.CharField(max_length=32)
recitation_mode = models.CharField(max_length=32, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True) # TODO(implement timeout)
session_id = models.CharField(max_length=32)
is_evaluated = models.BooleanField(default=False)
associated_demographic = models.ForeignKey(DemographicInformation,
on_delete=models.SET_NULL,
null=True, blank=True, default=None)
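# Illustrative ORM usage (commented sketch; the field values are placeholders and a
# configured Django project is assumed):
#
# demo = DemographicInformation.objects.create(
#     session_id="abc123", platform="web", gender="female", age="25-34")
# AnnotatedRecording.objects.create(
#     surah_num=1, ayah_num=1, hash_string="abc123", session_id="abc123",
#     associated_demographic=demo)
# pending = AnnotatedRecording.objects.filter(is_evaluated=False).count()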
|
[
"django.db.models.FileField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((132, 175), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(True)'}), '(max_length=32, blank=True)\n', (148, 175), False, 'from django.db import models\n'), ((336, 383), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'default': '"""web"""'}), "(max_length=256, default='web')\n", (352, 383), False, 'from django.db import models\n'), ((397, 428), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (413, 428), False, 'from django.db import models\n'), ((442, 496), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(True)', 'null': '(True)'}), '(max_length=32, blank=True, null=True)\n', (458, 496), False, 'from django.db import models\n'), ((507, 538), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (523, 538), False, 'from django.db import models\n'), ((555, 609), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(True)', 'null': '(True)'}), '(max_length=32, blank=True, null=True)\n', (571, 609), False, 'from django.db import models\n'), ((626, 665), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (646, 665), False, 'from django.db import models\n'), ((719, 758), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (735, 758), False, 'from django.db import models\n'), ((775, 817), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (794, 817), False, 'from django.db import models\n'), ((833, 875), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (852, 875), False, 'from django.db import models\n'), ((894, 925), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (910, 925), False, 'from django.db import models\n'), ((948, 1002), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(True)', 'null': '(True)'}), '(max_length=32, blank=True, null=True)\n', (964, 1002), False, 'from django.db import models\n'), ((1019, 1058), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1039, 1058), False, 'from django.db import models\n'), ((1103, 1134), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (1119, 1134), False, 'from django.db import models\n'), ((1154, 1188), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1173, 1188), False, 'from django.db import models\n'), ((1218, 1328), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DemographicInformation'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)', 'default': 'None'}), '(DemographicInformation, on_delete=models.SET_NULL, null=\n True, blank=True, default=None)\n', (1235, 1328), False, 'from django.db import models\n')]
|
""" Storage subsystem's API: responsible of communication with storage service
"""
import logging
from pprint import pformat
from aiohttp import web
from yarl import URL
from servicelib.rest_responses import unwrap_envelope
from .storage_config import get_client_session, get_storage_config
log = logging.getLogger(__name__)
def _get_storage_client(app: web.Application):
cfg = get_storage_config(app)
# storage service API endpoint
endpoint = URL.build(scheme="http", host=cfg["host"], port=cfg["port"]).with_path(
cfg["version"]
)
session = get_client_session(app)
return session, endpoint
async def copy_data_folders_from_project(
app, source_project, destination_project, nodes_map, user_id
):
    # TODO: optimize by checking whether the project actually has data before doing the call
client, api_endpoint = _get_storage_client(app)
# /simcore-s3/folders:
url = (api_endpoint / "simcore-s3/folders").with_query(user_id=user_id)
async with client.post(
url,
json={
"source": source_project,
"destination": destination_project,
"nodes_map": nodes_map,
},
ssl=False,
) as resp:
payload = await resp.json()
updated_project, error = unwrap_envelope(payload)
if error:
msg = "Cannot copy project data in storage: %s" % pformat(error)
log.error(msg)
# TODO: should reconstruct error and rethrow same exception as storage service?
raise web.HTTPServiceUnavailable(reason=msg)
return updated_project
async def _delete(session, target_url):
async with session.delete(target_url, ssl=False) as resp:
log.info(
"delete_data_folders_of_project request responded with status %s",
resp.status,
)
# NOTE: context will automatically close connection
async def delete_data_folders_of_project(app, project_id, user_id):
# SEE api/specs/storage/v0/openapi.yaml
session, api_endpoint = _get_storage_client(app)
url = (api_endpoint / f"simcore-s3/folders/{project_id}").with_query(
user_id=user_id
)
await _delete(session, url)
async def delete_data_folders_of_project_node(
app, project_id: str, node_id: str, user_id: str
):
# SEE api/specs/storage/v0/openapi.yaml
session, api_endpoint = _get_storage_client(app)
url = (api_endpoint / f"simcore-s3/folders/{project_id}").with_query(
user_id=user_id, node_id=node_id
)
await _delete(session, url)
|
[
"pprint.pformat",
"aiohttp.web.HTTPServiceUnavailable",
"yarl.URL.build",
"servicelib.rest_responses.unwrap_envelope",
"logging.getLogger"
] |
[((302, 329), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (319, 329), False, 'import logging\n'), ((1271, 1295), 'servicelib.rest_responses.unwrap_envelope', 'unwrap_envelope', (['payload'], {}), '(payload)\n', (1286, 1295), False, 'from servicelib.rest_responses import unwrap_envelope\n'), ((464, 524), 'yarl.URL.build', 'URL.build', ([], {'scheme': '"""http"""', 'host': "cfg['host']", 'port': "cfg['port']"}), "(scheme='http', host=cfg['host'], port=cfg['port'])\n", (473, 524), False, 'from yarl import URL\n'), ((1528, 1566), 'aiohttp.web.HTTPServiceUnavailable', 'web.HTTPServiceUnavailable', ([], {'reason': 'msg'}), '(reason=msg)\n', (1554, 1566), False, 'from aiohttp import web\n'), ((1376, 1390), 'pprint.pformat', 'pformat', (['error'], {}), '(error)\n', (1383, 1390), False, 'from pprint import pformat\n')]
|
import glob
import re
import pkgutil
import kattiskitten.languages as languages
SUPPORTED_LANGUAGES = []
LANGUAGE_EXTENSIONS = {}
CONFIGS = {}
for importer, language, ispkg in pkgutil.iter_modules(languages.__path__):
SUPPORTED_LANGUAGES.append(language)
config = importer.find_module(language).load_module(language)
LANGUAGE_EXTENSIONS[config.file_extension] = language
CONFIGS[language] = config
def get_config(language):
if language not in CONFIGS:
raise ValueError(f"Language not supported. Supported languages are: {', '.join(SUPPORTED_LANGUAGES)}")
return CONFIGS[language]
def determine_language(problem):
solution = glob.glob(f"./{problem}/solution.*")
if len(solution) < 1:
raise ValueError("Couldn't find any program matching patten (solution.*)")
if len(solution) > 1:
raise ValueError(
"Found more than one program matching patten (solution.*). It currently only supports one")
m = re.search(r".*\.(.+?)$", solution[0])
if m:
extension = m.group(1)
        language = LANGUAGE_EXTENSIONS.get(extension)  # .get() so an unknown extension reaches the error below instead of raising KeyError
if not language:
raise ValueError(
f"Couldn't find supported language with extension {extension}")
return language
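# Illustrative usage (commented sketch; the directory name and mapping are hypothetical):
# Assuming a folder ./hello that contains a single solution.py,
#   language = determine_language("hello")   # "py" is looked up in LANGUAGE_EXTENSIONS
#   config = get_config(language)            # the per-language module from kattiskitten.languages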
|
[
"pkgutil.iter_modules",
"re.search",
"glob.glob"
] |
[((178, 218), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['languages.__path__'], {}), '(languages.__path__)\n', (198, 218), False, 'import pkgutil\n'), ((667, 703), 'glob.glob', 'glob.glob', (['f"""./{problem}/solution.*"""'], {}), "(f'./{problem}/solution.*')\n", (676, 703), False, 'import glob\n'), ((979, 1016), 're.search', 're.search', (['""".*\\\\.(.+?)$"""', 'solution[0]'], {}), "('.*\\\\.(.+?)$', solution[0])\n", (988, 1016), False, 'import re\n')]
|
import spacy
from spacy.tokens import Doc
from spacy.vocab import Vocab
TEST_TOKENS = ['Une', 'banque', 'est', 'une', 'institution', 'financière', '.', '5']
TEST_POS = ['DET', 'NOUN', 'AUX', 'DET', 'NOUN', 'ADJ', 'PUNCT', 'NUM']
TEST_SPACES = [True] * len(TEST_TOKENS)
def test_single_UPOS_contextual() -> None:
french_model = spacy.load("fr_single_upos2usas_contextual")
doc = Doc(Vocab(), words=TEST_TOKENS, spaces=TEST_SPACES, pos=TEST_POS)
output = french_model(doc)
expected_output = [
['Z5'],
['I1.1', 'X2.6+', 'M1', 'I1/H1', 'I1.1/I2.1c', 'W3/M4', 'A9+/H1', 'O2', 'M6'],
['M6'],
['Z5'],
['S5+c', 'S7.1+', 'H1c', 'S1.1.1', 'T2+'],
['Z99'],
['PUNCT'],
['N1']
]
assert len(expected_output) == len(output)
for token_index, token in enumerate(output):
assert expected_output[token_index] == token._.pymusas_tags
assert [(token_index, token_index + 1)] == token._.pymusas_mwe_indexes
|
[
"spacy.load",
"spacy.vocab.Vocab"
] |
[((335, 379), 'spacy.load', 'spacy.load', (['"""fr_single_upos2usas_contextual"""'], {}), "('fr_single_upos2usas_contextual')\n", (345, 379), False, 'import spacy\n'), ((394, 401), 'spacy.vocab.Vocab', 'Vocab', ([], {}), '()\n', (399, 401), False, 'from spacy.vocab import Vocab\n')]
|
from spacy.lang.zh import Chinese
from spacy.tokens import Token
nlp = Chinese()
# Register the extension attribute "is_country" on Token, with a default value of False
Token.set_extension("is_country", default=False)
# Process the text ("I live in Singapore.") and set is_country to True for the token "新加坡" (Singapore)
doc = nlp("我住在新加坡。")
doc[3]._.is_country = True
# Print the text and the is_country attribute for every token
print([(token.text, token._.is_country) for token in doc])
|
[
"spacy.tokens.Token.set_extension",
"spacy.lang.zh.Chinese"
] |
[((72, 81), 'spacy.lang.zh.Chinese', 'Chinese', ([], {}), '()\n', (79, 81), False, 'from spacy.lang.zh import Chinese\n'), ((118, 166), 'spacy.tokens.Token.set_extension', 'Token.set_extension', (['"""is_country"""'], {'default': '(False)'}), "('is_country', default=False)\n", (137, 166), False, 'from spacy.tokens import Token\n')]
|
from charPairs import CharPairs
from decimal import *
# Word Similarity Algorithm
# Similarity(string1, string2) = 2 * number of in-common char. pairs / sum of the total number of char. pairs in each string
class similarity:
def __init__(self,string1, string2):
#get character pairs for string1
strChar1 = CharPairs(string1)
self.charPair1 = strChar1.getCharPairs()
self.charPair1Count = strChar1.getCharPairCount()
self.string1 = string1.lower()
#get character pairs for string2
strChar2 = CharPairs(string2)
self.charPair2 = strChar2.getCharPairs()
self.charPair2Count = strChar2.getCharPairCount()
self.string2 = string2.lower()
#run steps
self.find_in_common_char_pairs()
self.calculate_similarity()
def find_in_common_char_pairs(self):
self.incommon = set(self.charPair1).intersection(self.charPair2)
self.incommon_count = 0
for i in self.incommon:
self.incommon_count += 1
def calculate_similarity(self):
numerator = 2 * self.incommon_count
denominator = self.charPair1Count + self.charPair2Count
getcontext().prec = 4
self.sim = Decimal(numerator) / Decimal(denominator)
def get_sim(self):
return self.sim
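# Illustrative usage sketch (runs only when the module is executed directly):
if __name__ == "__main__":
    print(similarity("France", "French").get_sim())   # shared adjacent pairs -> score between 0 and 1
    print(similarity("France", "France").get_sim())   # identical strings score 1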
|
[
"charPairs.CharPairs"
] |
[((319, 337), 'charPairs.CharPairs', 'CharPairs', (['string1'], {}), '(string1)\n', (328, 337), False, 'from charPairs import CharPairs\n'), ((544, 562), 'charPairs.CharPairs', 'CharPairs', (['string2'], {}), '(string2)\n', (553, 562), False, 'from charPairs import CharPairs\n')]
|
import os
import jeev
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="jeev",
version=jeev.version.split('-')[0] + 'b0',
author="<NAME>",
author_email="<EMAIL>",
description="A simple chat bot, at your service.",
license="MIT",
keywords="chat slack bot irc jeev",
url="https://github.com/jhgg/jeev",
packages=find_packages(exclude=['modules']),
install_requires=[
'certifi==14.5.14',
'coloredlogs==1.0.1',
'cssselect==0.9.1',
'Flask==0.10.1',
'geopy==1.1.3',
'gevent==1.0.2',
'greenlet==0.4.7',
'humanfriendly==1.27',
'itsdangerous==0.24',
'Jinja2==2.7.3',
'lxml==3.3.6',
'MarkupSafe==0.23',
'pytz==2014.4',
'requests==2.7.0',
'six==1.9.0',
'slackclient==0.15',
'websocket-client==0.32.0',
'Werkzeug==0.9.6',
'wheel==0.24.0',
],
include_package_data=True,
zip_safe=False,
scripts=['bin/jeev'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Communications :: Chat",
"Topic :: Utilities",
"Framework :: Flask",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only",
"License :: OSI Approved :: MIT License",
],
)
|
[
"jeev.version.split",
"os.path.dirname",
"setuptools.find_packages"
] |
[((445, 479), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['modules']"}), "(exclude=['modules'])\n", (458, 479), False, 'from setuptools import setup, find_packages\n'), ((194, 217), 'jeev.version.split', 'jeev.version.split', (['"""-"""'], {}), "('-')\n", (212, 217), False, 'import jeev\n'), ((114, 139), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (129, 139), False, 'import os\n')]
|
from typing import Tuple
import gym
import numpy as np
from gym_gathering.observations.base_observation_generator import ObservationGenerator
class SingleChannelObservationGenerator(ObservationGenerator):
def __init__(
self,
maze: np.ndarray,
random_goal: bool,
goal_range: int,
noise: float = 0.0,
noise_type: str = "gauss",
static_noise: float = 0.0,
static_noise_type: str = "s&p",
restrict_noise: bool = True,
):
super(SingleChannelObservationGenerator, self).__init__(
random_goal=random_goal,
goal_range=goal_range,
noise=noise,
noise_type=noise_type,
static_noise=static_noise,
static_noise_type=static_noise_type,
restrict_noise=restrict_noise,
)
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(*maze.shape, 1), dtype=np.uint8
)
def observation(self, particles: np.ndarray, goal: Tuple[int, int]):
observation = np.zeros(self.maze.shape)
observation = self.render_particles(particles, out=observation)
observation = self.generate_noise(observation)
if self.random_goal:
observation = self.render_goal(goal, out=observation)
return observation[:, :, np.newaxis] # Convert to single channel image
class MultiChannelObservationGenerator(ObservationGenerator):
def __init__(
self,
maze: np.ndarray,
random_goal: bool,
goal_range: int,
noise: float = 0.0,
noise_type: str = "gauss",
static_noise: float = 0.0,
static_noise_type: str = "s&p",
restrict_noise: bool = True,
):
super(MultiChannelObservationGenerator, self).__init__(
random_goal=random_goal,
goal_range=goal_range,
noise=noise,
noise_type=noise_type,
static_noise=static_noise,
static_noise_type=static_noise_type,
restrict_noise=restrict_noise,
)
self.n_channels = 3 if random_goal else 2
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(*maze.shape, self.n_channels), dtype=np.uint8
)
def observation(self, particles: np.ndarray, goal: Tuple[int, int]):
observation = np.zeros((*self.maze.shape, self.n_channels))
observation[:, :, 0] = self.render_maze()
particle_image = self.render_particles(particles)
particle_image = self.generate_noise(particle_image)
observation[:, :, 1] = particle_image
if self.random_goal:
observation[:, :, 2] = self.render_goal(goal)
return observation
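# Illustrative usage (commented sketch; the maze size, particle positions and keyword
# values below are assumptions):
#
# maze = np.zeros((32, 32))
# particles = np.array([[5, 5], [6, 7]])
# gen = SingleChannelObservationGenerator(maze, random_goal=False, goal_range=1)
# obs = gen.observation(particles, goal=(0, 0))   # a (32, 32, 1) image per observation_space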
|
[
"numpy.zeros",
"gym.spaces.Box"
] |
[((872, 943), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(*maze.shape, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(*maze.shape, 1), dtype=np.uint8)\n', (886, 943), False, 'import gym\n'), ((1062, 1087), 'numpy.zeros', 'np.zeros', (['self.maze.shape'], {}), '(self.maze.shape)\n', (1070, 1087), True, 'import numpy as np\n'), ((2169, 2259), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(*maze.shape, self.n_channels)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(*maze.shape, self.n_channels), dtype\n =np.uint8)\n', (2183, 2259), False, 'import gym\n'), ((2373, 2418), 'numpy.zeros', 'np.zeros', (['(*self.maze.shape, self.n_channels)'], {}), '((*self.maze.shape, self.n_channels))\n', (2381, 2418), True, 'import numpy as np\n')]
|
from sprites import *
import pygame
import random
import os
import subprocess
class Mixin:
    # Add clouds and fire to the game
def add_sprite(self,event,coOrds = None):
        # Check that coOrds are valid (coOrds = None means the sprite's coordinates are generated randomly)
if (coOrds == None) or (coOrds[0] >= 0 and coOrds[1] >=0 and coOrds[1] < SCREEN_HEIGHT):
if event == "fire":
# Create the fire instance
detected = Fire(IMG_FIRE, SPRITE_SCALING_FIRE)
else:
#Create cloud instance
detected=Cloud(IMG_CLOUD, SPRITE_SCALING_CLOUD)
detected.damage = self.cloud_damage
detected.points = ((-161, 0), (-128.5, 26.0), (-91.5, 51.0), (-66.5, 50.0),(-11.5,50), (33.5,66), (65.5,47), (120.5,26),(144.5,-26),(133.5,-78),(-47.5,-73),(-74.5,-39), (-114.5,-20), (-128.5, -26.0))
# Position the sprite using coOrds
if coOrds != None:
detected.center_x = coOrds[0]
detected.center_y = coOrds[1]
            # Randomly generate the sprite's coOrds
else:
detected.center_y = random.randrange(0,SCREEN_HEIGHT )
detected.center_x = SCREEN_WIDTH + random.randrange(0,SCREEN_WIDTH)
#Add Sprite to relevant list
if event == "fire":
self.fire_list.append(detected)
else:
self.clouds_list.append(detected)
    # Helper function used by the NN. Adds fires based on the results included in the file
def add_new_data(self):
#Relevant file names
fileName = self.NNDir + "background" + str(self.background_index) + "-fire.txt"
picture = self.source[self.background_index-1]
with open(fileName) as f:
lines = f.readlines()
line = lines[-1].strip()
#Check to see if fire detected. If so, add fire sprite
if line[0] == '(':
line = eval(line, {"__builtins__": {}})
self.add_sprite("fire",(line[0] + SCREEN_WIDTH, SCREEN_HEIGHT - line[1]))
    # Check if a sprite is colliding with fire (triggered by the CPU on update, but by the player on a button press)
def check_fire_collison(self,sprite):
# Generate a list of all emergencies that collided with the satellite.
hit_list = arcade.check_for_collision_with_list(sprite,self.fire_list)
#Setup right sound effect
if sprite == self.player_sprite:
sound = self.player_sound
else:
sound = self.cpu_sound
# Loop through each colliding fire, remove it, and add to the sprite's score.
for fire in hit_list:
            # Skip the sound effect when testing with a headless setup (no display)
if not self.Test:
sound.play()
fire.kill()
sprite.score += SCOREINC
|
[
"random.randrange"
] |
[((1239, 1273), 'random.randrange', 'random.randrange', (['(0)', 'SCREEN_HEIGHT'], {}), '(0, SCREEN_HEIGHT)\n', (1255, 1273), False, 'import random\n'), ((1329, 1362), 'random.randrange', 'random.randrange', (['(0)', 'SCREEN_WIDTH'], {}), '(0, SCREEN_WIDTH)\n', (1345, 1362), False, 'import random\n')]
|
#!/usr/bin/env python3
from collections import OrderedDict
filepath = r"C:\Users\Yun\Downloads\python-3.9.0-docs-text\library\code.txt"
dict_words = OrderedDict()
with open(filepath,'r') as f:
    words = f.read().lower().replace('\n', ' ').split(' ')  # treat newlines as word separators
    set_words = set(words)
    set_words.discard('')  # discard() does not raise if there is no empty token
for word in set_words:
dict_words[word] = words.count(word)
print(sorted(dict_words.items(), key = lambda kv:(kv[1], kv[0])))
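# An equivalent tally with collections.Counter (alternative sketch; it avoids the
# repeated words.count() scans over the whole word list):
from collections import Counter
counts = Counter(w for w in words if w)
print(sorted(counts.items(), key=lambda kv: (kv[1], kv[0])))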
|
[
"collections.OrderedDict"
] |
[((151, 164), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (162, 164), False, 'from collections import OrderedDict\n')]
|
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect SDK.
# Copyright (c) 2019-2020 Ingram Micro. All Rights Reserved.
import pytest
from mock import patch
from connect.exceptions import Message
from connect.resources.base import BaseResource
from .common import Response
def test_deprecated_message():
# type: () -> None
with pytest.deprecated_call():
# noinspection PyStatementEffect
Message('Hello').message
@patch('requests.get')
def test_deprecation_filter_in(get_mock):
get_mock.return_value = Response(True, '[]', 200)
class TestResource(BaseResource):
resource = 'test'
test_resouce = TestResource()
filters = {
'deprecated__in': (1, 2)
}
with pytest.deprecated_call() as warning:
test_resouce.search(filters)
assert str(warning[0].message) == 'deprecated__in: __in operator is deprecated, Use RQL syntax'
|
[
"pytest.deprecated_call",
"connect.exceptions.Message",
"mock.patch"
] |
[((473, 494), 'mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (478, 494), False, 'from mock import patch\n'), ((370, 394), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {}), '()\n', (392, 394), False, 'import pytest\n'), ((755, 779), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {}), '()\n', (777, 779), False, 'import pytest\n'), ((445, 461), 'connect.exceptions.Message', 'Message', (['"""Hello"""'], {}), "('Hello')\n", (452, 461), False, 'from connect.exceptions import Message\n')]
|
from predicate import Predicate
__author__ = ''
class Rule(object):
"""
Base class for fuzzy rules
"""
__COUNT = 0
__slots__ = ('_antecedent', '_consequent', '_weight', '_number')
def __init__(self, antecedent, consequent, weight=1):
"""
Initialize a rule
:param antecedent: premise (if part)
:param consequent: conclusion (then part)
:param weight: how sure are we about this rule
"""
self._antecedent = antecedent
self._consequent = consequent
self._weight = weight
self._number = Rule.__COUNT
Rule.__COUNT += 1
def get_weight(self):
return self._weight
def set_weight(self, value):
if 0 < value <= 1:
self._weight = value
weight = property(get_weight, set_weight, doc='weight factor')
def compute(self, activation=None):
"""Compute rule's firing level and sets this value for adjectives in consequent"""
pass
#region (IN/OUT) Adjectives, Variables
def input_adj(self):
"""
Gets all adjectives in the antecedent of rule
"""
antecedent = self._antecedent
if isinstance(antecedent, Predicate):
return [antecedent.adjective]
        return list({p.adjective for p in antecedent.leaves()})
def output_adj(self):
"""
Gets all adjectives in the consequent of rule
"""
return [predicate.adjective for predicate in self._consequent]
def input_vars(self):
"""
Gets all variables in the antecedent of rule
"""
antecedent = self._antecedent
if isinstance(antecedent, Predicate):
return [antecedent.variable]
return list({p.variable for p in antecedent.leaves()})
def output_vars(self):
"""
Gets all variables in the consequent of rule
"""
return [predicate.variable for predicate in self._consequent]
#endregion
#region (IN/OUT) Predicates
def inputs(self):
"""
Gets all predicates in the antecedent of rule
"""
antecedent = self._antecedent
if isinstance(antecedent, Predicate):
return [antecedent]
return list({p for p in antecedent.leaves()})
def outputs(self):
"""
Gets all predicates in the consequent of rule
"""
return [predicate for predicate in self._consequent]
def predicates(self):
"""
Gets all predicates in the rule
"""
return self.inputs() + self.outputs()
#endregion
@staticmethod
def parse(sentence, scope, tnorm=None, snorm=None, cnorm=None):
"""
Parse a str-rule with given scope and norms
"""
from rules.parser import parse_rule
return parse_rule(sentence, scope, tnorm, snorm, cnorm)
@staticmethod
def get_rule(antecedent, consequent, weight=1):
"""
Gets a correct rule for...
:param antecedent: the structure of antecedent is an operator's tree of predicates
:param consequent: the structure of consequent determines the rule type
:param weight: certainty for this rule
"""
if isinstance(consequent, list):
from rules.mrule import MRule
XRule = MRule
elif isinstance(consequent, Predicate):
from rules.trule import TRule
XRule = TRule
else:
from rules.srule import SRule
XRule = SRule
return XRule(antecedent, consequent, weight)
def __repr__(self):
w = '' if self._weight == 1 else ' WITH ' + str(self._weight)
if isinstance(self._consequent, list):
consequent = ', '.join([str(predicate) for predicate in self._consequent])
else:
consequent = self._consequent
return 'RULE {0}: IF {1} THEN {2}{3};'.format(self._number, self._antecedent, consequent, w)
__str__ = __repr__
|
[
"rules.parser.parse_rule"
] |
[((2937, 2985), 'rules.parser.parse_rule', 'parse_rule', (['sentence', 'scope', 'tnorm', 'snorm', 'cnorm'], {}), '(sentence, scope, tnorm, snorm, cnorm)\n', (2947, 2985), False, 'from rules.parser import parse_rule\n')]
|
# Generated by Django 3.0.14 on 2021-08-17 22:16
import warnings
from django.core.paginator import Paginator
from django.db import migrations
from bpp.util import pbar
def value(elem, *path, return_none=False):
v = None
if elem.versions:
for _elem in elem.versions:
if _elem["current"]:
v = _elem
break
# v = elem.current_version
if v is None:
warnings.warn(
f"Model {elem.__class__} with id {elem.mongoId} has NO current_version!"
)
if return_none:
return
return "[brak current_version]"
for elem in path:
if elem in v:
v = v[elem]
else:
if return_none:
return None
return f"[brak {elem}]"
return v
def value_or_none(elem, *path):
return value(elem, *path, return_none=True)
MAX_TEXT_FIELD_LENGTH = 512
def _pull_up_on_save(elem, pull_up_on_save):
for attr in pull_up_on_save:
v = value_or_none(elem, "object", attr)
if v is not None:
if isinstance(v, str):
if len(v) >= MAX_TEXT_FIELD_LENGTH:
v = v[:MAX_TEXT_FIELD_LENGTH]
setattr(elem, attr, v)
def rebuild_table(model, puos):
queryset = model.objects.all().only("pk", "versions").order_by("pk")
paginator = Paginator(queryset, 1000)
for page in pbar(paginator.page_range):
for elem in paginator.get_page(page).object_list:
_pull_up_on_save(elem, puos)
elem.save(update_fields=puos)
def rebuild(apps, schema_editor):
for model, puos in [
(
apps.get_model("pbn_api", "Publication"),
["year"],
),
]:
rebuild_table(model, puos)
class Migration(migrations.Migration):
dependencies = [
("pbn_api", "0026_auto_20210816_0815"),
]
operations = [
migrations.RunPython(rebuild, migrations.RunPython.noop),
]
|
[
"bpp.util.pbar",
"warnings.warn",
"django.core.paginator.Paginator",
"django.db.migrations.RunPython"
] |
[((1364, 1389), 'django.core.paginator.Paginator', 'Paginator', (['queryset', '(1000)'], {}), '(queryset, 1000)\n', (1373, 1389), False, 'from django.core.paginator import Paginator\n'), ((1406, 1432), 'bpp.util.pbar', 'pbar', (['paginator.page_range'], {}), '(paginator.page_range)\n', (1410, 1432), False, 'from bpp.util import pbar\n'), ((426, 518), 'warnings.warn', 'warnings.warn', (['f"""Model {elem.__class__} with id {elem.mongoId} has NO current_version!"""'], {}), "(\n f'Model {elem.__class__} with id {elem.mongoId} has NO current_version!')\n", (439, 518), False, 'import warnings\n'), ((1920, 1976), 'django.db.migrations.RunPython', 'migrations.RunPython', (['rebuild', 'migrations.RunPython.noop'], {}), '(rebuild, migrations.RunPython.noop)\n', (1940, 1976), False, 'from django.db import migrations\n')]
|
from django.contrib import admin
from .models import Demo, Emoji, Tag, TechStackTag, Comment
admin.site.register(Demo)
admin.site.register(Tag)
admin.site.register(TechStackTag)
admin.site.register(Comment)
admin.site.register(Emoji)
|
[
"django.contrib.admin.site.register"
] |
[((95, 120), 'django.contrib.admin.site.register', 'admin.site.register', (['Demo'], {}), '(Demo)\n', (114, 120), False, 'from django.contrib import admin\n'), ((121, 145), 'django.contrib.admin.site.register', 'admin.site.register', (['Tag'], {}), '(Tag)\n', (140, 145), False, 'from django.contrib import admin\n'), ((146, 179), 'django.contrib.admin.site.register', 'admin.site.register', (['TechStackTag'], {}), '(TechStackTag)\n', (165, 179), False, 'from django.contrib import admin\n'), ((180, 208), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment'], {}), '(Comment)\n', (199, 208), False, 'from django.contrib import admin\n'), ((209, 235), 'django.contrib.admin.site.register', 'admin.site.register', (['Emoji'], {}), '(Emoji)\n', (228, 235), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# Regular expression to detect Japanese Tier-2 mobile phones.
JP_TIER2_MOBILE_USER_AGENT_RE = re.compile(
r'^(KDDI|DoCoMo|SoftBank|J-PHONE|Vodafone)')
# Regular expression to detect phones which prefer Shift_JIS charset.
# Some KDDI phones support UTF-8 but they have a bug encoding UTF-8 query
# parameters.
SJIS_PREFERRED_USER_AGENT_RE = re.compile(r'^KDDI')
def is_jp_tier2_mobile_phone(request):
"""Returns True if the user agent is a Japanese Tier-2 mobile phone."""
user_agent = request.headers.get('User-Agent')
return user_agent and JP_TIER2_MOBILE_USER_AGENT_RE.match(user_agent)
def prefer_sjis_charset(request):
"""Returns True if Shift_JIS charset should be used for the user agent."""
user_agent = request.headers.get('User-Agent')
return user_agent and SJIS_PREFERRED_USER_AGENT_RE.match(user_agent)
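# Illustrative behaviour with hypothetical user-agent strings (runs only when executed directly):
if __name__ == '__main__':
    print(bool(JP_TIER2_MOBILE_USER_AGENT_RE.match('DoCoMo/2.0 P903i')))     # True
    print(bool(JP_TIER2_MOBILE_USER_AGENT_RE.match('Mozilla/5.0 (Linux)')))   # False
    print(bool(SJIS_PREFERRED_USER_AGENT_RE.match('KDDI-CA31 UP.Browser')))   # True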
|
[
"re.compile"
] |
[((704, 758), 're.compile', 're.compile', (['"""^(KDDI|DoCoMo|SoftBank|J-PHONE|Vodafone)"""'], {}), "('^(KDDI|DoCoMo|SoftBank|J-PHONE|Vodafone)')\n", (714, 758), False, 'import re\n'), ((955, 974), 're.compile', 're.compile', (['"""^KDDI"""'], {}), "('^KDDI')\n", (965, 974), False, 'import re\n')]
|
from fastapi import APIRouter, Depends, Request
from sqlalchemy.orm import Session
from api import search
from ..dependencies import get_user
router = APIRouter()
# Dependency
def get_db(request: Request):
return request.state.db
@router.get("/api/v1/search/{key}")
def search_schools_and_contacts(key, db: Session = Depends(get_db), user: str = Depends(get_user)):
return search.json_search(key, db)
|
[
"api.search.json_search",
"fastapi.Depends",
"fastapi.APIRouter"
] |
[((152, 163), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (161, 163), False, 'from fastapi import APIRouter, Depends, Request\n'), ((326, 341), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (333, 341), False, 'from fastapi import APIRouter, Depends, Request\n'), ((355, 372), 'fastapi.Depends', 'Depends', (['get_user'], {}), '(get_user)\n', (362, 372), False, 'from fastapi import APIRouter, Depends, Request\n'), ((386, 413), 'api.search.json_search', 'search.json_search', (['key', 'db'], {}), '(key, db)\n', (404, 413), False, 'from api import search\n')]
|
# Stacked histogram of minutes spent per training type, broken down by month
import psycopg2
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import seaborn as sns
from matplotlib.pyplot import figure
# get session data summary with sport split
conn = psycopg2.connect(host="localhost", database="garmin_data", user="postgres", password="*****")
df = pd.read_sql_query("""select to_char(timestamp, 'YYYY-MM') as stamp, sum(total_timer_time / 60) as minutes_spent, sport
from session
group by to_char(timestamp, 'YYYY-MM'), sport
having sum(total_timer_time / 60) > 0
order by to_char(timestamp, 'YYYY-MM') desc""", conn)
# get min and max dates from the dataframe
min_date = datetime.strptime(min(df.stamp), '%Y-%m')
max_date = datetime.strptime(max(df.stamp), '%Y-%m')
n_max_date = max_date + pd.DateOffset(months=1)
# create a table with all months from min to max date
data = pd.DataFrame()
data['Dates'] = pd.date_range(start=min_date, end=n_max_date, freq='M')
data['Dates'] = data['Dates'].dt.strftime('%Y-%m')
# merge datasets
df_main = pd.merge(data, df, left_on='Dates', right_on='stamp', how='left', indicator=True)
df_main = df_main[['Dates', 'minutes_spent','sport']]
df_main = df_main.fillna(0)
# pivot table
df_pivot = pd.pivot_table(df_main, index='Dates', columns='sport', values='minutes_spent').reset_index()
df_pivot = df_pivot.fillna(0)
df_pivot = df_pivot[['Dates', 'cross_country_skiing', 'cycling', 'running', 'swimming', 'walking']]
# create stacked bar chart for monthly sports
df_pivot.plot(x='Dates', kind='bar', stacked=True, color=['r', 'y', 'g', 'b', 'k'])
# labels for x & y axis
plt.xlabel('Months', fontsize=20)
plt.ylabel('Minutes Spent', fontsize=20)
plt.legend(loc='upper left', fontsize=20)
for num in [69, 57, 45, 33, 21, 9]:
plt.axvline(linewidth=2, x=num, linestyle=':', color = 'grey')
# title of plot
plt.title('Minutes spent by Sport', fontsize=20)
plt.rcParams['figure.figsize'] = [24, 10]
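# Note: when this runs as a plain script (outside a notebook) the figure is never shown
# or saved; uncommenting the line below is one minimal way to display it.
# plt.show()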
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"pandas.date_range",
"pandas.pivot_table",
"pandas.merge",
"matplotlib.pyplot.legend",
"pandas.read_sql_query",
"matplotlib.pyplot.ylabel",
"pandas.DateOffset",
"matplotlib.pyplot.xlabel",
"psycopg2.connect"
] |
[((290, 387), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': '"""localhost"""', 'database': '"""garmin_data"""', 'user': '"""postgres"""', 'password': '"""*****"""'}), "(host='localhost', database='garmin_data', user='postgres',\n password='*****')\n", (306, 387), False, 'import psycopg2\n'), ((389, 765), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""select to_char(timestamp, \'YYYY-MM\') as stamp, sum(total_timer_time / 60) as minutes_spent, sport \n from session\n group by to_char(timestamp, \'YYYY-MM\'), sport\n having sum(total_timer_time / 60) > 0\n order by to_char(timestamp, \'YYYY-MM\') desc"""', 'conn'], {}), '(\n """select to_char(timestamp, \'YYYY-MM\') as stamp, sum(total_timer_time / 60) as minutes_spent, sport \n from session\n group by to_char(timestamp, \'YYYY-MM\'), sport\n having sum(total_timer_time / 60) > 0\n order by to_char(timestamp, \'YYYY-MM\') desc"""\n , conn)\n', (406, 765), True, 'import pandas as pd\n'), ((1016, 1030), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1028, 1030), True, 'import pandas as pd\n'), ((1047, 1102), 'pandas.date_range', 'pd.date_range', ([], {'start': 'min_date', 'end': 'n_max_date', 'freq': '"""M"""'}), "(start=min_date, end=n_max_date, freq='M')\n", (1060, 1102), True, 'import pandas as pd\n'), ((1182, 1268), 'pandas.merge', 'pd.merge', (['data', 'df'], {'left_on': '"""Dates"""', 'right_on': '"""stamp"""', 'how': '"""left"""', 'indicator': '(True)'}), "(data, df, left_on='Dates', right_on='stamp', how='left', indicator\n =True)\n", (1190, 1268), True, 'import pandas as pd\n'), ((1753, 1786), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Months"""'], {'fontsize': '(20)'}), "('Months', fontsize=20)\n", (1763, 1786), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1827), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Minutes Spent"""'], {'fontsize': '(20)'}), "('Minutes Spent', fontsize=20)\n", (1797, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1828, 1869), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(20)'}), "(loc='upper left', fontsize=20)\n", (1838, 1869), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2039), 'matplotlib.pyplot.title', 'plt.title', (['"""Minutes spent by Sport"""'], {'fontsize': '(20)'}), "('Minutes spent by Sport', fontsize=20)\n", (2000, 2039), True, 'import matplotlib.pyplot as plt\n'), ((930, 953), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (943, 953), True, 'import pandas as pd\n'), ((1911, 1971), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'linewidth': '(2)', 'x': 'num', 'linestyle': '""":"""', 'color': '"""grey"""'}), "(linewidth=2, x=num, linestyle=':', color='grey')\n", (1922, 1971), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1451), 'pandas.pivot_table', 'pd.pivot_table', (['df_main'], {'index': '"""Dates"""', 'columns': '"""sport"""', 'values': '"""minutes_spent"""'}), "(df_main, index='Dates', columns='sport', values='minutes_spent')\n", (1386, 1451), True, 'import pandas as pd\n')]
|
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
a = sorted(list(map(int, input().split())))
ans = 0
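# assuming a holds 3*n values sorted ascending: greedily take every second
# element from the top (indices 3n-2, 3n-4, ..., n)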
for i in range(n):
ans += a[(3 * n) - (2*i + 2)]
print(ans)
|
[
"sys.setrecursionlimit"
] |
[((38, 68), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (59, 68), False, 'import sys\n')]
|
import pandas as pd
from common.tflogs2pandas import tflog2pandas
import glob
df_results = pd.DataFrame()
filenames = glob.glob("output_data/tensorboard/model-*/PPO_1")
for filename in filenames:
print(filename)
df = tflog2pandas(filename)
df = df[df["metric"]=="time/fps"]
average_fps = df["value"].mean()
min_fps = df["value"].min()
print("average_fps: ", average_fps, ", min_fps: ", min_fps,)
df_results = df_results.append({
"path": filename,
"average_fps": average_fps,
"min_fps": min_fps,
}, ignore_index=True)
df_results.to_pickle("output_data/tmp/which_nodes_are_slow")
|
[
"pandas.DataFrame",
"common.tflogs2pandas.tflog2pandas",
"glob.glob"
] |
[((93, 107), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (105, 107), True, 'import pandas as pd\n'), ((120, 170), 'glob.glob', 'glob.glob', (['"""output_data/tensorboard/model-*/PPO_1"""'], {}), "('output_data/tensorboard/model-*/PPO_1')\n", (129, 170), False, 'import glob\n'), ((227, 249), 'common.tflogs2pandas.tflog2pandas', 'tflog2pandas', (['filename'], {}), '(filename)\n', (239, 249), False, 'from common.tflogs2pandas import tflog2pandas\n')]
|
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,FileField,SubmitField
from wtforms.validators import Required
class CommentForm(FlaskForm):
title = StringField('Comment title',validators= [Required()])
comment = TextAreaField('Comment review')
submit = SubmitField('submit')
class BlogForm(FlaskForm):
title = StringField('Blog title',validators= [Required()])
message = TextAreaField('Blog Message',validators=[Required()])
submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about yourself',validators=[Required()])
submit = SubmitField('submit')
|
[
"wtforms.SubmitField",
"wtforms.validators.Required",
"wtforms.TextAreaField"
] |
[((251, 282), 'wtforms.TextAreaField', 'TextAreaField', (['"""Comment review"""'], {}), "('Comment review')\n", (264, 282), False, 'from wtforms import StringField, TextAreaField, FileField, SubmitField\n'), ((296, 317), 'wtforms.SubmitField', 'SubmitField', (['"""submit"""'], {}), "('submit')\n", (307, 317), False, 'from wtforms import StringField, TextAreaField, FileField, SubmitField\n'), ((492, 513), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (503, 513), False, 'from wtforms import StringField, TextAreaField, FileField, SubmitField\n'), ((634, 655), 'wtforms.SubmitField', 'SubmitField', (['"""submit"""'], {}), "('submit')\n", (645, 655), False, 'from wtforms import StringField, TextAreaField, FileField, SubmitField\n'), ((224, 234), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (232, 234), False, 'from wtforms.validators import Required\n'), ((397, 407), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (405, 407), False, 'from wtforms.validators import Required\n'), ((466, 476), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (474, 476), False, 'from wtforms.validators import Required\n'), ((608, 618), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (616, 618), False, 'from wtforms.validators import Required\n')]
|
# -*- coding: utf-8 -*-
"""
The goal of this code is to support hosting a client library. This module
should in the end function similarly to the Mendeley Desktop.
Syncing
-------------------------------------------
Jim's next goals
----------------
1) Handle deleted IDs - needs an API update
2) Meta Data Editor
- nice query interface
- needs to handle local/dirty docs
- autodownload files when opening ...
3) Update by PMID ...
4) How to sync deleted ids?
Features
--------
1) Initializes a representation of the documents stored in a user's library
2) Synchronizes the local library with updates that have been made remotely
Usage
-----
from mendeley import client_library
cl = client_library.UserLibrary(verbose=True)
wtf = cl.has_docs([14581232,10529706,12345])
"""
#Standard Library Imports
from typing import Optional, Union, TypeVar, List
import pickle
from datetime import datetime
from timeit import default_timer as ctime
import os
import sys
import json
#Third Party Imports
import pandas as pd
from sqlalchemy import desc
# Local imports
from .api import API
from .db_tables import DB, Document
from . import errors
from . import models
from . import utils
from . import config
from .utils import display_class, quotes
# Optional Local Imports
#-----------------------------
#These need to be updated
#from .optional import rr
#from .optional import pdf_retrieval
#from . import db_interface
# from . import archive_library
fstr = utils.float_or_none_to_string
cld = utils.get_list_class_display
class LibraryOptions(object):
#TODO: Support default sync resolution mechanism
#TODO: Load options from file???? - GUI?
pass
class UserLibrary:
"""
Attributes
----------
"""
api : 'API'
db : 'DB'
user_name : 'str'
verbose : 'bool'
cleaner : 'LibraryCleaner'
def __init__(self, user_name=None, verbose=False, sync=True,
force_new=False):
"""
Inputs
------
user_name : string (default None)
If no user is specified the default user is loaded from the
configuration file.
verbose : bool (default False)
sync : bool (default True)
force_new : bool (default False)
If true the library is not loaded from disk.
"""
self.api = API(user_name=user_name,verbose=verbose)
self.user_name = self.api.user_name
self.verbose = verbose
# path handling
# -------------
root_path = config.get_save_root(['client_library'], True)
save_name = utils.user_name_to_file_name(self.user_name) + '.pickle'
self.file_path = os.path.join(root_path, save_name)
self.db = DB(self.user_name)
self.db_session = self.db.get_session()
self.cleaner = LibraryCleaner(self.db)
if sync:
self.sync()
def __repr__(self):
pv = ['api', cld(self.api),
'db', cld(self.db),
'dirty_db', self.dirty_db,
'user_name', self.user_name,
'file_path', self.file_path,
'sync_result',cld(self.sync_result),
'verbose', self.verbose,
              'methods', '--------------------',
'has_docs','Returns whether library has the documents']
return utils.property_values_to_string(pv)
def has_docs(self,ids,type='pmid'):
"""
Parameters
----------
ids :
type :
"""
output = []
session = self.db_session
if type == 'pmid':
for id in ids:
temp = session.query(self.db.Document.pmid).filter_by(pmid = id).first()
output.append(bool(temp))
elif type =='doi':
for id in ids:
temp = session.query(self.db.Document.doi).filter_by(doi = id).first()
output.append(bool(temp))
elif type == 'arxiv':
            for id in ids:
                temp = session.query(self.db.Document.arxiv).filter_by(arxiv=id).first()
                output.append(bool(temp))
else:
raise Exception('Unrecognized id type')
return output
def sync(self,verbose=None):
"""
Syncs the library with the Mendeley server.
Parameters
----------
verbose : bool (default, inherit from class value, self.verbose)
TODO:
? How do we know if something has been restored from the trash?
"""
if verbose is None:
verbose = self.verbose
self.sync_result = Sync(self.api, self.db, verbose=verbose)
# def archive(self):
# archivist = archive_library.Archivist(library=self, api=self.api)
# archivist.archive()
def get_documents(self,
query_dict,
as_dict=False):
session = self.db_session
temp = session.query(self.db.Document).filter_by(**query_dict)
#TODO: Support hiding deleted and trashed ...
docs = temp.all()
if docs and as_dict:
return [x.as_dict for x in docs]
else:
return docs
def get_document(self,
query_dict,
as_dict=False):
"""
Returns the document (i.e. metadata) based on a specified identifier.
Parameters
----------
as_dict : bool (default False)
- True, returned as dictionary
- False, SQLAlchemy objects
Improvements
------------
        - add methods that return counts or partial queries for query building
Returns
-------
Examples
--------
from mendeley import client_library
c = client_library.UserLibrary(verbose=True)
doc = c.get_document({'title':'magazine article title'})
"""
session = self.db_session
temp = session.query(self.db.Document).filter_by(**query_dict)
doc = temp.first()
if doc and as_dict:
return doc.as_dict()
else:
return doc
def add_to_library(self,
doi=None,
pmid=None,
check_in_lib=False,
add_pdf=True,
file_path=None):
"""
JAH: I think this method is still under development ...
Parameters
----------
doi : string
check_in_lib : bool
If true,
add_pdf : bool
Improvements
------------
*
- allow adding via PMID
- pdf entry should be optional with default true
- also need to handle adding pdf if possible but no error
if not possible
"""
#JAH: Why doesn't this take in any inputs on the check???
if check_in_lib and self.check_for_document():
raise errors.DuplicateDocumentError('Document already exists in library.')
#----------------------------------------------------------------------
# Get paper information from DOI
"""
Even then, this requires a bit of thinking. Why are we asking rr for
paper information? Perhaps we need another repository ...
- Pubmed
- Crossref
- others????
"""
paper_info = rr.retrieve_all_info(input=doi, input_type='doi')
# Turn the BaseEntry object into a formatted dict for submission
# to the Mendeley API
formatted_entry = self._format_doc_entry(paper_info.entry)
# Create the new document
new_document = self.api.documents.create(formatted_entry)
"""
add_pdf
* I want to be able to specify the path to the file to add.
* Perhaps instead we want:
pdf = file_path
pdf = 'must_retrieve'
pdf = 'retrieve_or_request' - If not available, make a request for it
            pdf = 'retrieve_if_possible'
I'm not thrilled with this specific interface, but I'd like something
like this.
We might want an additional package that focuses on retrieving pdfs.
The big question is how to support letting these interfaces interact
efficiently without doing things multiple times. We can answer this
at a later time.
pdf retrieval:
- Interlibrary loan
- ScholarSolutions
- PyPub
"""
# Get pdf
if add_pdf:
pdf_content = pdf_retrieval.get_pdf(paper_info)
new_document.add_file({'file' : pdf_content})
def update_file_from_local(self, doi=None, pmid=None):
"""
This is for updating a file in Mendeley without losing the annotations.
The file must be saved somewhere locally, and the file path is selected
by using a pop up file selection window.
Parameters
----------
doi - DOI of document in library to update
pmid - PMID of document in library to update
"""
if doi is None and pmid is None:
raise KeyError('Please enter a DOI or PMID for the updating document.')
document = self.get_document(doi=doi, pmid=pmid, return_json=True)
if document is None:
raise errors.DOINotFoundError('Could not locate DOI in library.')
new_file_path = self._file_selector()
if new_file_path is None:
return
with open(new_file_path, 'rb') as file:
file_content = file.read()
doc_id = document.get('id')
saved_annotations_string = self.api.annotations.get(document_id=doc_id)
saved_annotations = json.loads(saved_annotations_string)
if isinstance(saved_annotations, list):
saved_annotations = saved_annotations[0]
has_file = document.get('file_attached')
if has_file:
_, _, file_id = self.api.files.get_file_content_from_doc_id(doc_id=doc_id, no_content=True)
self.api.files.delete(file_id=file_id)
params = {'title': document.get('title'), 'id': doc_id}
self.api.files.link_file(file=file_content, params=params)
# Reconfirm that the file was added
updated = self.get_document(doi=doi, pmid=pmid, return_json=True)
has_file = updated.get('file_attached')
if not has_file:
raise FileNotFoundError('File was not attached.')
new_annotations_string = self.api.annotations.get(document_id=doc_id)
if new_annotations_string is None or saved_annotations_string != new_annotations_string:
self.api.annotations.create(annotation_body=saved_annotations)
def _file_selector(self):
#TODO: Test this with non * imports
#
#Why is this line needed???
app = QApplication(sys.argv)
dialog = QFileDialog()
# dialog.setFileMode(QFileDialog.DirectoryOnly)
dialog.setViewMode(QFileDialog.List)
dialog.setDirectory(os.path.expanduser('~'))
if dialog.exec_():
filenames = dialog.selectedFiles()
return filenames[0]
else:
return None
def _format_doc_entry(self, entry):
"""
Mendeley API has specific input formatting when creating a document.
- Parses author names and separates into separate "first_name" and
"last_name" fields.
- Restricts keywords from being > 50 characters. If one is found,
it is split by spaces and saved as separate keywords.
- Changes "publication" to "publisher" to fit syntax.
- Sets "type" to "journal"
- Saves DOI within "identifiers" field.
Parameters
----------
entry : BaseEntry object
See pypub.scrapers.base_objects.py
Unformatted paper information, usually from PaperInfo class
Returns
-------
entry : dict
Paper information with proper formatting applied.
"""
if not isinstance(entry, dict):
entry = entry.__dict__
# Format author names
authors = entry.get('authors')
formatted_author_names = None
if authors is not None:
if isinstance(authors[0], str):
author_names = [x for x in authors]
elif isinstance(authors[0], dict):
author_names = [x.get('name') for x in authors]
else:
author_names = [x.name for x in authors]
formatted_author_names = []
# Parse author names
for name in author_names:
name_dict = dict()
name = name.strip()
parts = name.split(' ')
# If format is "firstname middleinitial. lastname"
if '.' in name and len(parts) == 3:
name_dict['first_name'] = parts[0]
name_dict['last_name'] = parts[2]
# If format is "lastname, firstname"
elif ',' in name:
name_dict['first_name'] = parts[1]
name_dict['last_name'] = parts[0]
# If format is "lastname firstinitial"
elif len(parts) == 2 and '.' in parts[1]:
name_dict['first_name'] = parts[1]
name_dict['last_name'] = parts[0]
# If format is only "lastname"
elif len(parts) == 1:
name_dict['last_name'] = parts[0]
name_dict['first_name'] = ''
# If there are multiple initials
elif len(parts) > 3:
initials = ''
for part in parts:
if '.' in part:
initials += part
else:
name_dict['last_name'] = part
name_dict['first_name'] = initials
# Otherwise assume format is "firstname lastname" or "firstinitial. lastname"
else:
name_dict['first_name'] = parts[0]
name_dict['last_name'] = parts[1]
formatted_author_names.append(name_dict)
# Make sure keywords are <= 50 characters
kw = entry.get('keywords')
if kw is not None:
# Check if it's one long string, and split if so
if isinstance(kw, str):
kw = kw.split(', ')
to_remove = []
for keyword in kw:
if len(keyword) > 50:
to_remove.append(keyword)
smaller_keywords = keyword.split(' ')
for word in smaller_keywords:
kw.append(word)
for long_word in to_remove:
kw.remove(long_word)
entry['keywords'] = kw
# Get rid of alpha characters in Volume field
vol = entry.get('volume')
if vol is not None:
entry['volume'] = ''.join(c for c in vol if not c.isalpha())
# Get rid of alpha characters in Year field
year = entry.get('year')
if year is not None:
entry['year'] = ''.join(c for c in year if not c.isalpha())
if entry['year'] == '':
entry['year'] = None
doi = entry.get('doi')
if doi is not None:
doi = doi.lower()
entry['identifiers'] = {'doi' : doi}
entry['authors'] = formatted_author_names
entry['publisher'] = entry['publication']
entry['type'] = 'journal'
return entry
class Sync(object):
"""
This object should perform the syncing and include some
debugging information as well.
Attributes
----------
raw : json
df :
"""
def __init__(self, api:API, db:DB, verbose=False):
"""
Inputs
------
api :
raw_json :
"""
self.db = db
self.api = api
self.verbose = verbose
self.verbose_print("Starting sync")
#What happens to trashed documents?
        #- we can request trashed documents ...
#There is no notification that a document has been trashed ...
#- we need to request trashed documents ...
#deleted_since
session = db.get_session()
#=> I want to get the times
#wtf = session.query(db.)
import pdb
pdb.set_trace()
#----------------------------------------------------------------------
#TODO: Does our code support an empty database?
last_modified = session.query(db.Document.last_modified).order_by(desc('last_modified')).first()
last_modified = last_modified[0]
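        # only ask the server for documents modified after the newest timestamp
        # already stored locally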
new_docs = api.documents.get(modified_since=last_modified,limit=100,return_type='json')
result = db.add_documents(new_docs,session=session,drop_time=last_modified)
if result.n_different > 0:
self.verbose_print(result.get_summary_string())
else:
self.verbose_print("No new documents found in sync")
count = 0
while api.has_next_link:
count += 100
#TODO: Fix this to occur after we get the new ones
print("Requesting more docs starting at {}".format(count))
docs_to_add = api.next()
r2 = db.add_documents(docs_to_add,session=session,drop_time=last_modified)
self.verbose_print(r2.get_summary_string())
result.merge(r2)
self.add_result = result
session.commit()
#Deleted docs
#----------------------------------------------------------------------
deleted_docs = api.documents.get_deleted(return_type='json')
#Handling updated docs - sync to server
#----------------------------------------------------------------------
#Note, conflicts have already been handled at this point ...
dirty_docs = session.query(db.Document).filter_by(is_dirty=True).all() # type: List[Document]
if dirty_docs:
            self.verbose_print("Pushing %d locally modified documents to the server" % len(dirty_docs))
for doc in dirty_docs:
if doc.is_trashed:
pass
elif doc.is_deleted:
pass
else:
#Update
temp = doc.as_dict()
r = api.documents.update(temp['id'], temp)
doc.commit(_is_dirty=False)
#Look for deleted docs
#Look for trash
session.close()
# #What if in trash?
# #What if deleted????
# temp = api.documents.get_by_id()
#Now, let's look at dirty docs ...
#Note, any conflicts will have already been handled ...
self.verbose_print("Sync completed")
#trashed_docs = api.trash.get()
"""
- /documents/?modified_since=2020-01-22T19:36:03.000Z&limit=100&view=all HTTP/1.1
- GET /documents/?limit=100&deleted_since=2020-01-22T19:36:03.000Z HTTP/1.1
- GET /trash/?modified_since=2020-01-22T19:36:03.000Z&limit=100&view=all HTTP/1.1
- GET /files/?include_trashed=true&limit=100&deleted_since=2020-01-22T19:36:09.000Z HTTP/1.1
- GET /files/?added_since=2020-01-22T19:36:09.000Z&include_trashed=true&limit=100 HTTP/1.1
- GET /annotations/?modified_since=2020-01-22T19:36:09.000Z&limit=200&include_trashed=true HTTP/1.1
- GET /annotations/?limit=200&include_trashed=true&deleted_since=2020-01-22T19:36:10.000Z HTTP/1.1
- GET /recently_read/ HTTP/1.1- POST /events/_batch/ HTTP/1.1
"""
def __repr__(self):
return display_class(self,
[ 'db', cld(self.db),
'api', cld(self.api),
'verbose', self.verbose,
'add_result',cld(self.add_result)])
def update_sync(self):
"""
Update Steps
------------
1.
"""
self.verbose_print('Running "UPDATE SYNC"')
start_sync_time = ctime()
# Let's work with everything as a dataframe
self.docs = _raw_to_data_frame(self.raw_json)
# Determine the document that was updated most recently. We'll ask for
# everything that changed after that time. This avoids time sync
# issues with the server and the local computer since everything
# is done relative to the timestamps from the server.
newest_modified_time = self.docs['last_modified'].max()
self.newest_modified_time = newest_modified_time
# The problem with the above approach is that Mendeley returns
# documents updated since AND at 'newest_modified_time'. This
# means that the call always returns >= 1 document.
# Try adding a second to 'newest_modified_time'
later_modified_time = newest_modified_time + pd.Timedelta('00:00:01')
# Remove old ids
#------------------------------------
self.get_trash_ids()
#self.get_deleted_ids(newest_modified_time)
self.get_deleted_ids(later_modified_time)
self.remove_old_ids()
# Process new and updated documents
# ------------------------------------
updates_and_new_entries_start_time = ctime()
self.verbose_print('Checking for modified or new documents')
#self.get_updates_and_new_entries(newest_modified_time)
self.get_updates_and_new_entries(later_modified_time)
self.time_modified_processing = ctime() - updates_and_new_entries_start_time
self.verbose_print('Done updating modified and new documents')
self.raw_json = self.docs['json'].tolist()
self.time_update_sync = ctime() - start_sync_time
self.verbose_print('Done running "UPDATE SYNC" in %s seconds' % fstr(self.time_update_sync))
def get_updates_and_new_entries(self, newest_modified_time):
"""
# 3) check modified since - add/update as necessary
#-------------------------------------------------
# I think for now to keep things simple we'll relate everything
# to the newest last modified value, rather than worrying about
# mismatches in time between the client and the server
"""
start_modified_time = ctime()
doc_set = self.api.documents.get(modified_since=newest_modified_time, view='all',limit=0)
nu_docs_as_json = [x.json for x in doc_set.docs]
self.new_and_updated_docs = doc_set.docs
self.time_modified_check = ctime() - start_modified_time
if len(nu_docs_as_json) == 0:
return
self.verbose_print('Request returned %d updated or new docs' % len(nu_docs_as_json))
df = _raw_to_data_frame(nu_docs_as_json)
is_new_mask = df['created'] > newest_modified_time
new_rows_df = df[is_new_mask]
updated_rows_df = df[~is_new_mask]
# Log the new entries in the database
#Old code
# #for x in range(len(new_rows_df)):
# row = new_rows_df.iloc[x]
# db_interface.add_to_db(row)
if len(new_rows_df) > 0:
self.verbose_print('%d new documents found' % len(new_rows_df))
self.docs = self.docs.append(new_rows_df)
self.verbose_print('Updating database with new entries')
# Log the new entries in the database
for x in range(len(new_rows_df)):
row = new_rows_df.iloc[x]
db_interface.add_to_db(row)
#JAH TODO: I would prefer to have the message of # updated
#first then messages about the dbupdates
#
# At a quick glance I need to look more closely at the indices work
# Log the updated entries in the database
for x in range(len(updated_rows_df)):
row = updated_rows_df.iloc[x]
db_interface.update_db_entry(row)
if len(updated_rows_df) > 0:
self.verbose_print('%d updated documents found' % len(updated_rows_df))
in_old_mask = updated_rows_df.index.isin(self.docs.index)
if not in_old_mask.all():
print('Logic error, updated entries are not in the original')
raise Exception('Logic error, updated entries are not in the original')
updated_indices = updated_rows_df.index
self.docs.drop(updated_indices, inplace=True)
self.docs = pd.concat([self.docs, updated_rows_df])
def get_trash_ids(self):
"""
Here we are looking for documents that have been moved to the trash.
??? Can we check the trash that's been moved back to the main
??? => would this show up as an update?
"""
trash_start_time = ctime()
self.verbose_print('Checking trash')
trash_set = self.api.trash.get(limit=0, view='ids')
self.trash_ids = trash_set.docs
self.verbose_print('Finished checking trash, %d documents found' % len(self.trash_ids))
self.time_trash_retrieval = ctime() - trash_start_time
def get_deleted_ids(self, newest_modified_time):
"""
"""
# 2) Check deleted
deletion_start_time = ctime()
self.verbose_print('Requesting deleted file IDs')
#TODO: What happens if newest_modified_time is empty????
# => Do we even run this code???
temp = self.api.documents.get(deleted_since=newest_modified_time,limit=0)
self.deleted_ids = temp.docs
self.verbose_print('Done requesting deleted file IDs, %d found' % len(self.deleted_ids))
self.time_deleted_check = ctime() - deletion_start_time
def remove_old_ids(self):
"""
JAH: When is this called????
"""
# Removal of ids
# --------------
ids_to_remove = self.trash_ids + self.deleted_ids
if len(ids_to_remove) > 0:
delete_mask = self.docs.index.isin(ids_to_remove)
keep_mask = ~delete_mask
self.n_docs_removed = sum(delete_mask)
self.docs = self.docs[keep_mask]
def verbose_print(self, msg):
if self.verbose:
print(msg)
class LibraryCleaner():
db : 'DB'
def __init__(self,db : DB):
self.db = db
def get_docs_no_pmid(self,since=None,sort=None,limit=None):
"""
sort:
'old_first'
'new_first'
:param since:
:param sort:
:param limit:
:return:
"""
#TODO: implement since ...
session = self.db.get_session()
Doc = self.db.Document
q = session.query(Doc).filter_by(pmid=None)
        if sort == 'new_first' or sort is None:
            q = q.order_by(Doc.last_modified)
        else:
            q = q.order_by(desc(Doc.last_modified))
        if limit is not None:
            q = q.limit(limit)
#desc
wtf = q.all()
import pdb
pdb.set_trace()
pass
def parse_datetime(x):
return datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%fZ")
# def datetime_to_string(x):
# return x.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def parse_issn(x):
# This value is not necessarily clean
# e.g 17517214 => 1751-7214???
try:
return x.get('issn', '')
except:
return ''
def parse_pmid(x):
try:
return x.get('pmid', '')
except:
return ''
def parse_doi(x):
try:
return x.get('doi', '').lower()
except:
return ''
def raise_(ex):
raise ex
|
[
"os.path.expanduser",
"json.loads",
"timeit.default_timer",
"datetime.datetime.strptime",
"pdb.set_trace",
"sqlalchemy.desc",
"pandas.Timedelta",
"os.path.join",
"pandas.concat"
] |
[((28339, 28384), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S.%fZ"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S.%fZ')\n", (28356, 28384), False, 'from datetime import datetime\n'), ((2819, 2853), 'os.path.join', 'os.path.join', (['root_path', 'save_name'], {}), '(root_path, save_name)\n', (2831, 2853), False, 'import os\n'), ((10241, 10277), 'json.loads', 'json.loads', (['saved_annotations_string'], {}), '(saved_annotations_string)\n', (10251, 10277), False, 'import json\n'), ((17255, 17270), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (17268, 17270), False, 'import pdb\n'), ((21055, 21062), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (21060, 21062), True, 'from timeit import default_timer as ctime\n'), ((22312, 22319), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (22317, 22319), True, 'from timeit import default_timer as ctime\n'), ((23361, 23368), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (23366, 23368), True, 'from timeit import default_timer as ctime\n'), ((26003, 26010), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (26008, 26010), True, 'from timeit import default_timer as ctime\n'), ((26465, 26472), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (26470, 26472), True, 'from timeit import default_timer as ctime\n'), ((28269, 28284), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (28282, 28284), False, 'import pdb\n'), ((11592, 11615), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (11610, 11615), False, 'import os\n'), ((21906, 21930), 'pandas.Timedelta', 'pd.Timedelta', (['"""00:00:01"""'], {}), "('00:00:01')\n", (21918, 21930), True, 'import pandas as pd\n'), ((22559, 22566), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (22564, 22566), True, 'from timeit import default_timer as ctime\n'), ((22765, 22772), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (22770, 22772), True, 'from timeit import default_timer as ctime\n'), ((23634, 23641), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (23639, 23641), True, 'from timeit import default_timer as ctime\n'), ((25667, 25706), 'pandas.concat', 'pd.concat', (['[self.docs, updated_rows_df]'], {}), '([self.docs, updated_rows_df])\n', (25676, 25706), True, 'import pandas as pd\n'), ((26297, 26304), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (26302, 26304), True, 'from timeit import default_timer as ctime\n'), ((26900, 26907), 'timeit.default_timer', 'ctime', ([], {}), '()\n', (26905, 26907), True, 'from timeit import default_timer as ctime\n'), ((28110, 28133), 'sqlalchemy.desc', 'desc', (['Doc.last_modified'], {}), '(Doc.last_modified)\n', (28114, 28133), False, 'from sqlalchemy import desc\n'), ((17486, 17507), 'sqlalchemy.desc', 'desc', (['"""last_modified"""'], {}), "('last_modified')\n", (17490, 17507), False, 'from sqlalchemy import desc\n')]
|
"""
Django settings for imagr_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# settings for MailGun email
import credentials
credentials.set_credentials()
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_PORT = 25
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
import deployment
deployment.setup_deployment()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ['DEBUG']
TEMPLATE_DEBUG = os.environ['TEMPLATE_DEBUG']
# There's only one host entry the Django app needs when it's running on
# gunicorn behind an Nginx server, and that is '*', because Nginx is
# doing all the filtering for us with our entry in the nginx.conf file.
# Django still wants ALLOWED_HOSTS to be a list, so:
ALLOWED_HOSTS = list(os.environ['ALLOWED_HOSTS'])
# CSRF cookie settings defaults should be permissible for this demonstration,
# because we don't need to handle a certificate yet.
# In reality we'd want to use a certificate and set them to True
# via the deployment file.
# CSRF_COOKIE_SECURE = os.environ['CSRF_COOKIE_SECURE']
# SESSION_COOKIE_SECURE = os.environ['SESSION_COOKIE_SECURE']
CONN_MAX_AGE = os.environ['CONN_MAX_AGE']
# Application definition
LOGIN_REDIRECT_URL = "imagr_app:front_page"
#LOGIN_URL = "RegistrationView"
#LOGOUT_URL
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'imagr_app',
'registration',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'imagr_site.urls'
WSGI_APPLICATION = 'imagr_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'imagr',
'USER': 'imagr',
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': 'localhost',
}
}
# Thanks to Hybrid at:
# http://stackoverflow.com/questions/21978562/django-test-error-permission-denied-to-create-database-using-heroku-postgres
import sys
if 'test' in sys.argv:
DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'imagr_app.ImagrUser'
#LOGIN_URL = "/"
ACCOUNT_ACTIVATION_DAYS = 60
# These variables are set this way for deployment (overwriting values
# set above (like DEBUG="True")
STATIC_ROOT = "static/"
MEDIA_ROOT = "media/"
#DEBUG = False
ALLOWED_HOSTS = ['*',]
# There is a risk that the greater security of setting
# these to True will not work unless we get an SSL
# certificate, and we don't know yet whether Amazon EC2
# will give us a certificate or let us use one of theirs
# CSRF_COOKIE_SECURE = "True"
# SESSION_COOKIE_SECURE = "True"
# Performance Optimizations
|
[
"deployment.setup_deployment",
"os.path.dirname",
"credentials.set_credentials"
] |
[((433, 462), 'credentials.set_credentials', 'credentials.set_credentials', ([], {}), '()\n', (460, 462), False, 'import credentials\n'), ((883, 912), 'deployment.setup_deployment', 'deployment.setup_deployment', ([], {}), '()\n', (910, 912), False, 'import deployment\n'), ((358, 383), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (373, 383), False, 'import os\n')]
|
import cv2
import numpy as np
import time
'''
Parameters Used inside Code
'''
#Gaussian kernel size used for blurring
G_kernel_size = (3,3)
#canny thresholding parameters
canny_u_threshold = 200
canny_l_threshold = 80
# define the upper and lower boundaries of the HSV pixel
# intensities to be considered 'skin'
lower = np.array([0, 48, 80], dtype = "uint8")
upper = np.array([20, 255, 255], dtype = "uint8")
black_lower = np.array([0, 0, 0], dtype = "uint8")
black_upper = np.array([180, 255, 30], dtype = "uint8")
#threshhold for % of skin area detected
skinThresh = 0.00025
#Minimum number of white pixels needed for square to be counted as occupied
min_white_count = 1
#minimum number of black detected pixels in square
min_black_pixels = 200
|
[
"numpy.array"
] |
[((325, 361), 'numpy.array', 'np.array', (['[0, 48, 80]'], {'dtype': '"""uint8"""'}), "([0, 48, 80], dtype='uint8')\n", (333, 361), True, 'import numpy as np\n'), ((372, 411), 'numpy.array', 'np.array', (['[20, 255, 255]'], {'dtype': '"""uint8"""'}), "([20, 255, 255], dtype='uint8')\n", (380, 411), True, 'import numpy as np\n'), ((430, 464), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': '"""uint8"""'}), "([0, 0, 0], dtype='uint8')\n", (438, 464), True, 'import numpy as np\n'), ((481, 520), 'numpy.array', 'np.array', (['[180, 255, 30]'], {'dtype': '"""uint8"""'}), "([180, 255, 30], dtype='uint8')\n", (489, 520), True, 'import numpy as np\n')]
|
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
import traceback
from mclib.visual_zone import VisualZone
class LayerItem(QGraphicsRectItem):
def __init__(self, room, layer_index, renderer, main_window):
super().__init__()
self.room = room
self.layer_index = layer_index
self.renderer = renderer
self.rom = room.rom
self.main_window = main_window
try:
self.render_layer()
except Exception as e:
stack_trace = traceback.format_exc()
error_message = "Error rendering layer in room %02X-%02X:\n" % (room.area.area_index, room.room_index)
error_message += str(e) + "\n\n" + stack_trace
print(error_message)
def layer_clicked(self, x, y, button):
if x < 0 or y < 0 or x >= self.room.width or y >= self.room.height:
return
tile_x = x//0x10
tile_y = y//0x10
x = tile_x*0x10
y = tile_y*0x10
curr_tileset_scene = self.main_window.selected_tileset_graphics_scene
if button == Qt.LeftButton:
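      # left click: stamp the tile block currently selected in the tileset scene onto this layer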
for x_off in range(curr_tileset_scene.selection_w):
for y_off in range(curr_tileset_scene.selection_h):
curr_tile_x_on_layer = tile_x + x_off
curr_tile_y_on_layer = tile_y + y_off
curr_x_on_layer = curr_tile_x_on_layer*0x10
curr_y_on_layer = curr_tile_y_on_layer*0x10
if curr_x_on_layer >= self.room.width:
continue
if curr_y_on_layer >= self.room.height:
continue
tile_index_16x16 = curr_tileset_scene.selected_tile_indexes[x_off + y_off*curr_tileset_scene.selection_w]
tile_pixmap = self.get_tile_pixmap_by_16x16_index(tile_index_16x16, curr_x_on_layer, curr_y_on_layer)
tile_item = self.tile_graphics_items_by_pos[curr_tile_x_on_layer][curr_tile_y_on_layer]
tile_item.setPixmap(tile_pixmap)
room_width_in_16x16_tiles = self.room.width//16
tile_index_on_layer = curr_tile_y_on_layer*room_width_in_16x16_tiles + curr_tile_x_on_layer
self.layer.data[tile_index_on_layer] = tile_index_16x16
self.layer.has_unsaved_changes = True
elif button == Qt.RightButton:
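      # right click: pick up the tile under the cursor and select it in the tileset scene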
room_width_in_16x16_tiles = self.room.width//16
tile_index_on_layer = tile_y*room_width_in_16x16_tiles + tile_x
tile_index_on_tileset = self.layer.data[tile_index_on_layer]
curr_tileset_scene.select_tile_by_index(tile_index_on_tileset)
def render_layer(self):
room = self.room
area = room.area
layer_index = self.layer_index
if room.area.uses_256_color_bg1s:
if layer_index == 2:
self.render_layer_mapped(color_mode=256)
else:
# Their BG1s may be unused? They seem to error out when trying to render them. TODO figure them out
pass
else:
if layer_index == 3:
if area.get_gfx_asset_list(room.gfx_index).tile_mappings_8x8[layer_index] is None:
return
self.render_layer_mapped(color_mode=16)
elif room.layers_asset_list.tile_mappings_8x8[layer_index] is not None:
self.render_layer_mapped(color_mode=16)
else:
self.render_layer_16_color()
def render_layer_16_color(self):
room = self.room
area = room.area
layer_index = self.layer_index
self.tile_graphics_items_by_pos = []
for tile_x in range(room.width//0x10):
self.tile_graphics_items_by_pos.append([])
for tile_y in range(room.height//0x10):
self.tile_graphics_items_by_pos[tile_x].append(None)
gfx_asset_list = area.get_gfx_asset_list(room.gfx_index)
orig_gfx_data = gfx_asset_list.gfx_data
if layer_index in [1, 3]:
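      # BG1 and BG3 take their 8x8 tiles from the second half of the graphics data (skip the first 0x4000 bytes)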
self.gfx_data = orig_gfx_data.read_raw(0x4000, len(orig_gfx_data)-0x4000)
else:
self.gfx_data = orig_gfx_data
self.palettes = self.renderer.generate_palettes_for_area_by_gfx_index(room.area, room.gfx_index)
self.tileset_data = room.area.tilesets_asset_list.tileset_datas[layer_index]
if self.tileset_data is None:
return
self.layer = room.layers_asset_list.layers[layer_index]
if self.layer is None:
raise Exception("Layer BG%d has no layer data" % layer_index)
if len(self.layer.data) == 0:
raise Exception("Layer BG%d has zero-length layer data" % layer_index)
if self.layer.data[0] == 0xFFFF:
# No real layer data here
return
self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids = {}
room_width_in_16x16_tiles = room.width//16
self.cached_tile_pixmaps_by_16x16_index = {}
for i in range(len(self.layer.data)):
tile_index_16x16 = self.layer.data[i]
x = (i % room_width_in_16x16_tiles)*16
y = (i // room_width_in_16x16_tiles)*16
tile_pixmap = self.get_tile_pixmap_by_16x16_index(tile_index_16x16, x, y)
tile_item = QGraphicsPixmapItem(tile_pixmap, self)
tile_item.setPos(x, y)
self.tile_graphics_items_by_pos[x//0x10][y//0x10] = tile_item
def get_tile_pixmap_by_16x16_index(self, tile_index_16x16, x, y):
if tile_index_16x16 in self.cached_tile_pixmaps_by_16x16_index:
tile_pixmap = self.cached_tile_pixmaps_by_16x16_index[tile_index_16x16]
else:
tile_pixmap = self.render_tile_pixmap_by_16x16_tile_index(tile_index_16x16, x, y)
self.cached_tile_pixmaps_by_16x16_index[tile_index_16x16] = tile_pixmap
return tile_pixmap
def render_tile_pixmap_by_16x16_tile_index(self, tile_index_16x16, x, y):
room = self.room
layer_index = self.layer_index
gfx_data = self.gfx_data
palettes = self.palettes
zone_ids = []
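    # visual zones overlapping this tile can override the palette group and patch in extra graphics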
if self.room.zone_lists:
zone_ids = VisualZone.get_zone_ids_overlapping_point(self.room.zone_lists, x, y)
if zone_ids:
gfx_data = gfx_data.copy()
for zone_id in zone_ids:
zone_data = room.visual_zone_datas[zone_id]
if zone_data.palette_group_index is not None:
palettes = self.renderer.generate_palettes_from_palette_group_by_index(zone_data.palette_group_index)
for zone_gfx_data_ptr, zone_gfx_load_offset in zone_data.gfx_load_datas:
if layer_index in [1, 3]:
zone_gfx_load_offset -= 0x4000
if zone_gfx_load_offset < 0:
continue
zone_gfx_data = self.rom.read_raw(zone_gfx_data_ptr, 0x1000)
gfx_data.write_raw(zone_gfx_load_offset, zone_gfx_data)
tile_image_16x16 = QImage(16, 16, QImage.Format_ARGB32)
tile_image_16x16.fill(0)
painter = QPainter(tile_image_16x16)
zone_ids_tuple = tuple(zone_ids)
if zone_ids_tuple not in self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids:
self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids[zone_ids_tuple] = {}
cached_8x8_tile_images_by_tile_attrs = self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids[zone_ids_tuple]
try:
for tile_8x8_i in range(4):
tile_attrs = self.tileset_data[tile_index_16x16*4 + tile_8x8_i]
horizontal_flip = (tile_attrs & 0x0400) > 0
vertical_flip = (tile_attrs & 0x0800) > 0
# Remove flip bits so all 4 orientations can be cached together as one.
tile_attrs &= (~0x0C00)
if tile_attrs in cached_8x8_tile_images_by_tile_attrs:
data = cached_8x8_tile_images_by_tile_attrs[tile_attrs]
else:
pil_image = self.renderer.render_tile_by_tile_attrs(tile_attrs, gfx_data, palettes)
data = pil_image.tobytes('raw', 'BGRA')
cached_8x8_tile_images_by_tile_attrs[tile_attrs] = data
# For some reason, QImages can't be cached safely, they would become corrupted looking.
# So cache just the image data instead.
tile_image_8x8 = QImage(data, 8, 8, QImage.Format_ARGB32)
if horizontal_flip and vertical_flip:
tile_image_8x8 = tile_image_8x8.transformed(QTransform.fromScale(-1, -1))
elif horizontal_flip:
tile_image_8x8 = tile_image_8x8.transformed(QTransform.fromScale(-1, 1))
elif vertical_flip:
tile_image_8x8 = tile_image_8x8.transformed(QTransform.fromScale(1, -1))
x_on_16x16_tile = (tile_8x8_i % 2)*8
y_on_16x16_tile = (tile_8x8_i // 2)*8
painter.drawImage(x_on_16x16_tile, y_on_16x16_tile, tile_image_8x8)
except:
# Need to properly end the painter or the program will crash
painter.end()
raise
painter.end()
tile_pixmap = QPixmap.fromImage(tile_image_16x16)
return tile_pixmap
def render_layer_mapped(self, color_mode=256):
room = self.room
layer_index = self.layer_index
palettes = self.renderer.generate_palettes_for_area_by_gfx_index(room.area, room.gfx_index)
layer_image = self.renderer.render_layer_mapped(self.room, palettes, layer_index, color_mode=color_mode)
data = layer_image.tobytes('raw', 'BGRA')
qimage = QImage(data, layer_image.size[0], layer_image.size[1], QImage.Format_ARGB32)
layer_pixmap = QPixmap.fromImage(qimage)
graphics_item = QGraphicsPixmapItem(layer_pixmap, self)
|
[
"mclib.visual_zone.VisualZone.get_zone_ids_overlapping_point",
"traceback.format_exc"
] |
[((5739, 5808), 'mclib.visual_zone.VisualZone.get_zone_ids_overlapping_point', 'VisualZone.get_zone_ids_overlapping_point', (['self.room.zone_lists', 'x', 'y'], {}), '(self.room.zone_lists, x, y)\n', (5780, 5808), False, 'from mclib.visual_zone import VisualZone\n'), ((510, 532), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (530, 532), False, 'import traceback\n')]
|
#!/usr/bin/env python
from pandocfilters import toJSONFilter, RawInline
"""
Pandoc filter that causes emphasis to be rendered using
the custom macro '\myemph{...}' rather than '\emph{...}'
in latex. Other output formats are unaffected.
"""
def latex(s):
return RawInline('latex', s)
def myemph(k, v, f, meta):
if k == 'Emph' and f == 'latex':
return [latex('\\myemph{')] + v + [latex('}')]
if __name__ == "__main__":
toJSONFilter(myemph)
|
[
"pandocfilters.toJSONFilter",
"pandocfilters.RawInline"
] |
[((266, 287), 'pandocfilters.RawInline', 'RawInline', (['"""latex"""', 's'], {}), "('latex', s)\n", (275, 287), False, 'from pandocfilters import toJSONFilter, RawInline\n'), ((432, 452), 'pandocfilters.toJSONFilter', 'toJSONFilter', (['myemph'], {}), '(myemph)\n', (444, 452), False, 'from pandocfilters import toJSONFilter, RawInline\n')]
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 <NAME>, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
# for debugging
DEBUG = 0
DEFAULT_HTTP_PORT = 80
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
self.headers = dict()
def __str__(self):
''' for debugging '''
s = {"code": self.code, "body": self.body, "headers": self.headers}
return str(s)
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return True
except Exception as e:
print("Problem in connection to %s on port %d" % (host, port))
return False
def get_code(self, data):
''' the work of get_code, get_headers and get_body is
done by 1 parse of response in parse_response(..) '''
return None
def get_headers(self, data):
return None
def get_body(self, data):
return None
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('utf-8')
def GET(self, url, args=None):
code = 500
body = ""
valid, host, port, path = self.parse_url(url)
if not valid:
print("[GET] Malformed HTTP URL: %s" % url)
return HTTPResponse(code, body)
if not port:
# when if requesting on a URL with no port use default
# port 80
port = DEFAULT_HTTP_PORT
if not path or path == "":
path = "/"
if not self.connect(host, port):
return HTTPResponse(code, body)
# got sample http GET request format from
# curl -v http://www.cs.ualberta.ca
req = "GET " + path + " HTTP/1.1\r\n"
req += "Host: " + host + ":" + str(port) + "\r\n"
req += "User-Agent: " + "curl/7.71.1" + "\r\n"
req += "Accept: " + "*/*" + "\r\n"
req += "\r\n"
if DEBUG:
print("[GET] Requesting...")
print(req + "\n********************")
self.sendall(req)
response = self.recvall(self.socket)
self.close()
if DEBUG:
print("*****Response:******")
print(response + "\n********************")
return self.parse_response(response)
def POST(self, url, args=None):
'''
POST on URL.
TODO: GET and POST have a lot of common code: scope of refactoring
'''
code = 500
body = ""
valid, host, port, path = self.parse_url(url)
if not valid:
print("[POST] Malformed HTTP URL: %s" % url)
return HTTPResponse(code, body)
if not port:
# when if requesting on a URL with no port use default
# port 80
port = DEFAULT_HTTP_PORT
if not path or path == "":
path = "/"
if not self.connect(host, port):
return HTTPResponse(code, body)
# got sample http POST request format from
# curl -v -d "a=aaa&b=bbbb" -X POST http://127.0.0.1:3000
if args:
payload = urllib.parse.urlencode(args)
payload_len = len(payload)
else:
payload_len = 0
req = "POST " + path + " HTTP/1.1\r\n"
req += "Host: " + host + ":" + str(port) + "\r\n"
req += "User-Agent: " + "curl/7.71.1" + "\r\n"
req += "Accept: " + "*/*" + "\r\n"
req += "Content-Length: " + str(payload_len) + "\r\n"
req += "Content-Type: application/x-www-form-urlencoded\r\n"
req += "\r\n"
if args:
req += payload
if DEBUG:
print("[POST] Requesting...")
print(req + "\n********************")
self.sendall(req)
response = self.recvall(self.socket)
self.close()
if DEBUG:
print("*****Response:******")
print(response + "\n********************")
return self.parse_response(response)
def parse_url(self, url):
'''
A valid URL starts with http:// or https://.
Then has a host and a port separated by comma.
This returns <valid>, host, port, path
where, valid is True/False, and host and port from the url
'''
parsed = urllib.parse.urlparse(url)
scheme = parsed.scheme
if scheme != "http" and scheme != "https":
return False, None, None, None
return True, parsed.hostname, parsed.port, parsed.path
def parse_response(self, response_str):
'''
Parse an http response as a string, extract body, status code and
headers, and return an httpclient.HTTPResponse object
'''
response_obj = HTTPResponse(500, '')
lines = response_str.split("\n")
if len(lines) == 0:
return response_obj
if not lines[0].startswith('HTTP/1.0 ') and not lines[0].startswith('HTTP/1.1 '):
if DEBUG:
print("Bad 1st line in response. Expected HTTP/1.0 or HTTP/1.1")
return response_obj
        # the status line looks like "HTTP/1.1 200 OK"; capture the numeric status code
        resp_line_pattern = re.compile(r"HTTP/1\.. (\d+) .*")
matches = resp_line_pattern.match(lines[0])
if not matches:
if DEBUG:
print("Bad 1st line in response: %s" % lines[0])
return response_obj
code = int(matches.group(1))
response_obj.code = code
# parse headers
i = 1
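        # header lines run until the first blank line; whatever follows is the body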
while i < len(lines):
header_line = lines[i].strip()
if header_line == "":
break
tok = header_line.split(":")
if len(tok) < 2:
# header_name: header_val is not there
if DEBUG:
print("[WARN] Bad header line::: %s" % header_line)
else:
header_name = tok[0].strip()
                header_val = ':'.join(tok[1:])  # rejoin so values containing ':' (e.g. URLs) stay intact
header_val = header_val.strip()
response_obj.headers[header_name] = header_val
i += 1
# extract body if exists
body = ''
        if i+1 < len(lines):
            # the body may span several lines; rejoin everything after the blank line
            body = "\n".join(lines[i+1:])
response_obj.body = body
return response_obj
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
|
[
"socket.socket",
"sys.exit",
"re.compile"
] |
[((6555, 6589), 're.compile', 're.compile', (['"""HTTP/1\\\\.. (\\\\d+) .*"""'], {}), "('HTTP/1\\\\.. (\\\\d+) .*')\n", (6565, 6589), False, 'import re\n'), ((7977, 7988), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7985, 7988), False, 'import sys\n'), ((1497, 1546), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1510, 1546), False, 'import socket\n')]
|
import time
import random
from ggplib.util import log
def depth_charges(sm, seconds):
# play for n seconds
seconds = float(seconds)
log.info("depth_charges() : playing for %s seconds" % seconds)
role_count = len(sm.get_roles())
# cache some objects
joint_move = sm.get_joint_move()
base_state = sm.new_base_state()
# resolution is assumed to be good enough not to cheat too much here (we return
# msecs_taken so it is all good)
start_time = cur_time = time.time()
end_time = start_time + seconds
rollouts = 0
num_state_changes = 0
all_scores = [[] for i in range(role_count)]
while cur_time < end_time:
# the number of moves of the game
depth = 0
# tells the state machine to reset everything and return to initial state
sm.reset()
# while the game has not ended
while not sm.is_terminal():
# choose a random move for each role
for role_index in range(role_count):
ls = sm.get_legal_state(role_index)
choice = ls.get_legal(random.randrange(0, ls.get_count()))
joint_move.set(role_index, choice)
# play move, the base_state will be new state
sm.next_state(joint_move, base_state)
# update the state machine to new state
sm.update_bases(base_state)
# increment the depth
depth += 1
# simulate side effect of getting the scores from the statemachine
for ri in range(role_count):
all_scores[ri].append(sm.get_goal_value(ri))
# stats
rollouts += 1
num_state_changes += depth
# update the time
cur_time = time.time()
rollouts_per_second = rollouts / seconds
log.info("rollouts per second %s" % rollouts_per_second)
log.info("average time msecs %s" % ((seconds / rollouts) * 1000))
log.info("average depth %s" % (num_state_changes / rollouts))
for ri, role in enumerate(sm.get_roles()):
total_score = sum(all_scores[ri])
log.info("average score for %s : %s" % (role, total_score / float(rollouts)))
|
[
"ggplib.util.log.info",
"time.time"
] |
[((148, 210), 'ggplib.util.log.info', 'log.info', (["('depth_charges() : playing for %s seconds' % seconds)"], {}), "('depth_charges() : playing for %s seconds' % seconds)\n", (156, 210), False, 'from ggplib.util import log\n'), ((499, 510), 'time.time', 'time.time', ([], {}), '()\n', (508, 510), False, 'import time\n'), ((1799, 1855), 'ggplib.util.log.info', 'log.info', (["('rollouts per second %s' % rollouts_per_second)"], {}), "('rollouts per second %s' % rollouts_per_second)\n", (1807, 1855), False, 'from ggplib.util import log\n'), ((1860, 1923), 'ggplib.util.log.info', 'log.info', (["('average time msecs %s' % (seconds / rollouts * 1000))"], {}), "('average time msecs %s' % (seconds / rollouts * 1000))\n", (1868, 1923), False, 'from ggplib.util import log\n'), ((1930, 1991), 'ggplib.util.log.info', 'log.info', (["('average depth %s' % (num_state_changes / rollouts))"], {}), "('average depth %s' % (num_state_changes / rollouts))\n", (1938, 1991), False, 'from ggplib.util import log\n'), ((1737, 1748), 'time.time', 'time.time', ([], {}), '()\n', (1746, 1748), False, 'import time\n')]
|
import unittest
from moment import moment
from datetime import timedelta
class TestAddOperator(unittest.TestCase):
def test_day(self):
a = moment('20201228').add(3, 'd')
b = moment('20201228') + timedelta(days=3)
self.assertEqual(a, b)
def test_second(self):
a = moment('20201228').add(80, 's')
b = moment('20201228') + timedelta(seconds=80)
self.assertEqual(a, b)
def test_millisecond(self):
a = moment('20201228').add(183, 'ms')
b = moment('20201228') + timedelta(milliseconds=183)
self.assertEqual(a, b)
def test_minute(self):
a = moment('20201228').add(7, 'm')
b = moment('20201228') + timedelta(minutes=7)
self.assertEqual(a, b)
def test_hour(self):
a = moment('20201228').add(13, 'h')
b = moment('20201228') + timedelta(hours=13)
self.assertEqual(a, b)
    def test_week(self):
a = moment('20201228').add(5, 'w')
b = moment('20201228') + timedelta(weeks=5)
self.assertEqual(a, b)
def test_not_implement(self):
with self.assertRaises(TypeError):
a = moment('2021-4-2 04:02:09.957031 +0800')
a + 2
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"moment.moment",
"datetime.timedelta"
] |
[((1244, 1259), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1257, 1259), False, 'import unittest\n'), ((197, 215), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (203, 215), False, 'from moment import moment\n'), ((218, 235), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (227, 235), False, 'from datetime import timedelta\n'), ((351, 369), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (357, 369), False, 'from moment import moment\n'), ((372, 393), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(80)'}), '(seconds=80)\n', (381, 393), False, 'from datetime import timedelta\n'), ((516, 534), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (522, 534), False, 'from moment import moment\n'), ((537, 564), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(183)'}), '(milliseconds=183)\n', (546, 564), False, 'from datetime import timedelta\n'), ((679, 697), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (685, 697), False, 'from moment import moment\n'), ((700, 720), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(7)'}), '(minutes=7)\n', (709, 720), False, 'from datetime import timedelta\n'), ((834, 852), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (840, 852), False, 'from moment import moment\n'), ((855, 874), 'datetime.timedelta', 'timedelta', ([], {'hours': '(13)'}), '(hours=13)\n', (864, 874), False, 'from datetime import timedelta\n'), ((987, 1005), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (993, 1005), False, 'from moment import moment\n'), ((1008, 1026), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(5)'}), '(weeks=5)\n', (1017, 1026), False, 'from datetime import timedelta\n'), ((1152, 1192), 'moment.moment', 'moment', (['"""2021-4-2 04:02:09.957031 +0800"""'], {}), "('2021-4-2 04:02:09.957031 +0800')\n", (1158, 1192), False, 'from moment import moment\n'), ((154, 172), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (160, 172), False, 'from moment import moment\n'), ((307, 325), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (313, 325), False, 'from moment import moment\n'), ((470, 488), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (476, 488), False, 'from moment import moment\n'), ((636, 654), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (642, 654), False, 'from moment import moment\n'), ((790, 808), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (796, 808), False, 'from moment import moment\n'), ((944, 962), 'moment.moment', 'moment', (['"""20201228"""'], {}), "('20201228')\n", (950, 962), False, 'from moment import moment\n')]
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
import acl
from flask import Flask, g
from flask_restful import reqparse, Api, Resource
from flask_httpauth import HTTPTokenAuth
import base64
from utils import *
from acl_dvpp import Dvpp
from acl_model import Model
from acl_image import AclImage
from image_net_classes import get_image_net_class
from PIL import Image, ImageDraw, ImageFont
import numpy as np
ret = acl.init()
check_ret("acl.rt.set_device", ret)
class Classify(object):
def __init__(self, model_path, model_width, model_height):
self.device_id = 0
self.context = None
self.stream = None
self._model_path = model_path
self._model_width = model_width
self._model_height = model_height
self._dvpp = None
def __del__(self):
if self._model:
del self._model
if self._dvpp:
del self._dvpp
if self.stream:
acl.rt.destroy_stream(self.stream)
if self.context:
acl.rt.destroy_context(self.context)
acl.rt.reset_device(self.device_id)
acl.finalize()
print("[Sample] class Sample release source success")
def destroy(self):
self.__del__()
def _init_resource(self):
print("[Sample] init resource stage:")
#ret = acl.init()
#check_ret("acl.rt.set_device", ret)
ret = acl.rt.set_device(self.device_id)
check_ret("acl.rt.set_device", ret)
self.context, ret = acl.rt.create_context(self.device_id)
check_ret("acl.rt.create_context", ret)
self.stream, ret = acl.rt.create_stream()
check_ret("acl.rt.create_stream", ret)
self.run_mode, ret = acl.rt.get_run_mode()
check_ret("acl.rt.get_run_mode", ret)
print("Init resource stage success")
def init(self):
self._init_resource()
self._dvpp = Dvpp(self.stream, self.run_mode)
ret = self._dvpp.init_resource()
if ret != SUCCESS:
print("Init dvpp failed")
return FAILED
self._model = Model(self.run_mode, self._model_path)
ret = self._model.init_resource()
if ret != SUCCESS:
print("Init model failed")
return FAILED
return SUCCESS
def pre_process(self, image):
yuv_image = self._dvpp.jpegd(image)
print("decode jpeg end")
resized_image = self._dvpp.resize(yuv_image,
self._model_width, self._model_height)
print("resize yuv end")
return resized_image
def inference(self, resized_image):
return self._model.execute(resized_image.data(), resized_image.size)
def post_process(self, infer_output, image_file):
print("post process")
data = infer_output[0]
vals = data.flatten()
top_k = vals.argsort()[-1:-6:-1]
print("images:{}".format(image_file))
print("======== top5 inference results: =============")
for n in top_k:
object_class = get_image_net_class(n)
print("label:%d confidence: %f, class: %s" % (n, vals[n], object_class))
object_class = get_image_net_class(top_k[0])
return object_class
MODEL_PATH = "./model/googlenet_yuv.om"
MODEL_WIDTH = 224
MODEL_HEIGHT = 224
def main_process():
classify = Classify(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
ret = classify.init()
if not os.path.isdir('./outputs'):
os.mkdir('./outputs')
image_dir = "./origin"
images_list = [os.path.join(image_dir, img)
for img in os.listdir(image_dir)
if os.path.splitext(img)[1] in IMG_EXT]
for image_file in images_list:
image = AclImage(image_file)
resized_image = classify.pre_process(image)
print("pre process end")
result = classify.inference(resized_image)
result_img_encode = classify.post_process(result, image_file)
#acl_resource.destroy()
#classify.destroy()
return result_img_encode
def base64_decode(img_encode, img_type):
img_np = np.fromstring(base64.b64decode(img_encode), np.uint8)
img_file = "./origin/origin." + img_type
with open(img_file, 'wb') as f:
f.write(base64.b64decode(img_encode))
f.close()
return img_np
class TodoList(Resource):
def post(self):
args = parser_put.parse_args()
img_data_base64_encode = args['img_data']
img_type = args['img_type']
img_decode = base64_decode(img_data_base64_encode, img_type)
#print(img_decode)
result_img_encode = main_process()
os.remove("./origin/origin.jpg")
q = 400
for n in range(1,1000):
if str(result_img_encode)==get_image_net_class(n):
q=200
break
result = {"result":str(result_img_encode),"code":q}
return result
app = Flask(__name__)
api = Api(app)
parser_put = reqparse.RequestParser()
parser_put.add_argument("img_data", type=str, required=True, help="need img data")
parser_put.add_argument("img_type", type=str, required=True, help="need img type")
api.add_resource(TodoList, "/users")
if __name__ == '__main__':
app.run(host="192.168.0.169", port=7002, debug=True)
|
[
"flask_restful.Api",
"os.mkdir",
"os.remove",
"acl.finalize",
"base64.b64decode",
"acl_model.Model",
"os.path.join",
"acl.rt.set_device",
"image_net_classes.get_image_net_class",
"acl.rt.create_context",
"acl.rt.destroy_context",
"flask_restful.reqparse.RequestParser",
"acl.rt.get_run_mode",
"acl.rt.reset_device",
"acl_image.AclImage",
"acl.rt.destroy_stream",
"acl.rt.create_stream",
"os.listdir",
"acl_dvpp.Dvpp",
"os.path.isdir",
"flask.Flask",
"os.path.splitext",
"acl.init"
] |
[((432, 442), 'acl.init', 'acl.init', ([], {}), '()\n', (440, 442), False, 'import acl\n'), ((5153, 5168), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (5158, 5168), False, 'from flask import Flask, g\n'), ((5175, 5183), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (5178, 5183), False, 'from flask_restful import reqparse, Api, Resource\n'), ((5198, 5222), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (5220, 5222), False, 'from flask_restful import reqparse, Api, Resource\n'), ((1074, 1109), 'acl.rt.reset_device', 'acl.rt.reset_device', (['self.device_id'], {}), '(self.device_id)\n', (1093, 1109), False, 'import acl\n'), ((1118, 1132), 'acl.finalize', 'acl.finalize', ([], {}), '()\n', (1130, 1132), False, 'import acl\n'), ((1419, 1452), 'acl.rt.set_device', 'acl.rt.set_device', (['self.device_id'], {}), '(self.device_id)\n', (1436, 1452), False, 'import acl\n'), ((1526, 1563), 'acl.rt.create_context', 'acl.rt.create_context', (['self.device_id'], {}), '(self.device_id)\n', (1547, 1563), False, 'import acl\n'), ((1640, 1662), 'acl.rt.create_stream', 'acl.rt.create_stream', ([], {}), '()\n', (1660, 1662), False, 'import acl\n'), ((1740, 1761), 'acl.rt.get_run_mode', 'acl.rt.get_run_mode', ([], {}), '()\n', (1759, 1761), False, 'import acl\n'), ((1937, 1969), 'acl_dvpp.Dvpp', 'Dvpp', (['self.stream', 'self.run_mode'], {}), '(self.stream, self.run_mode)\n', (1941, 1969), False, 'from acl_dvpp import Dvpp\n'), ((2152, 2190), 'acl_model.Model', 'Model', (['self.run_mode', 'self._model_path'], {}), '(self.run_mode, self._model_path)\n', (2157, 2190), False, 'from acl_model import Model\n'), ((3237, 3266), 'image_net_classes.get_image_net_class', 'get_image_net_class', (['top_k[0]'], {}), '(top_k[0])\n', (3256, 3266), False, 'from image_net_classes import get_image_net_class\n'), ((3517, 3543), 'os.path.isdir', 'os.path.isdir', (['"""./outputs"""'], {}), "('./outputs')\n", (3530, 3543), False, 'import os\n'), ((3553, 3574), 'os.mkdir', 'os.mkdir', (['"""./outputs"""'], {}), "('./outputs')\n", (3561, 3574), False, 'import os\n'), ((3630, 3658), 'os.path.join', 'os.path.join', (['image_dir', 'img'], {}), '(image_dir, img)\n', (3642, 3658), False, 'import os\n'), ((3831, 3851), 'acl_image.AclImage', 'AclImage', (['image_file'], {}), '(image_file)\n', (3839, 3851), False, 'from acl_image import AclImage\n'), ((4288, 4316), 'base64.b64decode', 'base64.b64decode', (['img_encode'], {}), '(img_encode)\n', (4304, 4316), False, 'import base64\n'), ((4851, 4883), 'os.remove', 'os.remove', (['"""./origin/origin.jpg"""'], {}), "('./origin/origin.jpg')\n", (4860, 4883), False, 'import os\n'), ((957, 991), 'acl.rt.destroy_stream', 'acl.rt.destroy_stream', (['self.stream'], {}), '(self.stream)\n', (978, 991), False, 'import acl\n'), ((1029, 1065), 'acl.rt.destroy_context', 'acl.rt.destroy_context', (['self.context'], {}), '(self.context)\n', (1051, 1065), False, 'import acl\n'), ((3105, 3127), 'image_net_classes.get_image_net_class', 'get_image_net_class', (['n'], {}), '(n)\n', (3124, 3127), False, 'from image_net_classes import get_image_net_class\n'), ((3689, 3710), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (3699, 3710), False, 'import os\n'), ((4432, 4460), 'base64.b64decode', 'base64.b64decode', (['img_encode'], {}), '(img_encode)\n', (4448, 4460), False, 'import base64\n'), ((4980, 5002), 'image_net_classes.get_image_net_class', 'get_image_net_class', (['n'], {}), '(n)\n', (4999, 5002), False, 'from image_net_classes import 
get_image_net_class\n'), ((3733, 3754), 'os.path.splitext', 'os.path.splitext', (['img'], {}), '(img)\n', (3749, 3754), False, 'import os\n')]
|
from __future__ import print_function
import sys
import time
import uuid
import copy
import random
import logging
import threading
try:
import Queue
except ImportError: # for python3
import queue as Queue
import msgpack
import raft.store as store
import raft.tcp as channel
import raft.log as log
def make_server(port=9289, bootstraps=None):
queue = Queue.Queue()
server = Server(queue, port, bootstraps)
server.start()
return queue
def iteritems(dictobj):
if sys.version_info[0] == 2:
return dictobj.iteritems()
else:
return dictobj.items()
class Server(threading.Thread):
def __init__(self, queue, port, bootstraps):
self.port = port
self.load()
self.bootstraps = bootstraps
self.queue = queue
self.role = 'follower'
self.channel = channel.start(port, self.uuid)
self.last_update = time.time()
self.commitidx = 0
self.update_uuid = None
self.leader = None
self.newpeers = None
self.oldpeers = None
threading.Thread.__init__(self)
self.daemon = True
#
## startup and state methods
#
def load(self):
self.term, self.voted, llog, self.peers, \
self.uuid = store.read_state(self.port)
self.log = log.RaftLog(llog)
def save(self):
store.write_state(self.port, self.term, self.voted,
self.log.dump(), self.peers, self.uuid)
def run(self):
self.running = True
while self.running:
for peer in self.peers:
if not peer in self.channel and peer != self.uuid:
self.channel.connect(self.peers[peer])
for addr in self.bootstraps:
self.channel.connectbs(addr, self.bootstrap_cb)
channelans = self.channel.recv(0.15)
if channelans:
for peer, msgs in channelans:
for msg in msgs:
self.handle_message(msg, peer)
else:
self.housekeeping()
#
## message handling
#
def handle_message(self, msg, addr):
# got a new message
# update our term if applicable, and dispatch the message
# to the appropriate handler. finally, if we are still
# (or have become) the leader, send out heartbeats
try:
msg = msgpack.unpackb(msg, use_list=False, encoding='utf-8')
except msgpack.UnpackException:
return
mtype = msg['type']
term = msg.get('term', None)
msg['src'] = addr
uuid = msg.get('id', None)
# no matter what, if our term is old, update and step down
if term and term > self.term and self.valid_peer(uuid):
# okay, well, only if it's from a valid source
self.term = term
self.voted = None
self.role = 'follower'
mname = 'handle_msg_%s_%s' % (self.role, mtype)
if hasattr(self, mname):
getattr(self, mname)(msg)
if self.role == 'leader' and time.time() - self.last_update > 0.3:
self.send_ae()
def handle_msg_candidate_bootstrap(self, msg):
self.handle_msg_follower_bootstrap(msg)
def handle_msg_follower_bootstrap(self, msg):
# bootstrap packets solve the problem of how we find the
# id of our peers. we don't want to have to copy uuids around
# when they could just mail them to each other.
print(msg)
print(self.peers)
def handle_msg_leader_ae_reply(self, msg):
# we are a leader who has received an ae ack
# if the update was rejected, it's because the follower
# has an incorrect log entry, so send an update for that
# log entry as well
# if the update succeeded, record that in the log and,
# if the log has been recorded by enough followers, mark
# it committed.
uuid = msg['id']
if not self.valid_peer(uuid):
return
success = msg['success']
index = msg['index']
if success:
self.next_index[uuid] = index
if self.log.get_commit_index() < index:
self.msg_recorded(msg)
else:
# exponentially reduce the index for peers
# this way if they're only missing a couple log entries,
# we only have to send 2 or 4, but if they're missing
# a couple thousand we'll find out in less than 2k round
# trips
oldidx = self.next_index.get(uuid, 0)
diff = self.log.maxindex() - oldidx
diff = max(diff, 1)
oldidx -= diff
self.next_index[uuid] = max(oldidx, 0)
def handle_msg_follower_ae(self, msg):
# we are a follower who just got an append entries rpc
# reset the timeout counter
uuid = msg['id']
if not self.valid_peer(uuid):
return
term = msg['term']
if term < self.term:
return
self.last_update = time.time()
self.leader = msg['id']
logs = msg['entries']
previdx = msg['previdx']
prevterm = msg['prevterm']
if not self.log.exists(previdx, prevterm):
rpc = self.ae_rpc_reply(previdx, prevterm, False)
self.send_to_peer(rpc, self.leader)
return
cidx = msg['commitidx']
if cidx > self.commitidx: # don't lower the commit index
self.commitidx = cidx
self.log.force_commit(cidx)
if self.update_uuid:
self.check_update_committed()
if not logs:
# heartbeat
return
for ent in sorted(logs):
val = logs[ent]
self.process_possible_update(val)
self.log.add(val)
maxmsg = self.log.get_by_index(self.log.maxindex())
rpc = self.ae_rpc_reply(maxmsg['index'], maxmsg['term'], True)
self.send_to_peer(rpc, self.leader)
def handle_msg_candidate_ae(self, msg):
# someone else was elected during our candidacy
term = msg['term']
uuid = msg['id']
if not self.valid_peer(uuid):
return
if term < self.term:
# illegitimate, toss it
return
self.role = 'follower'
self.handle_msg_follower_ae(msg)
def handle_msg_follower_cq(self, msg):
try:
rpc = self.cr_rdr_rpc(msg['id'])
src = msg['src']
self.send_to_peer(rpc, src)
except:
# we're allowed not to respond at all, in this case,
# so if we crashed for some reason, just ignore it
return
def handle_msg_leader_cq(self, msg):
src = msg['src']
if msg['id'] is None:
msgid = uuid.uuid4().hex
msg['id'] = msgid
self.add_to_log(msg)
rpc = self.cr_rpc_ack(msg['id'])
self.send_to_peer(rpc, src)
def handle_msg_leader_cq_inq(self, msg):
src = msg['src']
msgid = msg['id']
info = {}
inquiry = self.log.get_by_uuid(msgid)
if inquiry is None:
info['status'] = 'unknown'
elif inquiry['index'] > self.commitidx:
info['status'] = 'pending'
else:
info['status'] = 'committed'
rpc = self.cr_rpc_ack(msgid, info)
self.send_to_peer(rpc, src)
def handle_msg_candidate_rv(self, msg):
# don't vote for a different candidate!
uuid = msg['id']
if self.uuid == uuid:
# huh
return
if not self.valid_peer(uuid):
return
rpc = self.rv_rpc_reply(False)
self.send_to_peer(rpc, uuid)
def handle_msg_follower_rv(self, msg):
term = msg['term']
uuid = msg['id']
if not self.valid_peer(uuid):
return
olog = {msg['log_index']: {
'index': msg['log_index'],
'term': msg['log_term'],
'msgid': '',
'msg': {}}}
olog = log.RaftLog(olog)
if term < self.term:
# someone with a smaller term wants to get elected
# as if
rpc = self.rv_rpc_reply(False)
self.send_to_peer(rpc, uuid)
return
if (self.voted is None or self.voted == uuid) and self.log <= olog:
# we can vote for this guy
self.voted = uuid
self.save()
rpc = self.rv_rpc_reply(True)
self.last_update = time.time()
self.send_to_peer(rpc, uuid)
return
# we probably voted for somebody else, or the log is old
rpc = self.rv_rpc_reply(False)
self.send_to_peer(rpc, uuid)
def handle_msg_candidate_rv_reply(self, msg):
uuid = msg['id']
if not self.valid_peer(uuid):
return
voted = msg['voted']
if voted:
self.cronies.add(uuid)
else:
self.refused.add(uuid)
if len(self.cronies) >= self.quorum():
# won the election
self.role = 'leader'
self.next_index = {}
self.commitidx = self.log.get_commit_index()
maxidx = self.log.maxindex()
for uuid in self.all_peers():
# just start by pretending everyone is caught up,
# they'll let us know if not
self.next_index[uuid] = maxidx
def handle_msg_leader_pu(self, msg):
if self.update_uuid:
# we're either already in the middle of this, or we're
# in the middle of something *else*, so piss off
return
uuid = msg['id']
# got a new update request
# it will consist of machines to add and to remove
# here we perform the first phase of the update, by
# telling clients to add the new machines to their
# existing peer set.
msg['phase'] = 1
self.newpeers = msg['config'] # adopt the new config right away
if not self.newpeers:
return
self.update_uuid = uuid
self.add_to_log(msg)
def housekeeping(self):
now = time.time()
if self.role == 'candidate':
elapsed = now - self.election_start
if now - self.last_update > 0.5 and self.role == 'follower':
# got no heartbeats; leader is probably dead
# establish candidacy and run for election
self.call_election()
elif self.role == 'candidate' and elapsed < self.election_timeout:
# we're in an election and haven't won, but the
# timeout isn't expired. repoll peers that haven't
# responded yet
self.campaign()
elif self.role == 'candidate':
# the election timeout *has* expired, and we *still*
# haven't won or lost. call a new election.
self.call_election()
elif self.role == 'leader':
# send a heartbeat
self.send_ae()
#
## convenience methods
#
def send_ae(self):
self.last_update = time.time()
for uuid in self.all_peers():
if uuid == self.uuid: # no selfies
continue
ni = self.next_index.get(uuid, self.log.maxindex())
logs = self.log.logs_after_index(ni)
rpc = self.ae_rpc(uuid, logs)
self.send_to_peer(rpc, uuid)
def call_election(self):
self.term += 1
self.voted = self.uuid
self.save()
self.cronies = set()
self.refused = set()
self.cronies.add(self.uuid)
self.election_start = time.time()
self.election_timeout = 0.5 * random.random() + 0.5
self.role = 'candidate'
self.campaign()
def campaign(self):
voted = self.cronies.union(self.refused) # everyone who voted
voters = set(self.peers)
if self.newpeers:
voters = voters.union(set(self.newpeers))
remaining = voters.difference(voted) # peers who haven't
rpc = self.rv_rpc()
for uuid in remaining:
self.send_to_peer(rpc, uuid)
def check_update_committed(self):
# we (a follower) just learned that one or more
# logs were committed, *and* we are in the middle of an
# update. check to see if that was phase 2 of the update,
# and remove old hosts if so
umsg = self.log.get_by_uuid(self.update_uuid)
if umsg['index'] > self.commitidx:
# isn't yet committed
return
data = umsg['msg']
if data['phase'] == 2:
self.oldpeers = None
self.update_uuid = None
if not self.uuid in self.all_peers():
self.running = False
def process_possible_update(self, msg):
if not 'msg' in msg:
return
data = msg['msg']
if not 'type' in data:
return
if data['type'] != 'pu':
return
phase = data['phase']
uuid = data['id']
if self.update_uuid == uuid:
# we've already done this
return
self.update_uuid = uuid # in case we become leader during this debacle
if phase == 1:
self.newpeers = data['config']
elif phase == 2 and self.newpeers:
self.oldpeers = self.peers
self.peers = self.newpeers
self.newpeers = None
def possible_update_commit(self):
# we're in an update; see if the update msg
# has committed, and go to phase 2 or finish
if not self.log.is_committed_by_uuid(self.update_uuid):
# it hasn't
return
umsg = self.log.get_by_uuid(self.update_uuid)
data = copy.deepcopy(umsg['msg'])
if data['phase'] == 1 and self.newpeers:
# the *first* phase of the update has been committed
# new leaders are guaranteed to be in the union of the
# old and new configs. now update the configuration
# to the new one only.
data['phase'] = 2
newid = uuid.uuid4().hex
self.update_uuid = newid
data['id'] = newid
self.oldpeers = self.peers
self.peers = self.newpeers
self.newpeers = None
logentry = log.logentry(self.term, newid, data)
self.log.add(logentry)
else:
# the *second* phase is now committed. tell all our
# current peers about the successful commit, drop
# the old config entirely and, if necessary, step down
self.send_ae() # send this to peers who might be about to dispeer
self.oldpeers = None
self.update_uuid = None
if not self.uuid in self.peers:
self.running = False
def all_peers(self):
for host in self.peers:
yield host
if self.newpeers:
for host in self.newpeers:
yield host
if self.oldpeers:
for host in self.oldpeers:
yield host
def valid_peer(self, uuid):
if uuid in self.peers:
return True
if self.newpeers and uuid in self.newpeers:
return True
if self.oldpeers and uuid in self.oldpeers:
return True
return False
def get_peer_addr(self, uuid):
if uuid in self.peers:
return self.peers[uuid]
if self.newpeers and uuid in self.newpeers:
return self.newpeers[uuid]
if self.oldpeers and uuid in self.oldpeers:
return self.oldpeers[uuid]
def send_to_peer(self, rpc, uuid):
self.channel.send(rpc, uuid)
def quorum(self):
peers = set(self.peers)
if self.newpeers:
peers.union(set(self.newpeers))
# oldpeers don't get a vote
# use sets because there could be dupes
np = len(peers)
return np/2 + 1
def msg_recorded(self, msg):
# we're a leader and we just got an ack from
# a follower who might have been the one to
# commit an entry
term = msg['term']
index = msg['index']
uuid = msg['id']
self.log.add_ack(index, term, uuid)
if self.log.num_acked(index) >= self.quorum() and term == self.term:
self.log.commit(index, term)
assert index >= self.commitidx
oldidx = self.commitidx
self.commitidx = index
if self.update_uuid:
# if there's an update going on, see if our commit
# is actionable
self.possible_update_commit()
# otherwise just see what messages are now runnable
self.run_committed_messages(oldidx)
def add_to_log(self, msg):
uuid = msg['id']
logentry = log.logentry(self.term, uuid, msg)
index = self.log.add(logentry)
self.save()
self.log.add_ack(index, self.term, self.uuid)
def run_committed_messages(self, oldidx):
committed = self.log.committed_logs_after_index(oldidx)
for _, val in sorted(iteritems(committed)):
msg = val['msg']
msgid = msg['id']
data = msg['data']
self.queue.put((msgid, data))
def bootstrap_cb(self, uuid, addr):
self.bootstraps.remove(addr)
self.peers[uuid] = addr
#
## rpc methods
#
def rv_rpc(self):
log_index, log_term = self.log.get_max_index_term()
rpc = {
'type': 'rv',
'term': self.term,
'id': self.uuid,
'log_index': log_index,
'log_term': log_term,
}
return msgpack.packb(rpc)
def rv_rpc_reply(self, voted):
rpc = {
'type': 'rv_reply',
'id': self.uuid,
'term': self.term,
'voted': voted
}
return msgpack.packb(rpc)
def ae_rpc(self, peeruuid, append={}):
previdx = self.next_index.get(peeruuid, self.log.maxindex())
rpc = {
'type': 'ae',
'term': self.term,
'id': self.uuid,
'previdx': previdx,
'prevterm': self.log.get_term_of(previdx),
'entries': append,
'commitidx': self.commitidx,
}
return msgpack.packb(rpc)
def ae_rpc_reply(self, index, term, success):
rpc = {
'type': 'ae_reply',
'term': term,
'id': self.uuid,
'index': index,
'success': success
}
return msgpack.packb(rpc)
def cr_rpc(self, qid, ans):
# client response RPC
# qid = query id, ans is arbitrary data
# if the qid is None, we make one up and
# return it when we ack it
rpc = {
'type': 'cr',
'id': qid,
'data': ans
}
return msgpack.packb(rpc)
def cr_rpc_ack(self, qid, info=None):
# client response RPC
# qid = query id, ans is arbitrary data
rpc = {
'type': 'cr_ack',
'id': qid,
'info': info
}
return msgpack.packb(rpc)
def cr_rdr_rpc(self, msgid):
# client response redirect; just point them
# at the master
if not self.leader:
# we don't know where to send them
raise RuntimeError
rpc = {
'type': 'cr_rdr',
'id': msgid,
'addr': self.get_peer_addr(self.leader),
'leader': self.leader
}
return msgpack.packb(rpc)
def bootstrap_rpc(self):
rpc = {
'type': 'bootstrap',
'id': self.uuid
}
return msgpack.packb(rpc)
|
[
"raft.log.logentry",
"threading.Thread.__init__",
"copy.deepcopy",
"uuid.uuid4",
"raft.log.RaftLog",
"raft.store.read_state",
"time.time",
"msgpack.unpackb",
"random.random",
"raft.tcp.start",
"msgpack.packb",
"queue.Queue"
] |
[((366, 379), 'queue.Queue', 'Queue.Queue', ([], {}), '()\n', (377, 379), True, 'import queue as Queue\n'), ((840, 870), 'raft.tcp.start', 'channel.start', (['port', 'self.uuid'], {}), '(port, self.uuid)\n', (853, 870), True, 'import raft.tcp as channel\n'), ((898, 909), 'time.time', 'time.time', ([], {}), '()\n', (907, 909), False, 'import time\n'), ((1062, 1093), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1087, 1093), False, 'import threading\n'), ((1263, 1290), 'raft.store.read_state', 'store.read_state', (['self.port'], {}), '(self.port)\n', (1279, 1290), True, 'import raft.store as store\n'), ((1310, 1327), 'raft.log.RaftLog', 'log.RaftLog', (['llog'], {}), '(llog)\n', (1321, 1327), True, 'import raft.log as log\n'), ((5089, 5100), 'time.time', 'time.time', ([], {}), '()\n', (5098, 5100), False, 'import time\n'), ((8136, 8153), 'raft.log.RaftLog', 'log.RaftLog', (['olog'], {}), '(olog)\n', (8147, 8153), True, 'import raft.log as log\n'), ((10266, 10277), 'time.time', 'time.time', ([], {}), '()\n', (10275, 10277), False, 'import time\n'), ((11211, 11222), 'time.time', 'time.time', ([], {}), '()\n', (11220, 11222), False, 'import time\n'), ((11758, 11769), 'time.time', 'time.time', ([], {}), '()\n', (11767, 11769), False, 'import time\n'), ((13879, 13905), 'copy.deepcopy', 'copy.deepcopy', (["umsg['msg']"], {}), "(umsg['msg'])\n", (13892, 13905), False, 'import copy\n'), ((16998, 17032), 'raft.log.logentry', 'log.logentry', (['self.term', 'uuid', 'msg'], {}), '(self.term, uuid, msg)\n', (17010, 17032), True, 'import raft.log as log\n'), ((17863, 17881), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (17876, 17881), False, 'import msgpack\n'), ((18078, 18096), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (18091, 18096), False, 'import msgpack\n'), ((18496, 18514), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (18509, 18514), False, 'import msgpack\n'), ((18753, 18771), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (18766, 18771), False, 'import msgpack\n'), ((19081, 19099), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (19094, 19099), False, 'import msgpack\n'), ((19340, 19358), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (19353, 19358), False, 'import msgpack\n'), ((19758, 19776), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (19771, 19776), False, 'import msgpack\n'), ((19909, 19927), 'msgpack.packb', 'msgpack.packb', (['rpc'], {}), '(rpc)\n', (19922, 19927), False, 'import msgpack\n'), ((2413, 2467), 'msgpack.unpackb', 'msgpack.unpackb', (['msg'], {'use_list': '(False)', 'encoding': '"""utf-8"""'}), "(msg, use_list=False, encoding='utf-8')\n", (2428, 2467), False, 'import msgpack\n'), ((8611, 8622), 'time.time', 'time.time', ([], {}), '()\n', (8620, 8622), False, 'import time\n'), ((14456, 14492), 'raft.log.logentry', 'log.logentry', (['self.term', 'newid', 'data'], {}), '(self.term, newid, data)\n', (14468, 14492), True, 'import raft.log as log\n'), ((6855, 6867), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6865, 6867), False, 'import uuid\n'), ((11808, 11823), 'random.random', 'random.random', ([], {}), '()\n', (11821, 11823), False, 'import random\n'), ((14237, 14249), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14247, 14249), False, 'import uuid\n'), ((3101, 3112), 'time.time', 'time.time', ([], {}), '()\n', (3110, 3112), False, 'import time\n')]
|
import unittest
from filesystem.basefile import BaseFile
import os
class TestBaseFile(unittest.TestCase):
def setUp(self):
os.mknod("/tmp/testfile")
if not os.path.isfile("/tmp/testfile"):
raise Exception("Cannot create /tmp/testfile")
self.file = BaseFile("/tmp/testfile")
self.updater = None
def update(self, caller):
self.updater = caller
def tearDown(self):
if os.path.isfile("/tmp/testfile"):
os.remove("/tmp/testfile")
def test_get_path(self):
self.assertEqual(self.file.get_path(), "/tmp/testfile")
def test_create(self):
file2 = BaseFile("testname")
self.assertIsInstance(file2, BaseFile)
self.assertEqual(file2.get_path(), "testname")
def test_create_wrong_argument_to_constructor(self):
with self.assertRaises(TypeError):
file2 = BaseFile(self.file)
with self.assertRaises(TypeError):
file2 = BaseFile(None)
with self.assertRaises(TypeError):
file2 = BaseFile(15)
def test_perform_operation(self):
return_value = self.file.perform_operation("/bin/echo")
self.assertEqual(return_value, 0)
def test_perform_operation_before_arg(self):
return_value = self.file.perform_operation("/bin/echo", ["before"])
self.assertEqual(return_value, 0)
def test_perform_operation_after_arg(self):
return_value = self.file.perform_operation("/bin/echo", None, ["after"])
self.assertEqual(return_value, 0)
def test_perform_operation_before_and_after_arg(self):
return_value = self.file.perform_operation("/bin/echo", ["before"], ["after"])
self.assertEqual(return_value, 0)
def test_perform_operation_wrong_arg(self):
return_value = self.file.perform_operation("/bin/sed")
self.assertEqual(return_value, 4)
def test_perform_operation_unknown_command(self):
with self.assertRaises(OSError):
return_value = self.file.perform_operation("dummytest")
def test_delete_file(self):
self.file.delete()
self.assertFalse(os.path.isfile(self.file.get_path()))
def test_delete_nonexistent_file(self):
file2 = BaseFile("dummytest")
with self.assertRaises(OSError) as cm:
file2.delete()
self.assertEqual(cm.exception.filename, "dummytest")
def test_attach(self):
self.file.attach(self)
self.assertEqual(len(self.file._observers), 1)
self.assertEqual(self.file._observers[0], self)
file2 = BaseFile("dummytest")
self.file.attach(file2)
self.assertEqual(len(self.file._observers), 2)
self.assertEqual(self.file._observers[1], file2)
def test_detach(self):
self.file.attach(self)
self.file.detach(self)
self.assertEqual(len(self.file._observers), 0)
def test_notify(self):
self.file.notify()
self.assertIsNone(self.updater)
self.file.attach(self)
self.file.notify()
self.assertEqual(self.updater, self.file)
self.file.detach(self)
self.updater = None
self.file.notify()
self.assertIsNone(self.updater)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.remove",
"os.mknod",
"os.path.isfile",
"filesystem.basefile.BaseFile"
] |
[((3289, 3304), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3302, 3304), False, 'import unittest\n'), ((136, 161), 'os.mknod', 'os.mknod', (['"""/tmp/testfile"""'], {}), "('/tmp/testfile')\n", (144, 161), False, 'import os\n'), ((289, 314), 'filesystem.basefile.BaseFile', 'BaseFile', (['"""/tmp/testfile"""'], {}), "('/tmp/testfile')\n", (297, 314), False, 'from filesystem.basefile import BaseFile\n'), ((448, 479), 'os.path.isfile', 'os.path.isfile', (['"""/tmp/testfile"""'], {}), "('/tmp/testfile')\n", (462, 479), False, 'import os\n'), ((666, 686), 'filesystem.basefile.BaseFile', 'BaseFile', (['"""testname"""'], {}), "('testname')\n", (674, 686), False, 'from filesystem.basefile import BaseFile\n'), ((2276, 2297), 'filesystem.basefile.BaseFile', 'BaseFile', (['"""dummytest"""'], {}), "('dummytest')\n", (2284, 2297), False, 'from filesystem.basefile import BaseFile\n'), ((2619, 2640), 'filesystem.basefile.BaseFile', 'BaseFile', (['"""dummytest"""'], {}), "('dummytest')\n", (2627, 2640), False, 'from filesystem.basefile import BaseFile\n'), ((177, 208), 'os.path.isfile', 'os.path.isfile', (['"""/tmp/testfile"""'], {}), "('/tmp/testfile')\n", (191, 208), False, 'import os\n'), ((493, 519), 'os.remove', 'os.remove', (['"""/tmp/testfile"""'], {}), "('/tmp/testfile')\n", (502, 519), False, 'import os\n'), ((910, 929), 'filesystem.basefile.BaseFile', 'BaseFile', (['self.file'], {}), '(self.file)\n', (918, 929), False, 'from filesystem.basefile import BaseFile\n'), ((993, 1007), 'filesystem.basefile.BaseFile', 'BaseFile', (['None'], {}), '(None)\n', (1001, 1007), False, 'from filesystem.basefile import BaseFile\n'), ((1071, 1083), 'filesystem.basefile.BaseFile', 'BaseFile', (['(15)'], {}), '(15)\n', (1079, 1083), False, 'from filesystem.basefile import BaseFile\n')]
|
from wordcloud import WordCloud, STOPWORDS
import os
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Patch
from loguru import logger
from GEN_Utils import FileHandling
from GEN_Utils.HDF5_Utils import hdf_to_dict
logger.info('Import OK')
input_path = 'analysis_results/summary_stats/summary_stats.xlsx'
output_folder = 'images/'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Print all lone variables during execution
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# Set plotting backgrounds to white
matplotlib.rcParams.update(_VSCode_defaultMatplotlib_Params)
matplotlib.rcParams.update({'figure.facecolor': (1,1,1,1)})
# Retrieve cleaned data from HDF5
raw_data = pd.read_excel(input_path, sheetname=None)
raw_data.keys()
gender_summary = raw_data['per_gender']
gender_summary = gender_summary.drop(
[col for col in gender_summary.columns.tolist() if 'Unnamed' in col], axis=1)
# As Leadership levels were maintained separately in this table, need to map these to level 3 for 2019
# Generate data for plotting
for_plotting = gender_summary.copy().reset_index(drop=True)
males = for_plotting[['Year', 'type_cat'] +
[col for col in for_plotting if 'm_' in col]]
males.columns = ['Year', 'type_cat',
'Applications', 'Funded', 'Rate', 'Amount']
males['gender'] = 'M'
females = for_plotting[['Year', 'type_cat'] +
[col for col in for_plotting if 'f_' in col]]
females.columns = ['Year', 'type_cat',
'Applications', 'Funded', 'Rate', 'Amount']
females['gender'] = 'F'
for_plotting = pd.concat([males, females]).reset_index(drop=True)
for_plotting = for_plotting.groupby(['Year', 'gender', 'type_cat']).sum().drop('Rate', axis=1).reset_index()
numeric_cols = ['Year', 'type_cat', 'Applications', 'Funded', 'Amount']
for_plotting[numeric_cols] = for_plotting[numeric_cols].astype(float)
year_dict = {2015: 0, 2016: 1, 2017: 2, 2018: 3, 2019: 4}
for_plotting['Year_num'] = for_plotting['Year'].map(year_dict)
for_plotting['Amount'] = for_plotting['Amount'] / 1000000
for_plotting['proportion_Funded'] = for_plotting['Funded'] / for_plotting['Applications'] *100
total_funded = for_plotting.groupby(['Year', 'type_cat']).sum()['Funded'].to_dict()
total_amounts = for_plotting.groupby(['Year', 'type_cat']).sum()[
'Amount'].to_dict()
for_plotting['mapper'] = tuple(zip(for_plotting['Year'], for_plotting['type_cat']))
for_plotting['total_amount'] = for_plotting['mapper'].map(total_amounts)
for_plotting['total_funded'] = for_plotting['mapper'].map(total_funded)
for_plotting['proportion_amount'] = for_plotting['Amount'] / for_plotting['total_amount'] * 100
for_plotting['proportion_total_funded'] = for_plotting['Funded'] / \
for_plotting['total_funded'] * 100
# Generate plot 1
# sns.palplot(sns.color_palette("Purples"))
# fem_colour = sns.color_palette("Purples")[4]
fem_colour = '#511751'
male_colour = sns.color_palette("Oranges")[4]
col_pal = [fem_colour, male_colour]
labels = ['Female', 'Male']
df = for_plotting.groupby(['Year_num', 'gender']).sum().reset_index()
fig, ax = plt.subplots(figsize=(12, 5))
sns.barplot(x='Year_num', y='Amount', data=df, hue='gender', ax=ax, palette=col_pal)
legend_elements = [Patch(facecolor=col_pal[x], label=labels[x]) for x in range(0, len(labels))]
ax.legend(handles=legend_elements, loc='upper left', title='Funding Amount', ncol=3)
ax2 = ax.twinx()
sns.lineplot(x='Year_num', y='Funded', data=df,
hue='gender', marker='o', markersize=10, palette=col_pal, ax=ax2)
ax2.set_ylim(0, 200)
# Fix all the adjusted elements
plt.legend(labels, loc='upper left', title='Number funded', ncol=3, bbox_to_anchor=(0.67, 1.0))
ax.set_xlabel('Year of funding')
ax.set_ylabel('Total funding amount ($M AUD)')
ax2.set_ylabel('Number of successful applications', rotation=-90, labelpad=15)
plt.xticks(np.arange(0, 5, 1), labels=list(year_dict.keys()))
plt.title('Total funding awarded according to gender.', loc='left',
fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
plt.tight_layout()
plt.savefig(f'{output_folder}gender_total.png', dpi=300)
plt.show()
# Generate plot 2
for level, df in for_plotting.groupby('type_cat'):
plotting = df[df['gender'] == 'F']
fig, ax = plt.subplots(figsize=(10, 4))
m = sns.barplot(orient='h', y=list(plotting['Year_num']), x=[100 for x in plotting['Year_num']], color=male_colour)
f = sns.barplot(x=plotting['proportion_total_funded'], y=plotting['Year_num'], color=fem_colour, orient='h')
# Fix all the adjusted elements
ax.set_ylabel('Year of funding')
ax.set_xlabel('Proportion of funded applications (%)')
ax2.set_ylabel('Success rate (%)', rotation=-90, labelpad=15)
plt.yticks(np.arange(0, 5, 1), labels=list(year_dict.keys()))
plt.title(f'Proportion of Fellowships awarded by gender at level {int(level)}.', loc='left',
fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
ax.axvline(50, c='#636363', linestyle='--', linewidth=3)
plt.tight_layout()
plt.savefig(f'{output_folder}gender_proportion_level{level}.png', dpi=300)
plt.show()
|
[
"matplotlib.pyplot.title",
"seaborn.lineplot",
"matplotlib.pyplot.tight_layout",
"os.mkdir",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"seaborn.barplot",
"os.path.exists",
"pandas.read_excel",
"loguru.logger.info",
"numpy.arange",
"seaborn.color_palette",
"matplotlib.patches.Patch",
"matplotlib.pyplot.subplots",
"pandas.concat",
"matplotlib.pyplot.savefig"
] |
[((302, 326), 'loguru.logger.info', 'logger.info', (['"""Import OK"""'], {}), "('Import OK')\n", (313, 326), False, 'from loguru import logger\n'), ((841, 882), 'pandas.read_excel', 'pd.read_excel', (['input_path'], {'sheetname': 'None'}), '(input_path, sheetname=None)\n', (854, 882), True, 'import pandas as pd\n'), ((3259, 3288), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (3271, 3288), True, 'import matplotlib.pyplot as plt\n'), ((3289, 3378), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Year_num"""', 'y': '"""Amount"""', 'data': 'df', 'hue': '"""gender"""', 'ax': 'ax', 'palette': 'col_pal'}), "(x='Year_num', y='Amount', data=df, hue='gender', ax=ax, palette\n =col_pal)\n", (3300, 3378), True, 'import seaborn as sns\n'), ((3573, 3690), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Year_num"""', 'y': '"""Funded"""', 'data': 'df', 'hue': '"""gender"""', 'marker': '"""o"""', 'markersize': '(10)', 'palette': 'col_pal', 'ax': 'ax2'}), "(x='Year_num', y='Funded', data=df, hue='gender', marker='o',\n markersize=10, palette=col_pal, ax=ax2)\n", (3585, 3690), True, 'import seaborn as sns\n'), ((3753, 3852), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {'loc': '"""upper left"""', 'title': '"""Number funded"""', 'ncol': '(3)', 'bbox_to_anchor': '(0.67, 1.0)'}), "(labels, loc='upper left', title='Number funded', ncol=3,\n bbox_to_anchor=(0.67, 1.0))\n", (3763, 3852), True, 'import matplotlib.pyplot as plt\n'), ((4070, 4198), 'matplotlib.pyplot.title', 'plt.title', (['"""Total funding awarded according to gender."""'], {'loc': '"""left"""', 'fontdict': "{'fontsize': 15, 'fontweight': 'bold'}", 'pad': '(20)'}), "('Total funding awarded according to gender.', loc='left',\n fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)\n", (4079, 4198), True, 'import matplotlib.pyplot as plt\n'), ((4203, 4221), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4219, 4221), True, 'import matplotlib.pyplot as plt\n'), ((4222, 4278), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{output_folder}gender_total.png"""'], {'dpi': '(300)'}), "(f'{output_folder}gender_total.png', dpi=300)\n", (4233, 4278), True, 'import matplotlib.pyplot as plt\n'), ((4279, 4289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4287, 4289), True, 'import matplotlib.pyplot as plt\n'), ((427, 456), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (441, 456), False, 'import os\n'), ((462, 485), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (470, 485), False, 'import os\n'), ((3081, 3109), 'seaborn.color_palette', 'sns.color_palette', (['"""Oranges"""'], {}), "('Oranges')\n", (3098, 3109), True, 'import seaborn as sns\n'), ((3394, 3438), 'matplotlib.patches.Patch', 'Patch', ([], {'facecolor': 'col_pal[x]', 'label': 'labels[x]'}), '(facecolor=col_pal[x], label=labels[x])\n', (3399, 3438), False, 'from matplotlib.patches import Patch\n'), ((4019, 4037), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (4028, 4037), True, 'import numpy as np\n'), ((4415, 4444), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (4427, 4444), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4681), 'seaborn.barplot', 'sns.barplot', ([], {'x': "plotting['proportion_total_funded']", 'y': "plotting['Year_num']", 'color': 'fem_colour', 'orient': '"""h"""'}), "(x=plotting['proportion_total_funded'], y=plotting['Year_num'],\n 
color=fem_colour, orient='h')\n", (4584, 4681), True, 'import seaborn as sns\n'), ((5178, 5196), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5194, 5196), True, 'import matplotlib.pyplot as plt\n'), ((5201, 5275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{output_folder}gender_proportion_level{level}.png"""'], {'dpi': '(300)'}), "(f'{output_folder}gender_proportion_level{level}.png', dpi=300)\n", (5212, 5275), True, 'import matplotlib.pyplot as plt\n'), ((5280, 5290), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5288, 5290), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1771), 'pandas.concat', 'pd.concat', (['[males, females]'], {}), '([males, females])\n', (1753, 1771), True, 'import pandas as pd\n'), ((4893, 4911), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (4902, 4911), True, 'import numpy as np\n')]
|
from jk_hwriter import HWriter
from jk_rawhtml.htmlgeneral import *
from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement
from jk_rawhtml._HTMLCommentProto import _HTMLCommentProto, HTMLComment
from jk_rawhtml._HTMLRawTextProto import _HTMLRawTextProto, HTMLRawText
from jk_rawhtml._HTMLRawCSSProto import _HTMLRawCSSProto, HTMLRawCSS
from jk_rawhtml.HTML5RootElement import HTML5RootElement
from jk_rawhtml.HTML5HeadElement import HTML5HeadElement
from jk_rawhtml.HTML5Scope import HTML5Scope
from .HElement_HAbstractElementList import *
class HTMLScopeDefault(object):
spanNameBegin = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eNameB"})
spanElementName = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eElementName"})
spanNameEnd = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eNameE"})
spanAttributes = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eAttributes"})
spanAttrName = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eAttrName"})
spanAttrValue = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eAttrValue"})
divText = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eText"})
divTextInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eTextInline"})
divMain = _HTMLElementProto("div", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElement"})
divMainInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eElementInline"})
divElement = _HTMLElementProto("div", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElementWrapper"})
divElementInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes={"class": "eElementWrapper"})
divChildren = _HTMLElementProto("div", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElementChildren"})
divChildrenInline = _HTMLElementProto("span", tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={"class": "eElementChildrenInline"})
raw_html = _HTMLRawTextProto()
def __enter__(self):
return self
#
def __exit__(self, exc_type, exc_val, exc_tb):
pass
#
#
class HToolkit_Write_HTML(object):
@staticmethod
def writeHTMLDoc(root:HElement, w:HWriter):
assert isinstance(root, HElement)
assert isinstance(w, HWriter)
H = HTML5Scope()
scope = HTMLScopeDefault()
htmlRoot = H.html()[
H.head()[
H.raw_style_css("""
body {
font-family: 'Courier New', Courier, monospace;
font-size: 12px;
background-color: #f0f0f0;
color: #404040;
font-weight: normal;
}
.eElement {
margin-left: 20px;
}
.eElementInline {
}
.eElementChildren {
}
.eElementChildrenInline {
}
.eTextInline {
color: #006000;
background-color: #f0f8f0;
}
.eText {
margin-left: 20px;
color: #006000;
background-color: #f0f8f0;
display: block;
}
.eNameB {
color: #000060;
}
.eElementName {
background-color: #e8e8f8;
font-weight: bold;
}
.eAttrName {
font-weight: bold;
color: #008080;
}
.eAttrValue {
font-weight: bold;
color: #808000;
}
""")
],
H.body()[
HToolkit_Write_HTML.__convertElementToHTML(scope, root, False)
]
]
htmlRoot._serialize(w)
#
@staticmethod
def writeHTML(root:HElement, w:HWriter):
assert isinstance(root, HElement)
assert isinstance(w, HWriter)
scope = HTMLScopeDefault()
htmlElement = HToolkit_Write_HTML.__convertElementToHTML(scope, root, False)
htmlElement._serialize(w)
#
@staticmethod
def __convertElementToHTML(scope:HTMLScopeDefault, e:HElement, bInline:bool) -> HTMLElement:
divMain = scope.divMainInline if bInline else scope.divMain
divChildren = scope.divChildrenInline if bInline else scope.divChildren
# build attribute list
eAttrList = scope.spanAttributes()
for a in e.attributes:
if a.value:
eAttrList.children.extend([
scope.raw_html(" "),
scope.spanAttrName()[
a.name,
],
"=\"",
scope.spanAttrValue()[
a.value,
],
"\""
])
else:
eAttrList.children.extend([
scope.raw_html(" "),
scope.spanAttrName()[
a.name,
],
])
bChildsInline = e.name in [ "h1", "h2", "h3", "a", "b", "i", "img", "span", "label", "strong" ]
eChildrenList = []
for c in e.children:
if isinstance(c, HText):
if bChildsInline:
eChildrenList.append(scope.divTextInline()[
c.text
])
else:
eChildrenList.append(scope.divText()[
c.text
])
else:
eChildrenList.append(HToolkit_Write_HTML.__convertElementToHTML(scope, c, bInline or bChildsInline))
if eChildrenList:
if bChildsInline:
return divMain()[
scope.divElementInline()[
scope.spanNameBegin()[
"<",
scope.spanElementName()[
e.name
],
],
eAttrList,
scope.spanNameEnd()[
">",
],
eChildrenList,
scope.spanNameBegin()[
"</",
scope.spanElementName()[
e.name
],
],
scope.spanNameEnd()[
">",
]
]
]
else:
return divMain()[
scope.divElement()[
scope.spanNameBegin()[
"<",
scope.spanElementName()[
e.name
],
],
eAttrList,
scope.spanNameEnd()[
">",
]
],
divChildren()[
eChildrenList
],
scope.divElement()[
scope.spanNameBegin()[
"</",
scope.spanElementName()[
e.name
],
],
scope.spanNameEnd()[
">",
]
]
]
else:
return divMain()[
scope.divElement()[
scope.spanNameBegin()[
"<",
scope.spanElementName()[
e.name
],
],
eAttrList,
scope.spanNameEnd()[
" />",
]
]
]
#
#
|
[
"jk_rawhtml._HTMLRawTextProto._HTMLRawTextProto",
"jk_rawhtml._HTMLElementProto._HTMLElementProto",
"jk_rawhtml.HTML5Scope.HTML5Scope"
] |
[((617, 718), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eNameB'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eNameB'})\n", (634, 718), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((733, 840), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eElementName'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eElementName'})\n", (750, 840), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((851, 952), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eNameE'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eNameE'})\n", (868, 952), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((966, 1072), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eAttributes'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eAttributes'})\n", (983, 1072), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1084, 1188), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eAttrName'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eAttrName'})\n", (1101, 1188), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1201, 1306), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eAttrValue'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eAttrValue'})\n", (1218, 1306), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1313, 1413), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eText'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eText'})\n", (1330, 1413), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1426, 1532), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eTextInline'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eTextInline'})\n", (1443, 1532), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1539, 1640), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""div"""'], {'tagType': 'HTML_TAG_TYPE_STRUCTURE', 'extraAttributes': "{'class': 'eElement'}"}), "('div', tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={\n 'class': 'eElement'})\n", (1556, 1640), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1653, 1762), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 
'extraAttributes': "{'class': 'eElementInline'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eElementInline'})\n", (1670, 1762), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1772, 1880), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""div"""'], {'tagType': 'HTML_TAG_TYPE_STRUCTURE', 'extraAttributes': "{'class': 'eElementWrapper'}"}), "('div', tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={\n 'class': 'eElementWrapper'})\n", (1789, 1880), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((1896, 2006), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_INLINE_ALL', 'extraAttributes': "{'class': 'eElementWrapper'}"}), "('span', tagType=HTML_TAG_TYPE_INLINE_ALL, extraAttributes\n ={'class': 'eElementWrapper'})\n", (1913, 2006), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((2017, 2126), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""div"""'], {'tagType': 'HTML_TAG_TYPE_STRUCTURE', 'extraAttributes': "{'class': 'eElementChildren'}"}), "('div', tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes={\n 'class': 'eElementChildren'})\n", (2034, 2126), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((2143, 2259), 'jk_rawhtml._HTMLElementProto._HTMLElementProto', '_HTMLElementProto', (['"""span"""'], {'tagType': 'HTML_TAG_TYPE_STRUCTURE', 'extraAttributes': "{'class': 'eElementChildrenInline'}"}), "('span', tagType=HTML_TAG_TYPE_STRUCTURE, extraAttributes=\n {'class': 'eElementChildrenInline'})\n", (2160, 2259), False, 'from jk_rawhtml._HTMLElementProto import _HTMLElementProto, HTMLElement\n'), ((2268, 2287), 'jk_rawhtml._HTMLRawTextProto._HTMLRawTextProto', '_HTMLRawTextProto', ([], {}), '()\n', (2285, 2287), False, 'from jk_rawhtml._HTMLRawTextProto import _HTMLRawTextProto, HTMLRawText\n'), ((2563, 2575), 'jk_rawhtml.HTML5Scope.HTML5Scope', 'HTML5Scope', ([], {}), '()\n', (2573, 2575), False, 'from jk_rawhtml.HTML5Scope import HTML5Scope\n')]
|
# Generated by Django 1.11.18 on 2019-03-04 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0006_customer_backend_id'),
]
operations = [
migrations.AddField(
model_name='customer',
name='blocked',
field=models.BooleanField(default=False),
),
]
|
[
"django.db.models.BooleanField"
] |
[((341, 375), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (360, 375), False, 'from django.db import migrations, models\n')]
|
import logging
import antlr4
from antlr4.error.ErrorListener import ErrorListener
import click
from .antlr.CLexer import CLexer
from .antlr.CParser import CParser
logger = logging.getLogger(__name__)
class MyErrorListener(ErrorListener):
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
if offendingSymbol.text not in ['int']: # Hack
raise SyntaxError(f"line {line}:{column} {msg}")
logger.info(f"Syntax Error skip: '{offendingSymbol.text}'. {e}")
def run(filepath: str):
input_stream = antlr4.FileStream(filepath)
lexer = CLexer(input_stream)
stream = antlr4.CommonTokenStream(lexer)
parser = CParser(stream)
parser.removeErrorListeners()
parser.addErrorListener(listener=MyErrorListener())
parser.primaryExpression() # tree
@click.command()
@click.option('--filepath', type=str, required=True)
def main(filepath):
run(filepath=filepath)
if __name__ == '__main__':
main()
|
[
"antlr4.CommonTokenStream",
"antlr4.FileStream",
"click.option",
"click.command",
"logging.getLogger"
] |
[((176, 203), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (193, 203), False, 'import logging\n'), ((828, 843), 'click.command', 'click.command', ([], {}), '()\n', (841, 843), False, 'import click\n'), ((845, 896), 'click.option', 'click.option', (['"""--filepath"""'], {'type': 'str', 'required': '(True)'}), "('--filepath', type=str, required=True)\n", (857, 896), False, 'import click\n'), ((559, 586), 'antlr4.FileStream', 'antlr4.FileStream', (['filepath'], {}), '(filepath)\n', (576, 586), False, 'import antlr4\n'), ((633, 664), 'antlr4.CommonTokenStream', 'antlr4.CommonTokenStream', (['lexer'], {}), '(lexer)\n', (657, 664), False, 'import antlr4\n')]
|
# -*- coding: utf-8 -*-
""" Print information of the users who got unassigned tickets."""
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from p3 import models as p3_models
from assopy import models as assopy_models
from optparse import make_option
### Globals
### Helpers
def conference_year(conference=settings.CONFERENCE_CONFERENCE):
return conference[-2:]
def get_all_order_tickets(conference=settings.CONFERENCE_CONFERENCE):
year = conference_year(conference)
orders = assopy_models.Order.objects.filter(_complete=True)
conf_orders = [order for order in orders if order.code.startswith('O/{}.'.format(year))]
order_tkts = [ordi.ticket
for order in conf_orders
for ordi in order.orderitem_set.all()
if ordi.ticket is not None]
conf_order_tkts = [ot for ot in order_tkts if ot.fare.code.startswith('T')]
return conf_order_tkts
def get_assigned_ticket(ticket_id):
return p3_models.TicketConference.objects.filter(ticket=ticket_id)
def has_assigned_ticket(ticket_id):
return bool(get_assigned_ticket(ticket_id))
# def is_ticket_assigned_to_someone_else(ticket, user):
# tickets = p3_models.TicketConference.objects.filter(ticket_id=ticket.id)
#
# if not tickets:
# return False
# #from IPython.core.debugger import Tracer
# #Tracer()()
# #raise RuntimeError('Could not find any ticket with ticket_id {}.'.format(ticket))
#
# if len(tickets) > 1:
# raise RuntimeError('You got more than one ticket from a ticket_id.'
# 'Tickets obtained: {}.'.format(tickets))
#
# tkt = tickets[0]
# if tkt.ticket.user_id != user.id:
# return True
#
# if not tkt.assigned_to:
# return False
#
# if tkt.assigned_to == user.email:
# return False
# else:
# return True
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--emails',
action='store_true',
dest='emails',
default=False,
help='Will print user emails.',
),
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
def handle(self, *args, **options):
print('This script does not work anymore, do not use it.')
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
tkts = get_all_order_tickets(conference)
if not tkts:
raise IndexError('Could not find any tickets for conference {}.'.format(conference))
# unassigned tickets
un_tkts = [t for t in tkts if not t.p3_conference.assigned_to]
# users with unassigned tickets
users = set()
for ut in un_tkts:
users.add(ut.user)
if options['emails']:
output = sorted([usr.email.encode('utf-8') for usr in users])
else:
output = sorted([usr.get_full_name().encode('utf-8') for usr in users])
if output:
print(', '.join(output))
|
[
"assopy.models.Order.objects.filter",
"p3.models.TicketConference.objects.filter",
"django.core.management.base.CommandError",
"optparse.make_option"
] |
[((556, 606), 'assopy.models.Order.objects.filter', 'assopy_models.Order.objects.filter', ([], {'_complete': '(True)'}), '(_complete=True)\n', (590, 606), True, 'from assopy import models as assopy_models\n'), ((1056, 1115), 'p3.models.TicketConference.objects.filter', 'p3_models.TicketConference.objects.filter', ([], {'ticket': 'ticket_id'}), '(ticket=ticket_id)\n', (1097, 1115), True, 'from p3 import models as p3_models\n'), ((2058, 2168), 'optparse.make_option', 'make_option', (['"""--emails"""'], {'action': '"""store_true"""', 'dest': '"""emails"""', 'default': '(False)', 'help': '"""Will print user emails."""'}), "('--emails', action='store_true', dest='emails', default=False,\n help='Will print user emails.')\n", (2069, 2168), False, 'from optparse import make_option\n'), ((2628, 2668), 'django.core.management.base.CommandError', 'CommandError', (['"""conference not specified"""'], {}), "('conference not specified')\n", (2640, 2668), False, 'from django.core.management.base import BaseCommand, CommandError\n')]
|
from django import forms
from uploads.core.models import Document
#from uploads.core.models import File
# Create a form based on the model; it inherits the description and document fields
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = ('description', 'document', )
class nameForm(forms.Form):
rename=forms.CharField()
# class FileForm(forms.ModelForm):
# class Meta:
# model = File
# fields = ('filename',)
# file = forms.FileField()
# pid = forms.CharField(max_length=20)
# name = forms.CharField(max_length=20)
# sex = forms.CharField()
# age = forms.IntegerField()
# mp = forms.IntegerField()
# scanType = forms.CharField(max_length=10)
# fracture = forms.IntegerField()
# tscore = forms.CharField()
# zscore = forms.CharField()
# region = forms.CharField()
# lva = forms.CharField()
# apspine = forms.CharField()
# dualfemur = forms.CharField()
# combination = forms.CharField()
|
[
"django.forms.CharField"
] |
[((316, 333), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (331, 333), False, 'from django import forms\n')]
|
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.cloudformation
from troposphere.cloudformation import (
InitFileContext as _InitFileContext,
Tags as _Tags,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class Stack(troposphere.cloudformation.Stack, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
TemplateURL=REQUIRED, # type: Union[str, AWSHelperFn]
NotificationARNs=NOTHING, # type: List[Union[str, AWSHelperFn]]
Parameters=NOTHING, # type: dict
Tags=NOTHING, # type: Union[_Tags, list]
TimeoutInMinutes=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
TemplateURL=TemplateURL,
NotificationARNs=NotificationARNs,
Parameters=Parameters,
Tags=Tags,
TimeoutInMinutes=TimeoutInMinutes,
**kwargs
)
super(Stack, self).__init__(**processed_kwargs)
class WaitCondition(troposphere.cloudformation.WaitCondition, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Count=NOTHING, # type: int
Handle=NOTHING, # type: Union[str, AWSHelperFn]
Timeout=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Count=Count,
Handle=Handle,
Timeout=Timeout,
**kwargs
)
super(WaitCondition, self).__init__(**processed_kwargs)
class WaitConditionHandle(troposphere.cloudformation.WaitConditionHandle, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
**kwargs
)
super(WaitConditionHandle, self).__init__(**processed_kwargs)
class InitFile(troposphere.cloudformation.InitFile, Mixin):
def __init__(self,
title=None,
content=NOTHING, # type: Union[str, AWSHelperFn]
mode=NOTHING, # type: Union[str, AWSHelperFn]
owner=NOTHING, # type: Union[str, AWSHelperFn]
encoding=NOTHING, # type: str
group=NOTHING, # type: Union[str, AWSHelperFn]
source=NOTHING, # type: Union[str, AWSHelperFn]
authentication=NOTHING, # type: Union[str, AWSHelperFn]
context=NOTHING, # type: _InitFileContext
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
content=content,
mode=mode,
owner=owner,
encoding=encoding,
group=group,
source=source,
authentication=authentication,
context=context,
**kwargs
)
super(InitFile, self).__init__(**processed_kwargs)
class InitService(troposphere.cloudformation.InitService, Mixin):
def __init__(self,
title=None,
ensureRunning=NOTHING, # type: bool
enabled=NOTHING, # type: bool
files=NOTHING, # type: list
packages=NOTHING, # type: dict
sources=NOTHING, # type: list
commands=NOTHING, # type: list
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ensureRunning=ensureRunning,
enabled=enabled,
files=files,
packages=packages,
sources=sources,
commands=commands,
**kwargs
)
super(InitService, self).__init__(**processed_kwargs)
class InitConfig(troposphere.cloudformation.InitConfig, Mixin):
def __init__(self,
title=None,
groups=NOTHING, # type: dict
users=NOTHING, # type: dict
sources=NOTHING, # type: dict
packages=NOTHING, # type: dict
files=NOTHING, # type: dict
commands=NOTHING, # type: dict
services=NOTHING, # type: dict
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
groups=groups,
users=users,
sources=sources,
packages=packages,
files=files,
commands=commands,
services=services,
**kwargs
)
super(InitConfig, self).__init__(**processed_kwargs)
class AuthenticationBlock(troposphere.cloudformation.AuthenticationBlock, Mixin):
def __init__(self,
title=None,
accessKeyId=NOTHING, # type: Union[str, AWSHelperFn]
buckets=NOTHING, # type: List[Union[str, AWSHelperFn]]
password=NOTHING, # type: Union[str, AWSHelperFn]
secretKey=NOTHING, # type: Union[str, AWSHelperFn]
type=NOTHING, # type: Any
uris=NOTHING, # type: List[Union[str, AWSHelperFn]]
username=NOTHING, # type: Union[str, AWSHelperFn]
roleName=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
accessKeyId=accessKeyId,
buckets=buckets,
password=password,
secretKey=secretKey,
type=type,
uris=uris,
username=username,
roleName=roleName,
**kwargs
)
super(AuthenticationBlock, self).__init__(**processed_kwargs)
|
[
"troposphere_mate.core.mate.preprocess_init_kwargs"
] |
[((1153, 1383), 'troposphere_mate.core.mate.preprocess_init_kwargs', 'preprocess_init_kwargs', ([], {'title': 'title', 'template': 'template', 'validation': 'validation', 'TemplateURL': 'TemplateURL', 'NotificationARNs': 'NotificationARNs', 'Parameters': 'Parameters', 'Tags': 'Tags', 'TimeoutInMinutes': 'TimeoutInMinutes'}), '(title=title, template=template, validation=\n    validation, TemplateURL=TemplateURL, NotificationARNs=NotificationARNs,\n    Parameters=Parameters, Tags=Tags, TimeoutInMinutes=TimeoutInMinutes, **\n    kwargs)\n', (1175, 1383), False, 'from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\n'), ((1981, 2118), 'troposphere_mate.core.mate.preprocess_init_kwargs', 'preprocess_init_kwargs', ([], {'title': 'title', 'template': 'template', 'validation': 'validation', 'Count': 'Count', 'Handle': 'Handle', 'Timeout': 'Timeout'}), '(title=title, template=template, validation=\n    validation, Count=Count, Handle=Handle, Timeout=Timeout, **kwargs)\n', (2003, 2118), False, 'from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\n'), ((2566, 2658), 'troposphere_mate.core.mate.preprocess_init_kwargs', 'preprocess_init_kwargs', ([], {'title': 'title', 'template': 'template', 'validation': 'validation'}), '(title=title, template=template, validation=\n    validation, **kwargs)\n', (2588, 2658), False, 'from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\n'), ((3452, 3642), 'troposphere_mate.core.mate.preprocess_init_kwargs', 'preprocess_init_kwargs', ([], {'title': 'title', 'content': 'content', 'mode': 'mode', 'owner': 'owner', 'encoding': 'encoding', 'group': 'group', 'source': 'source', 'authentication': 'authentication', 'context': 'context'}), '(title=title, content=content, mode=mode, owner=owner,\n    encoding=encoding, group=group, source=source, authentication=\n    authentication, context=context, **kwargs)\n', (3474, 3642), False, 'from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\n'), ((4286, 4455), 'troposphere_mate.core.mate.preprocess_init_kwargs', 'preprocess_init_kwargs', ([], {'title': 'title', 'ensureRunning': 'ensureRunning', 'enabled': 'enabled', 'files': 'files', 'packages': 'packages', 'sources': 'sources', 'commands': 'commands'}), '(title=title, ensureRunning=ensureRunning, enabled=\n    enabled, files=files, packages=packages, sources=sources, commands=\n    commands, **kwargs)\n', (4308, 4455), False, 'from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\n'), ((5114, 5284), 'troposphere_mate.core.mate.preprocess_init_kwargs', 'preprocess_init_kwargs', ([], {'title': 'title', 'groups': 'groups', 'users': 'users', 'sources': 'sources', 'packages': 'packages', 'files': 'files', 'commands': 'commands', 'services': 'services'}), '(title=title, groups=groups, users=users, sources=\n    sources, packages=packages, files=files, commands=commands, services=\n    services, **kwargs)\n', (5136, 5284), False, 'from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\n'), ((6168, 6364), 'troposphere_mate.core.mate.preprocess_init_kwargs', 'preprocess_init_kwargs', ([], {'title': 'title', 'accessKeyId': 'accessKeyId', 'buckets': 'buckets', 'password': 'password', 'secretKey': 'secretKey', 'type': 'type', 'uris': 'uris', 'username': 'username', 'roleName': 'roleName'}), '(title=title, accessKeyId=accessKeyId, buckets=\n    buckets, password=password, secretKey=secretKey, type=type, uris=uris,\n    username=username, roleName=roleName, **kwargs)\n', (6190, 6364), False, 'from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin\n')]
|
from fastapi import APIRouter
from src.controllers import healthcheck
router = APIRouter(
prefix="/healthcheck",
tags=["healthcheck"]
)
@router.get("", name="Health check", response_model=str)
async def runGetHealthCheck():
return healthcheck.getHealthCheck()
|
[
"src.controllers.healthcheck.getHealthCheck",
"fastapi.APIRouter"
] |
[((80, 134), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/healthcheck"""', 'tags': "['healthcheck']"}), "(prefix='/healthcheck', tags=['healthcheck'])\n", (89, 134), False, 'from fastapi import APIRouter\n'), ((246, 274), 'src.controllers.healthcheck.getHealthCheck', 'healthcheck.getHealthCheck', ([], {}), '()\n', (272, 274), False, 'from src.controllers import healthcheck\n')]
|
# -*- coding: UTF-8 -*-
import collections
import math
import md5
from nose.tools import nottest
import raco.algebra
import raco.fakedb
import raco.myrial.interpreter as interpreter
import raco.scheme as scheme
import raco.myrial.groupby
import raco.myrial.myrial_test as myrial_test
from raco.algebra import Apply
from raco import types
from raco.myrial.exceptions import *
from raco.expression import NestedAggregateException
from raco.fake_data import FakeData
from raco.types import LONG_TYPE
class TestQueryFunctions(myrial_test.MyrialTestCase, FakeData):
def setUp(self):
super(TestQueryFunctions, self).setUp()
self.db.add_function(TestQueryFunctions.test_function)
self.db.ingest(TestQueryFunctions.emp_key,
TestQueryFunctions.emp_table,
TestQueryFunctions.emp_schema)
self.db.ingest(TestQueryFunctions.dept_key,
TestQueryFunctions.dept_table,
TestQueryFunctions.dept_schema)
self.db.ingest(TestQueryFunctions.numbers_key,
TestQueryFunctions.numbers_table,
TestQueryFunctions.numbers_schema)
def test_scan_emp(self):
query = """
emp = SCAN(%s);
STORE(emp, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_scan_dept(self):
query = """
dept = SCAN(%s);
STORE(dept, OUTPUT);
""" % self.dept_key
self.check_result(query, self.dept_table)
def test_bag_comp_emit_star(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT *];
STORE(bc, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_bag_comp_emit_table_wildcard(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT emp.*];
STORE(bc, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_hybrid_emit_clause(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
x = [FROM dept, emp as X EMIT 5, X.salary * 2 AS k, X.*, *];
STORE(x, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = [(5, e[3] * 2) + e + d + e for e in self.emp_table
for d in self.dept_table]
self.check_result(query, collections.Counter(expected))
salary_filter_query = """
emp = SCAN(%s);
rich = [FROM emp WHERE %s > 25 * 10 * 10 * (5 + 5) EMIT *];
STORE(rich, OUTPUT);
"""
salary_expected_result = collections.Counter(
[x for x in FakeData.emp_table.elements() if x[3] > 25000])
def test_bag_comp_filter_large_salary_by_name(self):
query = TestQueryFunctions.salary_filter_query % (self.emp_key,
'salary')
self.check_result(query, TestQueryFunctions.salary_expected_result)
def test_bag_comp_filter_large_salary_by_position(self):
query = TestQueryFunctions.salary_filter_query % (self.emp_key, '$3')
self.check_result(query, TestQueryFunctions.salary_expected_result)
def test_bag_comp_filter_empty_result(self):
query = """
emp = SCAN(%s);
poor = [FROM emp WHERE $3 < (5 * 2) EMIT *];
STORE(poor, OUTPUT);
""" % self.emp_key
expected = collections.Counter()
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ge(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE 2 * $1 >= $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if 2 * x[1] >= x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ge2(self):
query = u"""
emp = SCAN(%s);
out = [FROM emp WHERE 2 * $1 ≥ $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if 2 * x[1] >= x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_le(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $1 <= 2 * $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[1] <= 2 * x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_le2(self):
query = u"""
emp = SCAN(%s);
out = [FROM emp WHERE $1 ≤ 2 * $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[1] <= 2 * x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_gt(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE 2 * $1 > $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if 2 * x[1] > x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_lt(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $1 < 2 * $0 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[1] < 2 * x[0]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_eq(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 * 2 == $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] * 2 == x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ne(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 // $1 != $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] / x[1] != x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ne2(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 // $1 <> $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] / x[1] != x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_column_compare_ne3(self):
query = u"""
emp = SCAN(%s);
out = [FROM emp WHERE $0 // $1 ≠ $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] / x[1] != x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_minus(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $0 + -$1 == $1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[0] - x[1] == x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_and(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE salary == 25000 AND id > dept_id EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000 and
x[0] > x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_or(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE $3 > 25 * 1000 OR id > dept_id EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] > 25000 or
x[0] > x[1]])
self.check_result(query, expected)
def test_bag_comp_filter_not(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE not salary > 25000 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if not x[3] > 25000])
self.check_result(query, expected)
def test_bag_comp_filter_or_and(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE salary == 25000 OR salary == 5000 AND
dept_id == 1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000 or
(x[3] == 5000 and x[1] == 1)])
self.check_result(query, expected)
def test_bag_comp_filter_or_and_not(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE salary == 25000 OR NOT salary == 5000 AND
dept_id == 1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000 or not
x[3] == 5000 and x[1] == 1])
self.check_result(query, expected)
def test_bag_comp_emit_columns(self):
query = """
emp = SCAN(%s);
out = [FROM emp WHERE dept_id == 1 EMIT $2, salary AS salary];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[2], x[3]) for x in self.emp_table.elements() if x[1] == 1])
self.check_result(query, expected)
def test_bag_comp_emit_literal(self):
query = """
emp = SCAN(%s);
out = [FROM emp EMIT salary, "bugga bugga"];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[3], "bugga bugga") for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_bag_comp_emit_with_math(self):
query = """
emp = SCAN(%s);
out = [FROM emp EMIT salary + 5000, salary - 5000, salary // 5000,
salary * 5000];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[3] + 5000, x[3] - 5000, x[3] / 5000, x[3] * 5000)
for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_bag_comp_rename(self):
query = """
emp = SCAN(%s);
out = [FROM emp EMIT name, salary * 2 AS double_salary];
out = [FROM out WHERE double_salary > 10000 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[2], x[3] * 2) for x in self.emp_table.elements() if
x[3] * 2 > 10000])
self.check_result(query, expected)
join_expected = collections.Counter(
[('<NAME>', 'human resources'),
('<NAME>', 'accounting'),
('<NAME>', 'accounting'),
('<NAME>', 'human resources'),
('<NAME>', 'accounting'),
('<NAME>', 'engineering'),
('<NAME>', 'accounting')])
def test_explicit_join_unicode(self):
query = u"""
emp = SCAN(%s);
dept = SCAN(%s);
out = JOIN(emp, dept_id, dept, id);
out2 = [FROM out EMIT $2 AS emp_name, $5 AS dept_name];
STORE(out2, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_explicit_join(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
out = JOIN(emp, dept_id, dept, id);
out2 = [FROM out EMIT $2 AS emp_name, $5 AS dept_name];
STORE(out2, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_explicit_join_twocols(self):
query = """
query = [1 as dept_id, 25000 as salary];
emp = SCAN({emp});
out = JOIN(query, (dept_id, salary), emp, (dept_id, salary));
out2 = [FROM out EMIT name];
STORE(out2, OUTPUT);
""".format(emp=self.emp_key)
expected = collections.Counter([('<NAME>',),
('<NAME>',)])
self.check_result(query, expected)
def test_bagcomp_join_via_names(self):
query = """
out = [FROM SCAN(%s) E, SCAN(%s) AS D WHERE E.dept_id == D.id
EMIT E.name AS emp_name, D.name AS dept_name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_bagcomp_join_via_pos(self):
query = """
E = SCAN(%s);
D = SCAN(%s);
out = [FROM E, D WHERE E.$1 == D.$0
EMIT E.name AS emp_name, D.$1 AS dept_name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_two_column_join(self):
query = """
D = [1 as dept_id, 25000 as salary];
out = [FROM D, SCAN({emp}) E
WHERE E.dept_id == D.dept_id AND E.salary == D.salary
EMIT E.name AS emp_name];
STORE(out, OUTPUT);
""".format(emp=self.emp_key)
expected = collections.Counter([('<NAME>',),
('<NAME>',)])
self.check_result(query, expected)
def test_join_with_select(self):
query = """
out = [FROM SCAN(%s) AS D, SCAN(%s) E
WHERE E.dept_id == D.id AND E.salary < 6000
EMIT E.name AS emp_name, D.name AS dept_name];
STORE(out, OUTPUT);
""" % (self.dept_key, self.emp_key)
expected = collections.Counter([('<NAME>', 'accounting'),
('<NAME>', 'human resources')])
self.check_result(query, expected)
def test_join_with_reordering(self):
# Try both FROM orders of the query and verify they both get the
# correct answer.
query = """
out = [FROM SCAN({d}) AS D, SCAN({e}) E
WHERE E.dept_id == D.id AND E.salary < 6000
EMIT E.name, D.id];
STORE(out, OUTPUT);
""".format(d=self.dept_key, e=self.emp_key)
expected = collections.Counter([('<NAME>', 1),
('<NAME>', 2)])
self.check_result(query, expected)
# Swap E and D
query = """
out = [FROM SCAN({e}) E, SCAN({d}) AS D
WHERE E.dept_id == D.id AND E.salary < 6000
EMIT E.name, D.id];
STORE(out, OUTPUT);
""".format(d=self.dept_key, e=self.emp_key)
expected = collections.Counter([('<NAME>', 1),
('<NAME>', 2)])
self.check_result(query, expected)
def test_sql_join(self):
"""SQL-style select-from-where join"""
query = """
E = SCAN(%s);
D = SCAN(%s);
out = SELECT E.name, D.name FROM E, D WHERE E.dept_id = D.id;
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
self.check_result(query, self.join_expected)
def test_bagcomp_nested_sql(self):
"""Test nesting SQL inside a bag comprehension"""
query = """
out = [FROM (SELECT name, salary
FROM SCAN(%s) AS X
WHERE salary > 5000) AS Y
WHERE salary < 80000
EMIT *];
STORE(out, OUTPUT);
""" % (self.emp_key,)
tuples = [(e[2], e[3]) for e in self.emp_table.elements()
if e[3] < 80000 and e[3] > 5000]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_sql_nested_sql(self):
"""Test nesting SQL inside SQL"""
query = """
out = SELECT Y.name, Y.salary
FROM (SELECT name, salary
FROM SCAN(%s) AS X
WHERE salary > 5000) AS Y
WHERE Y.salary < 80000;
STORE(out, OUTPUT);
""" % (self.emp_key,)
tuples = [(e[2], e[3]) for e in self.emp_table.elements()
if e[3] < 80000 and e[3] > 5000]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_sql_nested_bagcomp(self):
"""Test nesting a bag comprehension inside SQL"""
query = """
out = SELECT Y.name, Y.salary FROM
[FROM SCAN(%s) AS X WHERE salary > 5000 EMIT X.*] AS Y
WHERE Y.salary < 80000;
STORE(out, OUTPUT);
""" % (self.emp_key,)
tuples = [(e[2], e[3]) for e in self.emp_table.elements()
if e[3] < 80000 and e[3] > 5000]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_bagcomp_projection(self):
"""Test that column names are preserved across projection."""
query = """
E = SCAN(%s);
F = [FROM E EMIT $2];
out = [FROM F EMIT name];
STORE(out, OUTPUT);
""" % (self.emp_key,)
tpls = [tuple([x[2]]) for x in self.emp_table]
expected = collections.Counter(tpls)
self.check_result(query, expected)
def test_bagcomp_no_column_name(self):
"""Test that the system handles an omitted output column name."""
query = """
E = SCAN(%s);
F = [FROM E EMIT salary*E.salary];
out = [FROM F EMIT $0];
STORE(out, OUTPUT);
""" % (self.emp_key,)
tpls = [tuple([x[3] * x[3]]) for x in self.emp_table]
expected = collections.Counter(tpls)
self.check_result(query, expected)
def test_explicit_cross(self):
query = """
out = CROSS(SCAN(%s), SCAN(%s));
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
tuples = [e + d for e in self.emp_table.elements() for
d in self.dept_table.elements()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_bagcomp_cross(self):
query = """
out = [FROM SCAN(%s) E, SCAN(%s) AS D EMIT *];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
tuples = [e + d for e in self.emp_table.elements() for
d in self.dept_table.elements()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_distinct(self):
query = """
out = DISTINCT([FROM SCAN(%s) AS X EMIT salary]);
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(25000,), (5000,), (90000,)])
self.check_result(query, expected)
def test_sql_distinct(self):
query = """
out = SELECT DISTINCT salary AS salary FROM SCAN(%s) AS X;
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(set([(x[3],) for x in self.emp_table]))
self.check_result(query, expected)
def test_sql_repeated(self):
query = """
out = SELECT salary AS salary FROM SCAN(%s) AS X;
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(x[3],) for x in self.emp_table])
self.check_result(query, expected)
def test_limit_without_orderby_assert(self):
query = """
out = LIMIT(SCAN(%s), 3);
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(Exception): # noqa
self.check_result(query, None)
def test_orderby_without_limit_assert(self):
query = """
out = SELECT * FROM SCAN(%s) as X ORDER BY $0;
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(Exception): # noqa
self.check_result(query, None)
def test_limit_orderby(self):
query = """
out = [FROM SCAN(%s) as X EMIT * ORDER BY $0 ASC LIMIT 3];
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
expectedResult = collections.Counter(
sorted(self.emp_table.elements(), key=lambda emp: emp[0])[:3])
self.assertEquals(result, expectedResult)
def test_sql_limit_orderby(self):
query = """
out = SELECT * FROM SCAN(%s) as X ORDER BY $0 ASC LIMIT 3;
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
expectedResult = collections.Counter(
sorted(self.emp_table.elements(), key=lambda emp: emp[0])[:3])
self.assertEquals(result, expectedResult)
def test_limit_orderby_multikey(self):
query = """
out = [FROM SCAN(%s) as X EMIT *
ORDER BY $1 ASC, $3 DESC, $2 ASC
LIMIT 3];
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
firstSort = sorted(self.emp_table.elements(), key=lambda emp: emp[2])
secondSort = sorted(firstSort, key=lambda emp: emp[3], reverse=True)
thirdSortLimit = sorted(secondSort, key=lambda emp: emp[1])[:3]
expectedResult = collections.Counter(thirdSortLimit)
self.assertEquals(result, expectedResult)
def test_sql_limit_orderby_multikey(self):
query = """
out = SELECT * FROM SCAN(%s) as X
ORDER BY $1 ASC, $3 DESC, $2 ASC
LIMIT 3;
STORE(out, OUTPUT);
""" % self.emp_key
result = self.execute_query(query)
firstSort = sorted(self.emp_table.elements(), key=lambda emp: emp[2])
secondSort = sorted(firstSort, key=lambda emp: emp[3], reverse=True)
thirdSortLimit = sorted(secondSort, key=lambda emp: emp[1])[:3]
expectedResult = collections.Counter(thirdSortLimit)
self.assertEquals(result, expectedResult)
def test_table_literal_boolean(self):
query = """
X = [truE as MyTrue, FaLse as MyFalse];
Y = [FROM scan(%s) as E, X where X.MyTrue emit *];
STORE(Y, OUTPUT);
""" % self.emp_key
res = [x + (True, False) for x in self.emp_table]
self.check_result(query, collections.Counter(res))
def test_table_literal_scalar_expression(self):
query = """
X = [FROM ["Andrew", (50 * (500 + 500)) AS salary] Z EMIT salary];
STORE(X, OUTPUT);
"""
expected = collections.Counter([(50000,)])
self.check_result(query, expected)
def test_table_literal_unbox(self):
query = """
A = [1 AS one, 2 AS two, 3 AS three];
B = [1 AS one, 2 AS two, 3 AS three];
C = [*A.two * *B.three];
STORE(C, OUTPUT);
"""
expected = collections.Counter([(6,)])
self.check_result(query, expected)
def test_unbox_from_where_single(self):
query = """
TH = [25 * 1000];
emp = SCAN(%s);
out = [FROM emp WHERE $3 > *TH EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] > 25000])
self.check_result(query, expected)
def test_unbox_from_where_multi(self):
query = """
TWO = [2];
FOUR = [4];
EIGHT = [8];
emp = SCAN(%s);
out = [FROM emp WHERE *EIGHT == *TWO**FOUR EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_unbox_from_where_nary_name(self):
query = """
_CONST = [25 AS twenty_five, 1000 AS thousand];
emp = SCAN(%s);
out = [FROM emp WHERE salary == *_CONST.twenty_five *
*_CONST.thousand EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000])
self.check_result(query, expected)
def test_unbox_from_where_nary_pos(self):
query = """
_CONST = [25 AS twenty_five, 1000 AS thousand];
emp = SCAN(%s);
out = [FROM emp WHERE salary == *_CONST.$0 *
*_CONST.$1 EMIT *];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[x for x in self.emp_table.elements() if x[3] == 25000])
self.check_result(query, expected)
def test_unbox_from_emit_single(self):
query = """
THOUSAND = [1000];
emp = SCAN(%s);
out = [FROM emp EMIT salary * *THOUSAND AS salary];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[3] * 1000,) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_unbox_kitchen_sink(self):
query = """
C1 = [25 AS a, 100 AS b];
C2 = [50 AS a, 1000 AS b];
emp = SCAN(%s);
out = [FROM emp WHERE salary==*C1.a * *C2.b OR $3==*C1.b * *C2
EMIT dept_id * *C1.b // *C2.a];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[1] * 2,) for x in self.emp_table.elements() if
x[3] == 5000 or x[3] == 25000])
self.check_result(query, expected)
def test_unbox_arbitrary_expression(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
out = [FROM emp, COUNTALL(dept) as size WHERE id > *size EMIT emp.id];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = collections.Counter(
[(x[0],) for x in self.emp_table.elements() if
x[0] > len(self.dept_table)])
self.check_result(query, expected)
def test_inline_table_literal(self):
query = """
emp = SCAN(%s);
dept = SCAN(%s);
out = [FROM emp, [1,2,3] as tl WHERE id > tl.$2 EMIT emp.id];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = collections.Counter(
[(x[0],) for x in self.emp_table.elements() if
x[0] > 3])
self.check_result(query, expected)
def __aggregate_expected_result(self, apply_func, grouping_col=1,
agg_col=3):
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[grouping_col]].append(t[agg_col])
tuples = [(key, apply_func(values)) for key, values in
result_dict.iteritems()]
return collections.Counter(tuples)
def test_max(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, MAX(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(max))
def test_min(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, MIN(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(min))
def test_sum(self):
query = """
out = [FROM SCAN(%s) as X EMIT dept_id, SUM(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(sum))
def test_avg(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, AVG(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
def avg(it):
sum = 0
cnt = 0
for val in it:
sum += val
cnt += 1
return sum / cnt
self.check_result(query, self.__aggregate_expected_result(avg))
self.check_result(query, self.__aggregate_expected_result(avg),
test_logical=True)
def test_stdev(self):
query = """
out = [FROM SCAN(%s) AS X EMIT STDEV(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
res = self.execute_query(query)
tp = res.elements().next()
self.assertAlmostEqual(tp[0], 34001.8006726)
res = self.execute_query(query, test_logical=True)
tp = res.elements().next()
self.assertAlmostEqual(tp[0], 34001.8006726)
def test_count(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, COUNT(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_countall(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, COUNTALL()];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_count_star(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, COUNT(*)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_count_star_sql(self):
query = """
out = SELECT dept_id, COUNT(*) FROM SCAN(%s) AS X;
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
self.check_result(query, self.__aggregate_expected_result(len),
test_logical=True)
def test_max_reversed(self):
query = """
out = [FROM SCAN(%s) AS X EMIT MAX(salary) AS max_salary, dept_id];
STORE(out, OUTPUT);
""" % self.emp_key
ex = self.__aggregate_expected_result(max)
ex = collections.Counter([(y, x) for (x, y) in ex])
self.check_result(query, ex)
self.check_result(query, ex, test_logical=True)
def test_compound_aggregate(self):
query = """
out = [FROM SCAN(%s) AS X
EMIT (2 * (MAX(salary) - MIN(salary))) AS range,
dept_id AS did];
out = [FROM out EMIT did AS dept_id, range AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = [(key, 2 * (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
self.check_result(query, expected, test_logical=True)
def test_aggregate_with_unbox(self):
query = """
C = [1 AS one, 2 AS two];
out = [FROM SCAN(%s) AS X
EMIT MAX(*C.two * salary) - MIN( *C.$1 * salary) AS range,
dept_id AS did];
out = [FROM out EMIT did AS dept_id, range AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(2 * t[3])
tuples = [(key, (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
self.check_result(query, expected, test_logical=True)
def test_nary_groupby(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id, salary, COUNT(name)];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[(t[1], t[3])].append(t[2])
tuples = [key + (len(values),)
for key, values in result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_empty_groupby(self):
query = """
out = [FROM SCAN(%s) AS X EMIT MAX(salary), COUNT($0), MIN(dept_id*4)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(90000, len(self.emp_table), 4)])
self.check_result(query, expected)
def test_compound_groupby(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id+dept_id, AVG(salary), COUNT(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[0] + t[1]].append(t[3])
tuples1 = [(key, sum(values), len(values)) for key, values
in result_dict.iteritems()]
tuples2 = [(t[0], t[1] / t[2], t[2]) for t in tuples1]
expected = collections.Counter(tuples2)
self.check_result(query, expected)
def test_impure_aggregate_colref(self):
"""Test of aggregate column that refers to a grouping column"""
query = """
out = [FROM SCAN(%s) AS X EMIT
( X.dept_id + (MAX(X.salary) - MIN(X.salary))) AS val,
X.dept_id AS did];
out = [FROM out EMIT did AS dept_id, val AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = [(key, key + (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_impure_aggregate_unbox(self):
"""Test of an aggregate column that contains an unbox."""
query = """
TWO = [2];
out = [FROM SCAN(%s) AS X
EMIT (*TWO * (MAX(salary) - MIN(salary))) AS range,
dept_id AS did];
out = [FROM out EMIT did AS dept_id, range AS rng];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = [(key, 2 * (max(values) - min(values))) for key, values in
result_dict.iteritems()]
expected = collections.Counter(tuples)
self.check_result(query, expected)
def test_aggregate_illegal_colref(self):
query = """
out = [FROM SCAN(%s) AS X EMIT
X.dept_id + COUNT(X.salary) AS val];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(raco.myrial.groupby.NonGroupedAccessException): # noqa
self.check_result(query, None)
def test_nested_aggregates_are_illegal(self):
query = """
out = [FROM SCAN(%s) AS X
EMIT id+dept_id, MIN(53 + MAX(salary)) AS foo];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, collections.Counter())
def test_standalone_countall(self):
query = """
out = COUNTALL(SCAN(%s));
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(len(self.emp_table),)])
self.check_result(query, expected)
def test_multiway_bagcomp_with_unbox(self):
"""Return all employees in accounting making less than 30000"""
query = """
Salary = [30000];
Dept = ["accounting"];
out = [FROM SCAN(%s) AS E, SCAN(%s) AS D
WHERE E.dept_id == D.id AND D.name == *Dept
AND E.salary < *Salary EMIT E.$2 AS name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
expected = collections.Counter([
("<NAME>",),
("<NAME>",),
("<NAME>",)])
self.check_result(query, expected)
def test_duplicate_bagcomp_aliases_are_illegal(self):
query = """
X = SCAN(%s);
out = [FROM X, X EMIT *];
STORE(out, OUTPUT);
""" % (self.emp_key,)
with self.assertRaises(interpreter.DuplicateAliasException):
self.check_result(query, collections.Counter())
def test_bagcomp_column_index_out_of_bounds(self):
query = """
E = SCAN(%s);
D = SCAN(%s);
out = [FROM E, D WHERE E.$1 == D.$77
EMIT E.name AS emp_name, D.$1 AS dept_name];
STORE(out, OUTPUT);
""" % (self.emp_key, self.dept_key)
with self.assertRaises(ColumnIndexOutOfBounds):
self.check_result(query, collections.Counter())
def test_abs(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, ABS(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, abs(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_ceil(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, CEIL(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.ceil(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_cos(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, COS(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.cos(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_floor(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, FLOOR(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.floor(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_log(self):
query = """
out = [FROM SCAN(%s) AS X WHERE val > 0 EMIT id, LOG(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.log(b)) for a, b in self.numbers_table.elements()
if b > 0])
self.check_result(query, expected)
def test_sin(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, SIN(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.sin(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_sqrt(self):
query = """
out = [FROM SCAN(%s) X WHERE val >= 0 EMIT id, SQRT(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.sqrt(b)) for a, b in self.numbers_table.elements()
if b >= 0])
self.check_result(query, expected)
def test_tan(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, TAN(val)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, math.tan(b)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_md5(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, md5(name)];
STORE(out, OUTPUT);
""" % self.emp_key
def md5_as_long(x):
m = md5.new()
m.update(x)
return int(m.hexdigest(), 16) >> 64
expected = collections.Counter(
[(x[0], md5_as_long(x[2])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_pow(self):
query = """
THREE = [3];
out = [FROM SCAN(%s) X EMIT id, POW(X.val, *THREE)];
STORE(out, OUTPUT);
""" % self.numbers_key
expected = collections.Counter(
[(a, pow(b, 3)) for a, b in self.numbers_table.elements()])
self.check_result(query, expected)
def test_no_such_relation(self):
query = """
out = [FROM SCAN(foo:bar:baz) x EMIT id, TAN(val)];
STORE(out, OUTPUT);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_bad_relation_name(self):
query = """
y = empty(a:int);
z = [from s y -- bug: s does not exist
emit y.a];
store(z, debug);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_bad_alias(self):
query = """
y = empty(a:int);
z = [from y s -- bug: extra s
emit y.a];
store(z, debug);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_bad_alias_wildcard(self):
query = """
y = empty(a:int);
z = [from y s -- bug: errant s
emit y.*];
store(z, debug);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_scan_error(self):
query = """
out = [FROM SCAN(%s) AS X EMIT id, !!!FROG(val)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(MyrialCompileException):
self.check_result(query, collections.Counter())
def test_relation_scope_error(self):
query = """
out = [FROM EMPTY(x:INT) AS X EMIT z.*];
STORE(out, OUTPUT);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_relation_scope_error2(self):
query = """
z = EMPTY(z:INT);
out = [FROM EMPTY(x:INT) AS X EMIT z.*];
STORE(out, OUTPUT);
"""
with self.assertRaises(NoSuchRelationException):
self.check_result(query, collections.Counter())
def test_parse_error(self):
query = """
out = [FROM SCAN(%s) AS X EMIT $(val)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(MyrialCompileException):
self.check_result(query, collections.Counter())
def test_no_such_udf(self):
query = """
out = [FROM SCAN(%s) AS X EMIT FooFunction(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NoSuchFunctionException):
self.check_result(query, collections.Counter())
def test_reserved_udf(self):
query = """
DEF avg(x, y): (x + y) / 2;
out = [FROM SCAN(%s) AS X EMIT avg(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(ReservedTokenException):
self.check_result(query, collections.Counter())
def test_duplicate_udf(self):
query = """
DEF foo(x, y): x + y;
DEF bar(): 7;
DEF foo(x): -1 * x;
out = [FROM SCAN(%s) AS X EMIT foo(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(DuplicateFunctionDefinitionException):
self.check_result(query, collections.Counter())
def test_invalid_argument_udf(self):
query = """
DEF Foo(x, y): cos(x) * sin(y);
out = [FROM SCAN(%s) AS X EMIT Foo(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(InvalidArgumentList):
self.check_result(query, collections.Counter())
def test_undefined_variable_udf(self):
query = """
DEF Foo(x, y): cos(x) * sin(z);
out = [FROM SCAN(%s) AS X EMIT Foo(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(UndefinedVariableException):
self.check_result(query, collections.Counter())
def test_duplicate_variable_udf(self):
query = """
DEF Foo(x, x): cos(x) * sin(x);
out = [FROM SCAN(%s) AS X EMIT Foo(X.salary, X.dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(DuplicateVariableException):
self.check_result(query, collections.Counter())
def test_nary_udf(self):
query = """
DEF Foo(a,b): [a + b, a - b];
out = [FROM SCAN(%s) AS X EMIT id, Foo(salary, dept_id) as [x, y]];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[1] + t[3], t[3] - t[1])
for t in self.emp_table])
self.check_result(query, expected)
def test_nary_udf_name_count(self):
query = """
DEF Foo(a,b): [a + b, a - b];
out = [FROM SCAN(%s) AS X EMIT id, Foo(salary, dept_id) as [x, y, z]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalColumnNamesException):
self.check_result(query, None)
def test_nary_udf_illegal_nesting(self):
query = """
DEF Foo(x): [x + 3, x - 3];
DEF Bar(a,b): [Foo(x), Foo(b)];
out = [FROM SCAN(%s) AS X EMIT id, Bar(salary, dept_id) as [x, y]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_nary_udf_illegal_wildcard(self):
query = """
DEF Foo(x): [x + 3, *];
out = [FROM SCAN(%s) AS X EMIT id, Foo(salary, dept_id) as [x, y]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalWildcardException):
self.check_result(query, None)
def test_triangle_udf(self):
query = """
DEF Triangle(a,b): (a*b)//2;
out = [FROM SCAN(%s) AS X EMIT id, Triangle(X.salary, dept_id) AS t];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[1] * t[3] / 2) for t in self.emp_table]) # noqa
self.check_result(query, expected)
def test_noop_udf(self):
expr = "30 + 15 // 7 + -45"
query = """
DEF Noop(): %s;
out = [Noop() AS t];
STORE(out, OUTPUT);
""" % expr
val = eval(expr)
expected = collections.Counter([(val,)])
self.check_result(query, expected)
def test_const(self):
expr = "30 + 15 // 7 + -45"
query = """
CONST myconstant: %s;
out = [myconstant AS t];
STORE(out, OUTPUT);
""" % expr
val = eval(expr)
expected = collections.Counter([(val,)])
self.check_result(query, expected)
def test_composition_udf(self):
query = """
DEF Add7(x): x + 7;
DEF Add6(x): x + 6;
out = [FROM SCAN(%s) AS X EMIT id, Add6(Add7(Add6(X.salary)))];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[3] + 19)
for t in self.emp_table])
self.check_result(query, expected)
def test_nested_udf(self):
query = """
DEF Add7(x): x + 7;
DEF Add10(x): Add7(x) + 3;
out = [FROM SCAN(%s) AS X EMIT id, Add10(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(t[0], t[3] + 10)
for t in self.emp_table])
self.check_result(query, expected)
def test_regression_150(self):
"""Repeated invocation of a UDF."""
query = """
DEF transform(x): pow(10, x/pow(2,16)*3.5);
out = [FROM SCAN(%s) AS X EMIT id, transform(salary),
transform(dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
def tx(x):
return pow(10, float(x) / pow(2, 16) * 3.5)
expected = collections.Counter([(t[0], tx(t[3]), tx(t[1]))
for t in self.emp_table])
self.check_result(query, expected)
def test_safediv_2_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT SafeDiv(X.salary,X.dept_id-1)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 0,)
for t in self.emp_table])
self.check_result(query, expected)
def test_safediv_3_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT SafeDiv(X.salary,X.dept_id-1,42)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 42,)
for t in self.emp_table])
self.check_result(query, expected)
def test_answer_to_everything_function(self):
query = """
out = [TheAnswerToLifeTheUniverseAndEverything()];
STORE(out, OUTPUT);
"""
expected = collections.Counter([(42,)])
self.check_result(query, expected)
def test_least_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT least(X.id,X.dept_id,1)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(min(t[0], t[1], 1),)
for t in self.emp_table])
self.check_result(query, expected)
def test_greatest_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT greatest(X.id,X.dept_id,3)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(max(t[0], t[1], 3),)
for t in self.emp_table])
self.check_result(query, expected)
def test_lesser_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT lesser(X.id,X.dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(min(t[0], t[1]),)
for t in self.emp_table])
self.check_result(query, expected)
def test_greater_function(self):
query = """
out = [FROM SCAN(%s) AS X EMIT greater(X.id,X.dept_id)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(max(t[0], t[1],),)
for t in self.emp_table])
self.check_result(query, expected)
def test_uda_illegal_init(self):
query = """
uda Foo(x,y) {
[0 as A, *];
[A + x, A + y];
A;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Foo(salary, id)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalWildcardException):
self.check_result(query, None)
def test_uda_illegal_update(self):
query = """
uda Foo(x,y) {
[0 as A, 1 as B];
[A + x + y, *];
A + B;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Foo(salary, id)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(MyrialCompileException):
self.check_result(query, None)
def test_uda_nested_emitter(self):
query = """
uda Foo(x) {
[0 as A];
[A + x];
[A];
};
uda Bar(x) {
[0 as B];
[B + x];
Foo(B);
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, None)
def test_uda_nested_init(self):
query = """
uda Foo(x) {
[0 as A];
[A + x];
[A];
};
uda Bar(x) {
[Foo(0) as B];
[B + x];
B;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, None)
def test_uda_nested_update(self):
query = """
uda Foo(x) {
[0 as A];
[A + x];
[A];
};
uda Bar(x) {
[0 as B];
[Foo(B)];
B;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedAggregateException):
self.check_result(query, None)
def test_uda_unary_emit_arg_list(self):
query = """
uda MyAvg(val) {
[0 as _sum, 0 as _count];
[_sum + val, _count + 1];
[_sum / _count];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, MyAvg(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
def agg_func(x):
return float(sum(x)) / len(x)
expected = self.__aggregate_expected_result(agg_func)
self.check_result(query, expected)
def test_second_max_uda(self):
"""UDA to compute the second largest element in a collection."""
query = """
uda SecondMax(val) {
[0 as _max, 0 as second_max];
[case when val > _max then val else _max end,
case when val > _max then _max when val > second_max then val
else second_max end];
second_max;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, SecondMax(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
def agg_func(x):
if len(x) < 2:
return 0
else:
return sorted(x, reverse=True)[1]
expected = self.__aggregate_expected_result(agg_func)
self.check_result(query, expected)
def test_multi_invocation_uda(self):
query = """
uda MaxDivMin(val) {
[9999999 as _min, 0 as _max];
[case when val < _min then val else _min end,
case when val > _max then val else _max end];
_max / _min;
};
out = [FROM SCAN(%s) AS X EMIT
MaxDivMin(id) + dept_id + MaxDivMin(salary), dept_id];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
max_salary = max(t[3] for t in tpls)
min_salary = min(t[3] for t in tpls)
max_id = max(t[0] for t in tpls)
min_id = min(t[0] for t in tpls)
results.append((k + float(max_salary) / min_salary +
float(max_id) / min_id, k))
self.check_result(query, collections.Counter(results))
def test_multiple_uda(self):
query = """
uda MyMax1(val) {
[0 as _max];
[case when val > _max then val else _max end];
_max;
};
uda MyMax2(val) {
[0 as _max];
[case when val > _max then val else _max end];
_max;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, MyMax1(salary), MyMax2(id)];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
max_salary = max(t[3] for t in tpls)
max_id = max(t[0] for t in tpls)
results.append((k, max_salary, max_id))
self.check_result(query, collections.Counter(results))
def test_uda_no_emit_clause(self):
query = """
uda MyCount() {
[0 as _count];
[_count + 1];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, MyCount()];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(len))
def test_uda_no_emit_clause_many_cols(self):
query = """
uda MyAggs(x) {
[0 as _count, 0 as _sum, 0 as _sumsq];
[_count + 1, _sum + x, _sumsq + x*x];
};
out = [FROM SCAN(%s) AS X EMIT MyAggs(salary) as [a, b, c]];
STORE(out, OUTPUT);
""" % self.emp_key
c = len(list(self.emp_table.elements()))
s = sum(d for a, b, c, d in self.emp_table.elements())
sq = sum(d * d for a, b, c, d in self.emp_table.elements())
expected = collections.Counter([(c, s, sq)])
self.check_result(query, expected)
# Test with two different column orders in case the undefined
# order used by Python is correct by chance.
query = """
uda MyAggs(x) {
[0 as _count, 0 as _sumsq, 0 as _sum];
[_count + 1, _sumsq + x*x, _sum + x];
};
out = [FROM SCAN(%s) AS X EMIT MyAggs(salary) as [a, b, c]];
STORE(out, OUTPUT);
""" % self.emp_key
c = len(list(self.emp_table.elements()))
sq = sum(d * d for a, b, c, d in self.emp_table.elements())
s = sum(d for a, b, c, d in self.emp_table.elements())
expected = collections.Counter([(c, sq, s)])
self.check_result(query, expected)
def test_uda_with_udf(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
out = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k, max(t[3] + t[0] for t in tpls)))
self.check_result(query, collections.Counter(results))
def test_uda_with_subsequent_project_0(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
inter = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)];
out = [from inter emit $0];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k, max(t[3] + t[0] for t in tpls)))
results = [(t[0],) for t in results]
self.check_result(query, collections.Counter(results))
def test_uda_with_subsequent_project_1(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
inter = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)];
out = [from inter emit $1];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k, max(t[3] + t[0] for t in tpls)))
results = [(t[1],) for t in results]
self.check_result(query, collections.Counter(results))
def test_uda_with_subsequent_project_2(self):
query = """
def foo(x, y): x + y;
uda max2(x, y) {
[0 as _max];
[case when foo(x, y) > _max then foo(x, y) else _max end];
_max;
};
inter = [FROM SCAN(%s) AS X EMIT dept_id, max2(salary, id)
, max2(dept_id, id)];
out = [from inter emit $1];
STORE(out, OUTPUT);
""" % self.emp_key
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
results.append((k,
max(t[3] + t[0] for t in tpls),
max(t[1] + t[0] for t in tpls)))
results = [(t[1],) for t in results]
self.check_result(query, collections.Counter(results))
def __run_multiple_emitter_test(self, include_column_names):
if include_column_names:
names = " AS [mysum, mycount, myavg]"
else:
names = ""
query = """
uda SumCountMean(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
[_sum, _count, _sum/_count];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, SumCountMean(salary) %s,
dept_id+3, max(id) as max_id];
STORE(out, OUTPUT);
""" % (self.emp_key, names)
d = collections.defaultdict(list)
for t in self.emp_table.elements():
d[t[1]].append(t)
results = []
for k, tpls in d.iteritems():
_sum = sum(x[3] for x in tpls)
_count = len(tpls)
_avg = float(_sum) / _count
_max_id = max(x[0] for x in tpls)
results.append((k, _sum, _count, _avg, k + 3, _max_id))
self.check_result(query, collections.Counter(results))
def test_uda_multiple_emitters_default_names(self):
self.__run_multiple_emitter_test(False)
def test_uda_multiple_emitters_provided_names(self):
self.__run_multiple_emitter_test(True)
scheme_actual = self.db.get_scheme('OUTPUT')
scheme_expected = scheme.Scheme([
('dept_id', types.LONG_TYPE), ('mysum', types.LONG_TYPE),
('mycount', types.LONG_TYPE), ('myavg', types.FLOAT_TYPE),
('_COLUMN4_', types.LONG_TYPE), ('max_id', types.LONG_TYPE)])
self.assertEquals(scheme_actual, scheme_expected)
def test_emit_arg_bad_column_name_length(self):
query = """
out = [FROM SCAN(%s) AS X EMIT dept_id AS [dept_id1, dept_id2]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalColumnNamesException):
self.check_result(query, None)
def test_uda_bad_column_name_length(self):
query = """
uda Fubar(x, y, z) {
[0 as Q];
[Q + 1];
[1,2,3];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Fubar(1, salary, id)
AS [A, B, C, D]];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(IllegalColumnNamesException):
self.check_result(query, None)
def test_uda_init_tuple_valued(self):
query = """
uda Foo(x) {
[0 as Q];
[Q + 1];
[1,2,3];
};
uda Bar(x) {
[Foo(0) as [A, B, C]];
[Q * 8];
[1,2,3];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_uda_update_tuple_valued(self):
query = """
uda Foo(x) {
[0 as Q];
[Q + 1];
[1,2,3];
};
uda Bar(x) {
[0 as Q];
[Foo(Q + 1)];
[1,2,3];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_uda_result_tuple_valued(self):
query = """
uda Foo(x) {
[0 as Q];
[Q + 1];
[1,2,3];
};
uda Bar(x) {
[0 as Q];
[Q + 2];
[1,2,Foo(3)];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, Bar(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
def test_uda_multiple_emitters_nested(self):
"""Test that we raise an Exception if a tuple-valued UDA doesn't appear
by itself in an emit expression."""
query = """
uda SumCountMean(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
[_sum, _count, _sum/_count];
};
out = [FROM SCAN(%s) AS X EMIT dept_id, SumCountMean(salary) + 5];
STORE(out, OUTPUT);
""" % self.emp_key
with self.assertRaises(NestedTupleExpressionException):
self.check_result(query, None)
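    # Average UDA split into local/remote pieces; the logical emitter is deliberately wrong,
    # so results are only correct when the decomposition is actually applied.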
__DECOMPOSED_UDA = """
uda LogicalAvg(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
float(_sum); -- Note bogus return value
};
uda LocalAvg(x) {
[0 as _sum, 0 as _count];
[_sum + x, _count + 1];
};
uda RemoteAvg(_local_sum, _local_count) {
[0 as _sum, 0 as _count];
[_sum + _local_sum, _count + _local_count];
[_sum/_count];
};
uda* LogicalAvg {LocalAvg, RemoteAvg};
"""
__ARG_MAX_UDA = """
def pickval(id, salary, val, _id, _salary, _val):
case when salary > _salary then val
when salary = _salary and id > _id then val
else _val end;
uda ArgMax(id, dept_id, name, salary) {
[0 as _id, 0 as _dept_id, "" as _name, 0 as _salary];
[pickval(id, salary, id, _id, _salary, _id),
pickval(id, salary, dept_id, _id, _salary, _dept_id),
pickval(id, salary, name, _id, _salary, _name),
pickval(id, salary, salary, _id, _salary, _salary)];
[_id, _dept_id, _name, _salary];
};
"""
__ARG_MAX_UDA_UNNECESSARY_EXPR = """
def pickval(id, salary, val, _id, _salary, _val):
case when salary > _salary then val
when salary = _salary and id > _id then val
else _val end;
uda ArgMax(id, dept_id, name, salary) {
[0 as _id, 0 as _dept_id, "" as _name, 0 as _salary];
[pickval(id, salary, greater(id, id), _id, _salary, _id),
pickval(id, salary, lesser(dept_id, dept_id), _id, _salary,
_dept_id),
pickval(id, salary, case when name="" then name else name end, _id,
_salary, _name),
pickval(id, salary, salary * 1, _id, _salary, _salary)];
[_id, _dept_id, _name, _salary];
};
"""
def test_decomposable_average_uda(self):
"""Test of a decomposed average UDA.
Note that the logical aggregate returns a broken value, so
this test only passes if we decompose the aggregate properly.
"""
query = """%s
out = [FROM SCAN(%s) AS X EMIT dept_id, LogicalAvg(salary)];
STORE(out, OUTPUT);
""" % (TestQueryFunctions.__DECOMPOSED_UDA, self.emp_key)
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t[3])
tuples = []
for key, vals in result_dict.iteritems():
_cnt = len(vals)
_sum = sum(vals)
tuples.append((key, float(_sum) / _cnt))
self.check_result(query, collections.Counter(tuples))
def test_decomposable_nary_uda(self):
query = """
uda Sum2(x, y) {
[0 as sum_x, 0 as sum_y];
[sum_x + x, sum_y + y];
};
uda* Sum2 {Sum2, Sum2};
out = [FROM SCAN(%s) AS X EMIT
Sum2(id, salary) AS [id_sum, salary_sum]];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t)
id_sum = sum(t[0] for t in self.emp_table.elements())
salary_sum = sum(t[3] for t in self.emp_table.elements())
tuples = [(id_sum, salary_sum)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda(self):
"""Test of an arg_max UDA.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_with_references(self):
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_with_functions(self):
"""Test of an arg_max UDA with expressions as inputs.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda(self):
"""Test of a decomposable arg_max UDA.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
def test_decomposable_arg_max_uda_with_references(self):
"""Test of a decomposable arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda_with_functions(self):
"""Test of a decomposable arg_max UDA with expressions as inputs.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_internal_exprs(self):
"""Test of an arg_max UDA.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_internal_exprs_with_references(self):
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_arg_max_uda_internal_exprs_with_functions(self):
"""Test of an arg_max UDA with expressions as inputs.
"""
query = """
{arg}
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda_internal_exprs(self):
"""Test of a decomposable arg_max UDA.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, dept_id, name, salary)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
"""Test of an arg_max UDA with named, unnamed, and dotted
attribute references.
"""
def test_decomposable_arg_max_uda_internal_exprs_with_references(self):
"""Test of a decomposable arg_max UDA with named, unnamed, and dotted
attribute references.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id, emp.dept_id, $2, emp.$3)
as [a, b, c, d]];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_arg_max_uda_internal_exprs_with_functions(self):
"""Test of a decomposable arg_max UDA with expressions as inputs.
"""
query = """
{arg}
uda* ArgMax {{ArgMax, ArgMax}};
emp = scan({emp});
out = [from emp emit ArgMax(id,
greater(dept_id, dept_id),
case when id=1 then name else name end,
salary)];
store(out, OUTPUT);
""".format(arg=self.__ARG_MAX_UDA_UNNECESSARY_EXPR, emp=self.emp_key)
tuples = [(a, b, c, d) for (a, b, c, d) in self.emp_table
if all(d > d1 or d == d1 and a >= a1
for a1, b1, c1, d1 in self.emp_table)]
self.check_result(query, collections.Counter(tuples))
def test_decomposable_average_uda_repeated(self):
"""Test of repeated invocations of decomposed UDAs."""
query = """%s
out = [FROM SCAN(%s) AS X EMIT dept_id,
LogicalAvg(salary) + LogicalAvg($0)];
STORE(out, OUTPUT);
""" % (TestQueryFunctions.__DECOMPOSED_UDA, self.emp_key)
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t)
tuples = []
for key, vals in result_dict.iteritems():
_cnt = len(vals)
_salary_sum = sum(t[3] for t in vals)
_id_sum = sum(t[0] for t in vals)
tuples.append((key, (float(_salary_sum) + float(_id_sum)) / _cnt))
self.check_result(query, collections.Counter(tuples))
def test_decomposable_sum_uda(self):
"""Test of a decomposed sum UDA.
Note that the logical aggregate returns a broken value, so
this test only passes if we decompose the aggregate properly.
"""
query = """
uda MySumBroken(x) {
[0 as _sum];
[_sum + x];
17; -- broken
};
uda MySum(x) {
[0 as _sum];
[_sum + x];
};
uda* MySumBroken {MySum, MySum};
out = [FROM SCAN(%s) AS X EMIT dept_id, MySumBroken(salary)];
STORE(out, OUTPUT);
""" % self.emp_key
self.check_result(query, self.__aggregate_expected_result(sum))
def test_decomposable_uda_with_builtin_agg(self):
"""Test of a decomposed UDA + builtin aggregate.
Note that the logical aggregate returns a broken value, so
this test only passes if we decompose the aggregate properly.
"""
query = """
uda MySumBroken(x) {
[0 as _sum];
[_sum + x];
17; -- broken
};
uda MySum(x) {
[0 as _sum];
[_sum + x];
};
uda* MySumBroken {MySum, MySum};
out = [FROM SCAN(%s) AS X EMIT dept_id, MySumBroken(salary), SUM(id)];
STORE(out, OUTPUT);
""" % self.emp_key
result_dict = collections.defaultdict(list)
for t in self.emp_table.elements():
result_dict[t[1]].append(t)
tuples = []
for key, vals in result_dict.iteritems():
_salary_sum = sum(t[3] for t in vals)
_id_sum = sum(t[0] for t in vals)
tuples.append((key, _salary_sum, _id_sum))
self.check_result(query, collections.Counter(tuples))
def test_duplicate_decomposable_uda(self):
query = """
uda Agg1(x) {
[0 as _sum];
[_sum + x];
};
uda* Agg1 {Agg1, Agg1};
uda* Agg1 {Agg1, Agg1};
"""
with self.assertRaises(DuplicateFunctionDefinitionException):
self.check_result(query, None)
def test_decomposable_uda_type_check_fail1(self):
query = """
uda Logical(x) {
[0 as _sum];
[_sum + x];
};
uda Local(x, y) {
[0 as _sum];
[_sum + x];
};
uda* Logical {Local, Logical};
"""
with self.assertRaises(InvalidArgumentList):
self.check_result(query, None)
def test_decomposable_uda_type_check_fail2(self):
query = """
uda Logical(x) {
[0 as _sum];
[_sum + x];
};
uda Remote(x, y) {
[0 as _sum];
[_sum + x];
};
uda* Logical {Logical, Remote};
"""
with self.assertRaises(InvalidArgumentList):
self.check_result(query, None)
def test_decomposable_uda_type_check_fail3(self):
query = """
uda Logical(x) {
[0 as _sum];
[_sum + x];
};
uda Remote(x) {
[0 as _sum];
[_sum + x];
[1, 2, 3];
};
uda* Logical {Logical, Remote};
"""
with self.assertRaises(InvalidEmitList):
self.check_result(query, None)
def test_running_mean_sapply(self):
query = """
APPLY RunningMean(value) {
[0 AS _count, 0 AS _sum];
[_count + 1, _sum + value];
_sum / _count;
};
out = [FROM SCAN(%s) AS X EMIT id, RunningMean(X.salary)];
STORE(out, OUTPUT);
""" % self.emp_key
tps = []
_sum = 0
_count = 0
for emp in self.emp_table:
_sum += emp[3]
_count += 1
tps.append((emp[0], float(_sum) / _count))
self.check_result(query, collections.Counter(tps))
def test_sapply_multi_invocation(self):
query = """
APPLY RunningSum(x) {
[0 AS _sum];
[_sum + x];
_sum;
};
out = [FROM SCAN(%s) AS X
EMIT id, RunningSum(X.salary), RunningSum(id)];
STORE(out, OUTPUT);
""" % self.emp_key
tps = []
_sum1 = 0
_sum2 = 0
for emp in self.emp_table:
_sum1 += emp[3]
_sum2 += emp[0]
tps.append((emp[0], _sum1, _sum2))
self.check_result(query, collections.Counter(tps))
def test_118_regression(self):
"""Regression test for https://github.com/uwescience/datalogcompiler/issues/118""" # noqa
query = """
out = [FROM SCAN(%s) AS X WHERE dept_id = 2 AND salary = 5000 EMIT id];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0],) for x in self.emp_table.elements()
if x[1] == 2 and x[3] == 5000])
self.check_result(query, expected)
def test_scan_emp_empty_statement(self):
"""Test with an empty statement."""
query = """
;;;
emp = SCAN(%s);
STORE(emp, OUTPUT);;;
""" % self.emp_key
self.check_result(query, self.emp_table)
def test_empty_statement_parse(self):
"""Program that contains nothing but empty statements."""
with self.assertRaises(MyrialCompileException):
self.check_result(";", None)
def test_case_binary(self):
query = """
emp = SCAN(%s);
rich = [FROM emp EMIT id, CASE WHEN salary > 15000
THEN salary // salary
ELSE 0 // salary END];
STORE(rich, OUTPUT);
""" % self.emp_key
def func(y):
if y > 15000:
return 1
else:
return 0
expected = collections.Counter(
[(x[0], func(x[3])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_case_ternary(self):
query = """
emp = SCAN(%s);
rich = [FROM emp EMIT id,
CASE WHEN salary <= 5000 THEN "poor"
WHEN salary <= 25000 THEN "middle class"
ELSE "rich"
END];
STORE(rich, OUTPUT);
""" % self.emp_key
def func(y):
if y <= 5000:
return 'poor'
elif y <= 25000:
return 'middle class'
else:
return 'rich'
expected = collections.Counter(
[(x[0], func(x[3])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_case_aggregate(self):
query = """
emp = SCAN(%s);
rich = [FROM emp EMIT SUM(3 * CASE WHEN salary > 15000
THEN 1 ELSE 0 END)];
STORE(rich, OUTPUT);
""" % self.emp_key
_sum = 3 * len([x for x in self.emp_table.elements()
if x[3] > 15000])
self.check_result(query, collections.Counter([(_sum,)]))
def test_case_unbox(self):
query = """
TH = [15000];
A = [1 AS one, 2 AS two, 3 AS three];
emp = SCAN(%s);
rich = [FROM emp EMIT SUM(*A.three * CASE WHEN salary > *TH
THEN 1 ELSE 0 END)];
STORE(rich, OUTPUT);
""" % self.emp_key
_sum = 3 * len([x for x in self.emp_table.elements()
if x[3] > 15000])
self.check_result(query, collections.Counter([(_sum,)]))
def test_default_column_names(self):
with open('examples/groupby1.myl') as fh:
query = fh.read()
self.execute_query(query)
scheme = self.db.get_scheme('OUTPUT')
self.assertEquals(scheme.getName(0), "_COLUMN0_")
self.assertEquals(scheme.getName(1), "id")
def test_worker_id(self):
query = """
X = [FROM SCAN(%s) AS X EMIT X.id, WORKER_ID()];
STORE(X, OUTPUT);
""" % self.emp_key
expected = collections.Counter([(x[0], 0) for x
in self.emp_table.elements()])
self.check_result(query, expected)
def test_flip_zero(self):
"""flip(0) should always evaluate to false"""
query = """
X = [FROM SCAN(%s) AS X WHERE flip(0) EMIT *];
STORE(X, OUTPUT);
""" % self.emp_key
expected = collections.Counter()
self.check_result(query, expected)
def test_flip_one(self):
"""flip(1) should always evaluate to true"""
query = """
X = [FROM SCAN(%s) AS X WHERE flip(1) EMIT *];
STORE(X, OUTPUT);
""" % self.emp_key
expected = collections.Counter(self.emp_table.elements())
self.check_result(query, expected)
def test_substr(self):
query = """
ZERO = [0];
THREE = [3];
out = [FROM SCAN(%s) AS X EMIT X.id, substr(X.name, *ZERO, *THREE)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], x[2][0:3]) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_concat(self):
query = """
STRS = ["a" as a, "b" as b];
out = [FROM STRS EMIT concat(a, b)];
STORE(out, OUTPUT);
"""
expected = collections.Counter({('ab',): 1})
self.check_result(query, expected)
def test_byterange(self):
query = r"""
BYTES = [b'\xDE\xAD\xBE\xEF' AS bytes];
out = [FROM BYTES AS X EMIT byterange(X.bytes, 2, 4) as res];
STORE(out, OUTPUT);
"""
expected = collections.Counter({(b'\xBE\xEF',): 1})
self.check_result(query, expected)
def test_len(self):
query = """
out = [FROM SCAN(%s) AS X EMIT X.id, len(X.name)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], len(x[2])) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_head(self):
query = """
out = [FROM SCAN(%s) AS X EMIT X.id, head(X.name, 10)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], x[2][0:10]) for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_tail(self):
query = """
ZERO = [0];
THREE = [3];
out = [FROM SCAN(%s) AS X EMIT X.id, tail(X.name, 10)];
STORE(out, OUTPUT);
""" % self.emp_key
expected = collections.Counter(
[(x[0], (lambda i: i if len(i) <= 10 else i[len(i) - 10:])(x[2]))
for x in self.emp_table.elements()])
self.check_result(query, expected)
def test_column_name_reserved(self):
query = """
T = EMPTY(x:INT);
A = [FROM T EMIT SafeDiv(x, 3) AS SafeDiv];
STORE (A, BadProgram);
"""
with self.assertRaises(ReservedTokenException):
self.check_result(query, None)
def test_bug_226(self):
query = """
T = scan({emp});
A = select id, salary from T where 1=1;
B = select id, salary from A where salary=90000;
C = select A.* from B, A where A.salary < B.salary;
STORE (C, OUTPUT);
""".format(emp=self.emp_key)
expected = collections.Counter(
(i, s) for (i, d, n, s) in self.emp_table
for (i2, d2, n2, s2) in self.emp_table
if s2 == 90000 and s < s2)
self.assertEquals(expected, self.execute_query(query))
def test_column_mixed_case_reserved(self):
query = """
T = EMPTY(x:INT);
A = [FROM T EMIT MAX(x) AS maX];
STORE (A, BadProgram);
"""
with self.assertRaises(ReservedTokenException):
self.check_result(query, None)
def test_variable_name_reserved(self):
query = """
T = EMPTY(x:INT);
avg = COUNTALL(T);
STORE (countall, BadProgram);
"""
with self.assertRaises(ReservedTokenException):
self.check_result(query, None)
def test_empty_query(self):
query = """
T1 = empty(x:INT);
"""
with self.assertRaises(MyrialCompileException):
self.check_result(query, None)
def test_sink(self):
query = """
ZERO = [0];
A = [from ZERO emit *];
SINK(A);
"""
self.evaluate_sink_query(query)
def test_string_cast(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT STRING(emp.dept_id) AS foo];
STORE(bc, OUTPUT);
""" % self.emp_key
ex = collections.Counter((str(d),) for (i, d, n, s) in self.emp_table)
ex_scheme = scheme.Scheme([('foo', types.STRING_TYPE)])
self.check_result(query, ex)
def test_float_cast(self):
query = """
emp = SCAN(%s);
bc = [FROM emp EMIT float(emp.dept_id) AS foo];
STORE(bc, OUTPUT);
""" % self.emp_key
ex = collections.Counter((float(d),) for (i, d, n, s) in self.emp_table) # noqa
ex_scheme = scheme.Scheme([('foo', types.DOUBLE_TYPE)])
self.check_result(query, ex, ex_scheme)
def test_scientific_notation(self):
literals = ["1.0e20", "3e40", "5e-6", ".7e8", ".9e-12",
"-3e45", "-6e-78", "9e+12", "3E4"]
query = """
emp = SCAN({});
bc = [FROM emp EMIT {}];
STORE(bc, OUTPUT);
""".format(self.emp_key, ','.join(literals))
ex = collections.Counter(
(tuple(map(float, literals)),) * len(self.emp_table)) # noqa
ex_scheme = scheme.Scheme([('$%d' % i, types.DOUBLE_TYPE)
for i in xrange(len(literals))])
self.check_result(query, ex, ex_scheme)
def test_sequence(self):
query = """
T1 = scan({rel});
store(T1, OUTPUT);
T2 = scan({rel});
store(T2, OUTPUT2);
""".format(rel=self.emp_key)
physical_plan = self.get_physical_plan(query)
self.assertIsInstance(physical_plan, raco.algebra.Sequence)
self.check_result(query, self.emp_table, output='OUTPUT')
self.check_result(query, self.emp_table, output='OUTPUT2')
def test_238_dont_renumber_columns(self):
# see https://github.com/uwescience/raco/issues/238
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit x2.a, x2.b];
z = [from y emit a];
store(z, OUTPUT);"""
self.check_result(query, collections.Counter([(1,)]))
def test_implicit_column_names(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit $0, $1];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_implicit_column_names2(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit $2, $3];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_implicit_column_names3(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit $2, $1];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_unbox_index_column_names(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2
emit x2.$0, x2.$1];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('b', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 2)]),
scheme=expected_scheme)
def test_duplicate_column_names(self):
query = """
x = [1 as a, 2 as b];
y = [from x as x1, x as x2 emit x1.a, x2.a];
store(y, OUTPUT);"""
expected_scheme = scheme.Scheme([('a', types.LONG_TYPE),
('a1', types.LONG_TYPE)])
self.check_result(query, collections.Counter([(1, 1)]),
scheme=expected_scheme)
def test_distinct_aggregate_combinations(self):
"""Test to make sure that aggregates of different columns are not
combined together by the optimizer."""
query = """
emp = scan(%s);
ans = [from emp emit sum(dept_id) as d, sum(salary) as s];
store(ans, OUTPUT);""" % self.emp_key
sum_dept_id = sum([e[1] for e in self.emp_table])
sum_salary = sum([e[3] for e in self.emp_table])
expected = collections.Counter([(sum_dept_id, sum_salary)])
self.check_result(query, expected)
def test_bug_245_dead_code_with_do_while_plan(self):
"""Test to make sure that a dead program (no Stores) with a DoWhile
throws the correct parse error."""
with open('examples/deadcode2.myl') as fh:
query = fh.read()
with self.assertRaises(MyrialCompileException):
self.check_result(query, None)
def test_simple_do_while(self):
"""count to 32 by powers of 2"""
with open('examples/iteration.myl') as fh:
query = fh.read()
expected = collections.Counter([(32, 5)])
self.check_result(query, expected, output="powersOfTwo")
def test_pyUDF_dotted_arguments(self):
query = """
T1=scan(%s);
out = [from T1 emit test(T1.id, T1.dept_id) As output];
store(out, OUTPUT);
""" % self.emp_key
plan = self.get_physical_plan(query, udas=[('test', LONG_TYPE)])
apply = [op for op in plan.walk() if isinstance(op, Apply)][0]
ref = apply.emitters[0][1]
assert str(ref) == "PYUDF(test, ['id', 'dept_id'], LONG_TYPE)"
def test_pyUDF_with_positional_arguments(self):
query = """
T1=scan(%s);
out = [from T1 emit test($0, $1) As output];
store(out, OUTPUT);
""" % self.emp_key
plan = self.get_physical_plan(query, udas=[('test', LONG_TYPE)])
apply = [op for op in plan.walk() if isinstance(op, Apply)][0]
ref = apply.emitters[0][1]
assert str(ref) == "PYUDF(test, ['$0', '$1'], LONG_TYPE)"
def test_pyUDF_uda(self):
query = """
uda Foo(x){
[0 as _count, 0 as _sum];
[ _count+1, test_uda(_sum, x)];
[ test_uda(_sum,_count) ];
};
T1 = [from scan(%s) as t emit Foo(t.id) As mask];
store(T1, out);
""" % self.emp_key
self.get_physical_plan(query, udas=[('test_uda', LONG_TYPE)])
|
[
"raco.scheme.Scheme",
"math.sqrt",
"math.ceil",
"math.tan",
"math.floor",
"raco.fake_data.FakeData.emp_table.elements",
"math.sin",
"collections.defaultdict",
"md5.new",
"math.cos",
"collections.Counter",
"math.log",
"raco.scheme.getName"
] |
[((11134, 11357), 'collections.Counter', 'collections.Counter', (["[('<NAME>', 'human resources'), ('<NAME>', 'accounting'), ('<NAME>',\n 'accounting'), ('<NAME>', 'human resources'), ('<NAME>', 'accounting'),\n ('<NAME>', 'engineering'), ('<NAME>', 'accounting')]"], {}), "([('<NAME>', 'human resources'), ('<NAME>', 'accounting'\n ), ('<NAME>', 'accounting'), ('<NAME>', 'human resources'), ('<NAME>',\n 'accounting'), ('<NAME>', 'engineering'), ('<NAME>', 'accounting')])\n", (11153, 11357), False, 'import collections\n'), ((3418, 3439), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (3437, 3439), False, 'import collections\n'), ((12431, 12478), 'collections.Counter', 'collections.Counter', (["[('<NAME>',), ('<NAME>',)]"], {}), "([('<NAME>',), ('<NAME>',)])\n", (12450, 12478), False, 'import collections\n'), ((13552, 13599), 'collections.Counter', 'collections.Counter', (["[('<NAME>',), ('<NAME>',)]"], {}), "([('<NAME>',), ('<NAME>',)])\n", (13571, 13599), False, 'import collections\n'), ((14000, 14078), 'collections.Counter', 'collections.Counter', (["[('<NAME>', 'accounting'), ('<NAME>', 'human resources')]"], {}), "([('<NAME>', 'accounting'), ('<NAME>', 'human resources')])\n", (14019, 14078), False, 'import collections\n'), ((14565, 14616), 'collections.Counter', 'collections.Counter', (["[('<NAME>', 1), ('<NAME>', 2)]"], {}), "([('<NAME>', 1), ('<NAME>', 2)])\n", (14584, 14616), False, 'import collections\n'), ((14985, 15036), 'collections.Counter', 'collections.Counter', (["[('<NAME>', 1), ('<NAME>', 2)]"], {}), "([('<NAME>', 1), ('<NAME>', 2)])\n", (15004, 15036), False, 'import collections\n'), ((15960, 15987), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (15979, 15987), False, 'import collections\n'), ((16527, 16554), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (16546, 16554), False, 'import collections\n'), ((17067, 17094), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (17086, 17094), False, 'import collections\n'), ((17488, 17513), 'collections.Counter', 'collections.Counter', (['tpls'], {}), '(tpls)\n', (17507, 17513), False, 'import collections\n'), ((17932, 17957), 'collections.Counter', 'collections.Counter', (['tpls'], {}), '(tpls)\n', (17951, 17957), False, 'import collections\n'), ((18304, 18331), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (18323, 18331), False, 'import collections\n'), ((18692, 18719), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (18711, 18719), False, 'import collections\n'), ((18947, 18997), 'collections.Counter', 'collections.Counter', (['[(25000,), (5000,), (90000,)]'], {}), '([(25000,), (5000,), (90000,)])\n', (18966, 18997), False, 'import collections\n'), ((19527, 19581), 'collections.Counter', 'collections.Counter', (['[(x[3],) for x in self.emp_table]'], {}), '([(x[3],) for x in self.emp_table])\n', (19546, 19581), False, 'import collections\n'), ((21472, 21507), 'collections.Counter', 'collections.Counter', (['thirdSortLimit'], {}), '(thirdSortLimit)\n', (21491, 21507), False, 'import collections\n'), ((22091, 22126), 'collections.Counter', 'collections.Counter', (['thirdSortLimit'], {}), '(thirdSortLimit)\n', (22110, 22126), False, 'import collections\n'), ((22724, 22755), 'collections.Counter', 'collections.Counter', (['[(50000,)]'], {}), '([(50000,)])\n', (22743, 22755), False, 'import collections\n'), ((23042, 23069), 'collections.Counter', 
'collections.Counter', (['[(6,)]'], {}), '([(6,)])\n', (23061, 23069), False, 'import collections\n'), ((26602, 26631), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (26625, 26631), False, 'import collections\n'), ((26858, 26885), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (26877, 26885), False, 'import collections\n'), ((30225, 30269), 'collections.Counter', 'collections.Counter', (['[(y, x) for x, y in ex]'], {}), '([(y, x) for x, y in ex])\n', (30244, 30269), False, 'import collections\n'), ((30698, 30727), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (30721, 30727), False, 'import collections\n'), ((30955, 30982), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (30974, 30982), False, 'import collections\n'), ((31465, 31494), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (31488, 31494), False, 'import collections\n'), ((31722, 31749), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (31741, 31749), False, 'import collections\n'), ((32057, 32086), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (32080, 32086), False, 'import collections\n'), ((32303, 32330), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (32322, 32330), False, 'import collections\n'), ((32898, 32927), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (32921, 32927), False, 'import collections\n'), ((33219, 33247), 'collections.Counter', 'collections.Counter', (['tuples2'], {}), '(tuples2)\n', (33238, 33247), False, 'import collections\n'), ((33710, 33739), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (33733, 33739), False, 'import collections\n'), ((33969, 33996), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (33988, 33996), False, 'import collections\n'), ((34466, 34495), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (34489, 34495), False, 'import collections\n'), ((34723, 34750), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (34742, 34750), False, 'import collections\n'), ((36195, 36255), 'collections.Counter', 'collections.Counter', (["[('<NAME>',), ('<NAME>',), ('<NAME>',)]"], {}), "([('<NAME>',), ('<NAME>',), ('<NAME>',)])\n", (36214, 36255), False, 'import collections\n'), ((45012, 45091), 'collections.Counter', 'collections.Counter', (['[(t[0], t[1] + t[3], t[3] - t[1]) for t in self.emp_table]'], {}), '([(t[0], t[1] + t[3], t[3] - t[1]) for t in self.emp_table])\n', (45031, 45091), False, 'import collections\n'), ((46474, 46544), 'collections.Counter', 'collections.Counter', (['[(t[0], t[1] * t[3] / 2) for t in self.emp_table]'], {}), '([(t[0], t[1] * t[3] / 2) for t in self.emp_table])\n', (46493, 46544), False, 'import collections\n'), ((46829, 46858), 'collections.Counter', 'collections.Counter', (['[(val,)]'], {}), '([(val,)])\n', (46848, 46858), False, 'import collections\n'), ((47142, 47171), 'collections.Counter', 'collections.Counter', (['[(val,)]'], {}), '([(val,)])\n', (47161, 47171), False, 'import collections\n'), ((47475, 47539), 'collections.Counter', 'collections.Counter', (['[(t[0], t[3] + 19) for t in self.emp_table]'], {}), '([(t[0], t[3] + 19) for t in self.emp_table])\n', (47494, 47539), False, 'import collections\n'), ((47874, 47938), 'collections.Counter', 
'collections.Counter', (['[(t[0], t[3] + 10) for t in self.emp_table]'], {}), '([(t[0], t[3] + 10) for t in self.emp_table])\n', (47893, 47938), False, 'import collections\n'), ((48787, 48881), 'collections.Counter', 'collections.Counter', (['[(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 0,) for t in self.emp_table]'], {}), '([(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 0,) for t in\n self.emp_table])\n', (48806, 48881), False, 'import collections\n'), ((49156, 49251), 'collections.Counter', 'collections.Counter', (['[(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 42,) for t in self.emp_table]'], {}), '([(t[3] / (t[1] - 1) if t[1] - 1 > 0 else 42,) for t in\n self.emp_table])\n', (49175, 49251), False, 'import collections\n'), ((49507, 49535), 'collections.Counter', 'collections.Counter', (['[(42,)]'], {}), '([(42,)])\n', (49526, 49535), False, 'import collections\n'), ((54835, 54864), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (54858, 54864), False, 'import collections\n'), ((55852, 55881), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (55875, 55881), False, 'import collections\n'), ((57092, 57125), 'collections.Counter', 'collections.Counter', (['[(c, s, sq)]'], {}), '([(c, s, sq)])\n', (57111, 57125), False, 'import collections\n'), ((57773, 57806), 'collections.Counter', 'collections.Counter', (['[(c, sq, s)]'], {}), '([(c, sq, s)])\n', (57792, 57806), False, 'import collections\n'), ((58220, 58249), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (58243, 58249), False, 'import collections\n'), ((58937, 58966), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (58960, 58966), False, 'import collections\n'), ((59699, 59728), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (59722, 59728), False, 'import collections\n'), ((60520, 60549), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (60543, 60549), False, 'import collections\n'), ((61499, 61528), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (61522, 61528), False, 'import collections\n'), ((62245, 62447), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('dept_id', types.LONG_TYPE), ('mysum', types.LONG_TYPE), ('mycount',\n types.LONG_TYPE), ('myavg', types.FLOAT_TYPE), ('_COLUMN4_', types.\n LONG_TYPE), ('max_id', types.LONG_TYPE)]"], {}), "([('dept_id', types.LONG_TYPE), ('mysum', types.LONG_TYPE), (\n 'mycount', types.LONG_TYPE), ('myavg', types.FLOAT_TYPE), ('_COLUMN4_',\n types.LONG_TYPE), ('max_id', types.LONG_TYPE)])\n", (62258, 62447), True, 'import raco.scheme as scheme\n'), ((67667, 67696), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (67690, 67696), False, 'import collections\n'), ((68406, 68435), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (68429, 68435), False, 'import collections\n'), ((77543, 77572), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (77566, 77572), False, 'import collections\n'), ((79347, 79376), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (79370, 79376), False, 'import collections\n'), ((86336, 86357), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (86355, 86357), False, 'import collections\n'), ((87285, 87318), 'collections.Counter', 'collections.Counter', (["{('ab',): 1}"], {}), "({('ab',): 1})\n", (87304, 87318), False, 
'import collections\n'), ((87592, 87632), 'collections.Counter', 'collections.Counter', (["{(b'\\xbe\\xef',): 1}"], {}), "({(b'\\xbe\\xef',): 1})\n", (87611, 87632), False, 'import collections\n'), ((89331, 89458), 'collections.Counter', 'collections.Counter', (['((i, s) for i, d, n, s in self.emp_table for i2, d2, n2, s2 in self.\n emp_table if s2 == 90000 and s < s2)'], {}), '((i, s) for i, d, n, s in self.emp_table for i2, d2, n2,\n s2 in self.emp_table if s2 == 90000 and s < s2)\n', (89350, 89458), False, 'import collections\n'), ((90749, 90792), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('foo', types.STRING_TYPE)]"], {}), "([('foo', types.STRING_TYPE)])\n", (90762, 90792), True, 'import raco.scheme as scheme\n'), ((91126, 91169), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('foo', types.DOUBLE_TYPE)]"], {}), "([('foo', types.DOUBLE_TYPE)])\n", (91139, 91169), True, 'import raco.scheme as scheme\n'), ((92831, 92894), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('a', types.LONG_TYPE), ('b', types.LONG_TYPE)]"], {}), "([('a', types.LONG_TYPE), ('b', types.LONG_TYPE)])\n", (92844, 92894), True, 'import raco.scheme as scheme\n'), ((93262, 93325), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('a', types.LONG_TYPE), ('b', types.LONG_TYPE)]"], {}), "([('a', types.LONG_TYPE), ('b', types.LONG_TYPE)])\n", (93275, 93325), True, 'import raco.scheme as scheme\n'), ((93693, 93756), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('a', types.LONG_TYPE), ('b', types.LONG_TYPE)]"], {}), "([('a', types.LONG_TYPE), ('b', types.LONG_TYPE)])\n", (93706, 93756), True, 'import raco.scheme as scheme\n'), ((94132, 94195), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('a', types.LONG_TYPE), ('b', types.LONG_TYPE)]"], {}), "([('a', types.LONG_TYPE), ('b', types.LONG_TYPE)])\n", (94145, 94195), True, 'import raco.scheme as scheme\n'), ((94554, 94618), 'raco.scheme.Scheme', 'scheme.Scheme', (["[('a', types.LONG_TYPE), ('a1', types.LONG_TYPE)]"], {}), "([('a', types.LONG_TYPE), ('a1', types.LONG_TYPE)])\n", (94567, 94618), True, 'import raco.scheme as scheme\n'), ((95240, 95288), 'collections.Counter', 'collections.Counter', (['[(sum_dept_id, sum_salary)]'], {}), '([(sum_dept_id, sum_salary)])\n', (95259, 95288), False, 'import collections\n'), ((95869, 95899), 'collections.Counter', 'collections.Counter', (['[(32, 5)]'], {}), '([(32, 5)])\n', (95888, 95899), False, 'import collections\n'), ((2407, 2436), 'collections.Counter', 'collections.Counter', (['expected'], {}), '(expected)\n', (2426, 2436), False, 'import collections\n'), ((22493, 22517), 'collections.Counter', 'collections.Counter', (['res'], {}), '(res)\n', (22512, 22517), False, 'import collections\n'), ((39878, 39887), 'md5.new', 'md5.new', ([], {}), '()\n', (39885, 39887), False, 'import md5\n'), ((55342, 55370), 'collections.Counter', 'collections.Counter', (['results'], {}), '(results)\n', (55361, 55370), False, 'import collections\n'), ((56196, 56224), 'collections.Counter', 'collections.Counter', (['results'], {}), '(results)\n', (56215, 56224), False, 'import collections\n'), ((58482, 58510), 'collections.Counter', 'collections.Counter', (['results'], {}), '(results)\n', (58501, 58510), False, 'import collections\n'), ((59244, 59272), 'collections.Counter', 'collections.Counter', (['results'], {}), '(results)\n', (59263, 59272), False, 'import collections\n'), ((60006, 60034), 'collections.Counter', 'collections.Counter', (['results'], {}), '(results)\n', (60025, 60034), False, 'import collections\n'), ((60915, 60943), 'collections.Counter', 
'collections.Counter', (['results'], {}), '(results)\n', (60934, 60943), False, 'import collections\n'), ((61925, 61953), 'collections.Counter', 'collections.Counter', (['results'], {}), '(results)\n', (61944, 61953), False, 'import collections\n'), ((68000, 68027), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (68019, 68027), False, 'import collections\n'), ((68724, 68751), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (68743, 68751), False, 'import collections\n'), ((69268, 69295), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (69287, 69295), False, 'import collections\n'), ((69891, 69918), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (69910, 69918), False, 'import collections\n'), ((70601, 70628), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (70620, 70628), False, 'import collections\n'), ((71241, 71268), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (71260, 71268), False, 'import collections\n'), ((72068, 72095), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (72087, 72095), False, 'import collections\n'), ((72844, 72871), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (72863, 72871), False, 'import collections\n'), ((73420, 73447), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (73439, 73447), False, 'import collections\n'), ((74075, 74102), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (74094, 74102), False, 'import collections\n'), ((74817, 74844), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (74836, 74844), False, 'import collections\n'), ((75489, 75516), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (75508, 75516), False, 'import collections\n'), ((76348, 76375), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (76367, 76375), False, 'import collections\n'), ((77155, 77182), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (77174, 77182), False, 'import collections\n'), ((77966, 77993), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (77985, 77993), False, 'import collections\n'), ((79717, 79744), 'collections.Counter', 'collections.Counter', (['tuples'], {}), '(tuples)\n', (79736, 79744), False, 'import collections\n'), ((81822, 81846), 'collections.Counter', 'collections.Counter', (['tps'], {}), '(tps)\n', (81841, 81846), False, 'import collections\n'), ((82399, 82423), 'collections.Counter', 'collections.Counter', (['tps'], {}), '(tps)\n', (82418, 82423), False, 'import collections\n'), ((84954, 84984), 'collections.Counter', 'collections.Counter', (['[(_sum,)]'], {}), '([(_sum,)])\n', (84973, 84984), False, 'import collections\n'), ((85428, 85458), 'collections.Counter', 'collections.Counter', (['[(_sum,)]'], {}), '([(_sum,)])\n', (85447, 85458), False, 'import collections\n'), ((85688, 85705), 'raco.scheme.getName', 'scheme.getName', (['(0)'], {}), '(0)\n', (85702, 85705), True, 'import raco.scheme as scheme\n'), ((85746, 85763), 'raco.scheme.getName', 'scheme.getName', (['(1)'], {}), '(1)\n', (85760, 85763), True, 'import raco.scheme as scheme\n'), ((92591, 92618), 'collections.Counter', 'collections.Counter', (['[(1,)]'], {}), '([(1,)])\n', (92610, 92618), False, 'import collections\n'), ((92969, 92998), 
'collections.Counter', 'collections.Counter', (['[(1, 2)]'], {}), '([(1, 2)])\n', (92988, 92998), False, 'import collections\n'), ((93400, 93429), 'collections.Counter', 'collections.Counter', (['[(1, 2)]'], {}), '([(1, 2)])\n', (93419, 93429), False, 'import collections\n'), ((93831, 93860), 'collections.Counter', 'collections.Counter', (['[(1, 2)]'], {}), '([(1, 2)])\n', (93850, 93860), False, 'import collections\n'), ((94270, 94299), 'collections.Counter', 'collections.Counter', (['[(1, 2)]'], {}), '([(1, 2)])\n', (94289, 94299), False, 'import collections\n'), ((94693, 94722), 'collections.Counter', 'collections.Counter', (['[(1, 1)]'], {}), '([(1, 1)])\n', (94712, 94722), False, 'import collections\n'), ((2657, 2686), 'raco.fake_data.FakeData.emp_table.elements', 'FakeData.emp_table.elements', ([], {}), '()\n', (2684, 2686), False, 'from raco.fake_data import FakeData\n'), ((35456, 35477), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (35475, 35477), False, 'import collections\n'), ((36636, 36657), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (36655, 36657), False, 'import collections\n'), ((37049, 37070), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (37068, 37070), False, 'import collections\n'), ((40715, 40736), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (40734, 40736), False, 'import collections\n'), ((41031, 41052), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (41050, 41052), False, 'import collections\n'), ((41330, 41351), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (41349, 41351), False, 'import collections\n'), ((41639, 41660), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (41658, 41660), False, 'import collections\n'), ((41921, 41942), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (41940, 41942), False, 'import collections\n'), ((42190, 42211), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (42209, 42211), False, 'import collections\n'), ((42486, 42507), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (42505, 42507), False, 'import collections\n'), ((42759, 42780), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (42778, 42780), False, 'import collections\n'), ((43048, 43069), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (43067, 43069), False, 'import collections\n'), ((43365, 43386), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (43384, 43386), False, 'import collections\n'), ((43742, 43763), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (43761, 43763), False, 'import collections\n'), ((44068, 44089), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (44087, 44089), False, 'import collections\n'), ((44403, 44424), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (44422, 44424), False, 'import collections\n'), ((44749, 44770), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (44768, 44770), False, 'import collections\n'), ((37601, 37613), 'math.ceil', 'math.ceil', (['b'], {}), '(b)\n', (37610, 37613), False, 'import math\n'), ((37918, 37929), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (37926, 37929), False, 'import math\n'), ((38238, 38251), 'math.floor', 'math.floor', (['b'], {}), '(b)\n', (38248, 38251), False, 'import math\n'), ((38570, 38581), 'math.log', 'math.log', (['b'], {}), '(b)\n', (38578, 38581), False, 'import math\n'), ((38908, 38919), 'math.sin', 'math.sin', (['b'], 
{}), '(b)\n', (38916, 38919), False, 'import math\n'), ((39238, 39250), 'math.sqrt', 'math.sqrt', (['b'], {}), '(b)\n', (39247, 39250), False, 'import math\n'), ((39578, 39589), 'math.tan', 'math.tan', (['b'], {}), '(b)\n', (39586, 39589), False, 'import math\n')]
|
import cdms2,sys,cdutil,os,cdat_info
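# Load the Navy land fraction ("sftlf", stored in percent) and convert it to a 0-1 fraction
# for use as the source mask below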
f=cdms2.open(os.path.join(cdat_info.get_sampledata_path(),"navy_land.nc"))
navy_frac = f("sftlf")/100.
target = cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))("clt",slice(0,1)).getGrid()
mask = cdutil.generateLandSeaMask(target,navy_frac)
target = cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))("clt",slice(0,1))
mask = cdutil.generateLandSeaMask(target,navy_frac)
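# The mask can also be generated on a 64-latitude Gaussian grid without an explicit source fraction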
target=cdms2.createGaussianGrid(64)
mask = cdutil.generateLandSeaMask(target)
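# Regional target: a latitude/longitude subset of the clt grid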
target = cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))("clt",slice(0,1),latitude=(15,85),longitude=(-175,-65)).getGrid()
mask = cdutil.generateLandSeaMask(target)
#import vcs
#x=vcs.init()
#x.plot(mask)
#raw_input()
|
[
"cdutil.generateLandSeaMask",
"cdat_info.get_sampledata_path",
"cdms2.createGaussianGrid"
] |
[((253, 298), 'cdutil.generateLandSeaMask', 'cdutil.generateLandSeaMask', (['target', 'navy_frac'], {}), '(target, navy_frac)\n', (279, 298), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((399, 444), 'cdutil.generateLandSeaMask', 'cdutil.generateLandSeaMask', (['target', 'navy_frac'], {}), '(target, navy_frac)\n', (425, 444), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((451, 479), 'cdms2.createGaussianGrid', 'cdms2.createGaussianGrid', (['(64)'], {}), '(64)\n', (475, 479), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((487, 521), 'cdutil.generateLandSeaMask', 'cdutil.generateLandSeaMask', (['target'], {}), '(target)\n', (513, 521), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((671, 705), 'cdutil.generateLandSeaMask', 'cdutil.generateLandSeaMask', (['target'], {}), '(target)\n', (697, 705), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((64, 95), 'cdat_info.get_sampledata_path', 'cdat_info.get_sampledata_path', ([], {}), '()\n', (93, 95), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((331, 362), 'cdat_info.get_sampledata_path', 'cdat_info.get_sampledata_path', ([], {}), '()\n', (360, 362), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((175, 206), 'cdat_info.get_sampledata_path', 'cdat_info.get_sampledata_path', ([], {}), '()\n', (204, 206), False, 'import cdms2, sys, cdutil, os, cdat_info\n'), ((555, 586), 'cdat_info.get_sampledata_path', 'cdat_info.get_sampledata_path', ([], {}), '()\n', (584, 586), False, 'import cdms2, sys, cdutil, os, cdat_info\n')]
|
import django
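# "patterns" and "url" live in django.conf.urls on newer Django; older releases only provide django.conf.urls.defaults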
if django.get_version() >= '1.5':
from django.conf.urls import patterns, url
else:
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url(r'^simple/action/$', 'simpleAction', name = 'simpleAction'),
)
|
[
"django.conf.urls.defaults.url",
"django.get_version"
] |
[((18, 38), 'django.get_version', 'django.get_version', ([], {}), '()\n', (36, 38), False, 'import django\n'), ((190, 250), 'django.conf.urls.defaults.url', 'url', (['"""^simple/action/$"""', '"""simpleAction"""'], {'name': '"""simpleAction"""'}), "('^simple/action/$', 'simpleAction', name='simpleAction')\n", (193, 250), False, 'from django.conf.urls.defaults import patterns, url\n')]
|
# <NAME>
import numpy as np
import pylab as plt
from spectral.io import envi
import os, sys
sys.path.append('../utils')
from fpa import FPA
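# Load the linearity map cube; "thresh" flags outlier values and the FPA config supplies
# the OSF seam rows that are patched below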
I = envi.open('../data/EMIT_LinearityMap_20220117.hdr').load()
thresh = 20
fpa = FPA('../config/tvac2_config.json')
for band in range(I.shape[2]):
x = np.squeeze(I[:,:,band])
# Remove anomalously high or low values
for row in range(1,x.shape[0]):
for col in range(x.shape[1]):
if abs(x[row,col])>thresh:
x[row,col] = x[row-1,col]
# Copy and paste linearity columns over the first aquisition zone,
# which is anomalous
for col in range(24,44):
x[:,col] = x[:,44]
# Copy and paste linearity columns over the goober zone,
# which is anomalous
for col in range(1020,1027):
x[:,col] = x[:,1027]
# Copy and paste linearity rows over the OSF filter,
# which is anomalous
for lo, hi in fpa.osf_seam_positions:
for row in range(lo, hi+1):
x[row,:] = x[lo-1,:]
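    # Write the cleaned band back into the image cube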
I[:,:,band] = x.reshape((x.shape[0],x.shape[1],1))
envi.save_image('../data/EMIT_LinearityMap_20220117.hdr',I,ext='',force=True)
|
[
"sys.path.append",
"spectral.io.envi.save_image",
"spectral.io.envi.open",
"fpa.FPA",
"numpy.squeeze"
] |
[((92, 119), 'sys.path.append', 'sys.path.append', (['"""../utils"""'], {}), "('../utils')\n", (107, 119), False, 'import os, sys\n'), ((224, 258), 'fpa.FPA', 'FPA', (['"""../config/tvac2_config.json"""'], {}), "('../config/tvac2_config.json')\n", (227, 258), False, 'from fpa import FPA\n'), ((1019, 1104), 'spectral.io.envi.save_image', 'envi.save_image', (['"""../data/EMIT_LinearityMap_20220117.hdr"""', 'I'], {'ext': '""""""', 'force': '(True)'}), "('../data/EMIT_LinearityMap_20220117.hdr', I, ext='', force=True\n )\n", (1034, 1104), False, 'from spectral.io import envi\n'), ((297, 322), 'numpy.squeeze', 'np.squeeze', (['I[:, :, band]'], {}), '(I[:, :, band])\n', (307, 322), True, 'import numpy as np\n'), ((145, 196), 'spectral.io.envi.open', 'envi.open', (['"""../data/EMIT_LinearityMap_20220117.hdr"""'], {}), "('../data/EMIT_LinearityMap_20220117.hdr')\n", (154, 196), False, 'from spectral.io import envi\n')]
|
from process import *
import sys
import time
import signal
def main(argv):
# proc = Process.from_name('Gw.exe')
proc = Process(3356)
dbg = ProcessDebugger(proc)
scanner = ProcessScanner(proc)
def signal_handler(signal, frame):
dbg.detach()
signal.signal(signal.SIGINT, signal_handler)
@Hook.fastcall(LPVOID, DWORD, LPVOID)
def OnSendPacket(ctx, size, packet):
header = proc.read(packet, 'I')[0]
print('Packet {%-3d, %d, 0x%X}' % (size, header, header))
@Hook.fastcall(LPVOID)
def OnRecvPacket(packet):
header = proc.read(packet, 'I')[0]
print(header)
@Hook.fastcall(LPVOID, DWORD)
def OnWriteChatLog(msg, channel):
print(f'New message in channel {channel}')
addr = scanner.find(b'\x55\x8B\xEC\x83\xEC\x2C\x53\x56\x57\x8B\xF9\x85')
dbg.add_hook(addr, OnSendPacket)
# dbg.add_hook(0x007DE540, OnWriteChatLog)
"""
addr = scanner.find(b'\x50\x52\x8B\x55\x0C\xC7\x45\xF8', -0x23)
addr = proc.read(addr)[0] # 0xA2B294
addr = proc.read(addr)[0] # gs = *(GameServer **)0xA2B294, 0xa2b294
addr = proc.read(addr + 8)[0] # gs->consts
gs_srv_codecs, _, gs_srv_codecs_count = proc.read(addr + 44, 'III')
addr_gs_srv = range(gs_srv_codecs, gs_srv_codecs + (12 * gs_srv_codecs_count), 12) # GAME_SERVER
for id, addr in enumerate(addr_gs_srv):
fields_addr, count, handler = proc.read(addr, 'III')
if not handler:
continue
if id in ignored_stoc:
continue
# dbg.add_hook(handler, OnRecvPacket)
"""
print(f'Start debugging process {proc.name}, {proc.id}')
return dbg.run(frequency=250)
if __name__ == '__main__':
error = main(sys.argv[1:])
sys.exit(error)
|
[
"signal.signal",
"sys.exit"
] |
[((276, 320), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (289, 320), False, 'import signal\n'), ((1759, 1774), 'sys.exit', 'sys.exit', (['error'], {}), '(error)\n', (1767, 1774), False, 'import sys\n')]
|
# -*- coding: utf8 -*-
""" SunFinder 的协程版本
"""
from __future__ import unicode_literals
from gevent.pool import Pool
from .subfinder import SubFinder
class SubFinderGevent(SubFinder):
""" SubFinder Thread version
"""
def _init_pool(self):
self.pool = Pool(10)
|
[
"gevent.pool.Pool"
] |
[((272, 280), 'gevent.pool.Pool', 'Pool', (['(10)'], {}), '(10)\n', (276, 280), False, 'from gevent.pool import Pool\n')]
|
import bottle
import model
result = ''
kolicina2 = 0
st = 0
vhodna = ''
izhodna = ''
@bottle.get('/')
def index():
return bottle.template('index.tpl', result=result)
@bottle.get('/pretvori/')
def pretvori():
kolicina = kolicina2
global st
st = float(bottle.request.query['st'])
global vhodna
vhodna = bottle.request.query['enota1']
global izhodna
izhodna = bottle.request.query['enota2']
result = model.pretvarjanje(kolicina, st, vhodna, izhodna)
return bottle.template('rezultat.tpl', result = result, st=st, vhodna=vhodna, izhodna=izhodna)
@bottle.get('/na_dolzino/')
def na_dolzino():
global kolicina2
kolicina2 = 1
return bottle.template('dolzina.tpl', result=result)
@bottle.get('/na_prostornino/')
def na_prostornino():
global kolicina2
kolicina2 = 2
return bottle.template('prostornina.tpl', result=result)
@bottle.get('/na_tezo/')
def na_tezo():
global kolicina2
kolicina2 = 3
return bottle.template('teza.tpl', result=result)
@bottle.get('/na_cas/')
def na_cas():
global kolicina2
kolicina2 = 4
return bottle.template('cas.tpl', result=result)
@bottle.get('/nazaj/')
def nazaj():
return bottle.template('index.tpl')
bottle.run(reloader=True, debug=True)
|
[
"model.pretvarjanje",
"bottle.template",
"bottle.get",
"bottle.run"
] |
[((97, 112), 'bottle.get', 'bottle.get', (['"""/"""'], {}), "('/')\n", (107, 112), False, 'import bottle\n'), ((189, 213), 'bottle.get', 'bottle.get', (['"""/pretvori/"""'], {}), "('/pretvori/')\n", (199, 213), False, 'import bottle\n'), ((614, 640), 'bottle.get', 'bottle.get', (['"""/na_dolzino/"""'], {}), "('/na_dolzino/')\n", (624, 640), False, 'import bottle\n'), ((763, 793), 'bottle.get', 'bottle.get', (['"""/na_prostornino/"""'], {}), "('/na_prostornino/')\n", (773, 793), False, 'import bottle\n'), ((924, 947), 'bottle.get', 'bottle.get', (['"""/na_tezo/"""'], {}), "('/na_tezo/')\n", (934, 947), False, 'import bottle\n'), ((1064, 1086), 'bottle.get', 'bottle.get', (['"""/na_cas/"""'], {}), "('/na_cas/')\n", (1074, 1086), False, 'import bottle\n'), ((1203, 1224), 'bottle.get', 'bottle.get', (['"""/nazaj/"""'], {}), "('/nazaj/')\n", (1213, 1224), False, 'import bottle\n'), ((1287, 1324), 'bottle.run', 'bottle.run', ([], {'reloader': '(True)', 'debug': '(True)'}), '(reloader=True, debug=True)\n', (1297, 1324), False, 'import bottle\n'), ((139, 182), 'bottle.template', 'bottle.template', (['"""index.tpl"""'], {'result': 'result'}), "('index.tpl', result=result)\n", (154, 182), False, 'import bottle\n'), ((460, 509), 'model.pretvarjanje', 'model.pretvarjanje', (['kolicina', 'st', 'vhodna', 'izhodna'], {}), '(kolicina, st, vhodna, izhodna)\n', (478, 509), False, 'import model\n'), ((522, 611), 'bottle.template', 'bottle.template', (['"""rezultat.tpl"""'], {'result': 'result', 'st': 'st', 'vhodna': 'vhodna', 'izhodna': 'izhodna'}), "('rezultat.tpl', result=result, st=st, vhodna=vhodna,\n izhodna=izhodna)\n", (537, 611), False, 'import bottle\n'), ((713, 758), 'bottle.template', 'bottle.template', (['"""dolzina.tpl"""'], {'result': 'result'}), "('dolzina.tpl', result=result)\n", (728, 758), False, 'import bottle\n'), ((870, 919), 'bottle.template', 'bottle.template', (['"""prostornina.tpl"""'], {'result': 'result'}), "('prostornina.tpl', result=result)\n", (885, 919), False, 'import bottle\n'), ((1017, 1059), 'bottle.template', 'bottle.template', (['"""teza.tpl"""'], {'result': 'result'}), "('teza.tpl', result=result)\n", (1032, 1059), False, 'import bottle\n'), ((1155, 1196), 'bottle.template', 'bottle.template', (['"""cas.tpl"""'], {'result': 'result'}), "('cas.tpl', result=result)\n", (1170, 1196), False, 'import bottle\n'), ((1251, 1279), 'bottle.template', 'bottle.template', (['"""index.tpl"""'], {}), "('index.tpl')\n", (1266, 1279), False, 'import bottle\n')]
|
# Author: <NAME>
# email: <EMAIL>
import numpy as np
import init_paths
from images_io import load_image
from xinshuo_visualization import visualize_image
def test_load_image():
image_path = '../lena.png'
print('basic')
img = load_image(image_path)
assert img.shape == (512, 512, 3)
print('testing for resizing')
img = load_image(image_path, resize_factor=2.0)
assert img.shape == (1024, 1024, 3)
print('testing for resizing')
img = load_image(image_path, target_size=[1033, 1033])
assert img.shape == (1033, 1033, 3)
print('testing for rotation')
img = load_image(image_path, input_angle=45)
visualize_image(img, vis=True)
assert img.shape == (726, 726, 3)
print('testing for rotation')
img = load_image(image_path, input_angle=450)
visualize_image(img, vis=True)
print('\n\nDONE! SUCCESSFUL!!\n')
if __name__ == '__main__':
test_load_image()
|
[
"xinshuo_visualization.visualize_image",
"images_io.load_image"
] |
[((231, 253), 'images_io.load_image', 'load_image', (['image_path'], {}), '(image_path)\n', (241, 253), False, 'from images_io import load_image\n'), ((329, 370), 'images_io.load_image', 'load_image', (['image_path'], {'resize_factor': '(2.0)'}), '(image_path, resize_factor=2.0)\n', (339, 370), False, 'from images_io import load_image\n'), ((447, 495), 'images_io.load_image', 'load_image', (['image_path'], {'target_size': '[1033, 1033]'}), '(image_path, target_size=[1033, 1033])\n', (457, 495), False, 'from images_io import load_image\n'), ((572, 610), 'images_io.load_image', 'load_image', (['image_path'], {'input_angle': '(45)'}), '(image_path, input_angle=45)\n', (582, 610), False, 'from images_io import load_image\n'), ((612, 642), 'xinshuo_visualization.visualize_image', 'visualize_image', (['img'], {'vis': '(True)'}), '(img, vis=True)\n', (627, 642), False, 'from xinshuo_visualization import visualize_image\n'), ((717, 756), 'images_io.load_image', 'load_image', (['image_path'], {'input_angle': '(450)'}), '(image_path, input_angle=450)\n', (727, 756), False, 'from images_io import load_image\n'), ((758, 788), 'xinshuo_visualization.visualize_image', 'visualize_image', (['img'], {'vis': '(True)'}), '(img, vis=True)\n', (773, 788), False, 'from xinshuo_visualization import visualize_image\n')]
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Test that headers are on all files
"""
# stdlib
from pathlib import Path
import os
# third-party
import pytest
import yaml
addheader_add = pytest.importorskip("addheader.add", reason="`addheader` package is not available")
@pytest.fixture
def package_root():
"""Determine package root.
"""
import idaes
return Path(idaes.__file__).parent
@pytest.fixture
def patterns(package_root):
"""Grab glob patterns from config file.
"""
conf_file = package_root.parent / "addheader.yml"
if not conf_file.exists():
print(f"Cannot load configuration file from '{conf_file}'. Perhaps this is not development mode?")
return None
with open(conf_file) as f:
conf_data = yaml.safe_load(f)
print(f"Patterns for finding files with headers: {conf_data['patterns']}")
return conf_data["patterns"]
@pytest.mark.unit
def test_headers(package_root, patterns):
if patterns is None:
print(f"ERROR: Did not get glob patterns: skipping test")
else:
# modify patterns to match the files that should have headers
ff = addheader_add.FileFinder(package_root, glob_patterns=patterns)
has_header, missing_header = addheader_add.detect_files(ff)
# ignore empty files (probably should add option in 'detect_files' for this)
nonempty_missing_header = list(filter(lambda p: p.stat().st_size > 0, missing_header))
#
if len(nonempty_missing_header) > 0:
pfx = str(package_root.resolve())
pfx_len = len(pfx)
file_list = ", ".join([str(p)[pfx_len + 1:] for p in nonempty_missing_header])
print(f"Missing headers from files under '{pfx}{os.path.sep}': {file_list}")
# uncomment to require all files to have headers
assert len(nonempty_missing_header) == 0
|
[
"pytest.importorskip",
"yaml.safe_load",
"pathlib.Path"
] |
[((919, 1007), 'pytest.importorskip', 'pytest.importorskip', (['"""addheader.add"""'], {'reason': '"""`addheader` package is not available"""'}), "('addheader.add', reason=\n '`addheader` package is not available')\n", (938, 1007), False, 'import pytest\n'), ((1108, 1128), 'pathlib.Path', 'Path', (['idaes.__file__'], {}), '(idaes.__file__)\n', (1112, 1128), False, 'from pathlib import Path\n'), ((1497, 1514), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1511, 1514), False, 'import yaml\n')]
|
'''
Created on Feb 5, 2018
@author: <NAME>
'''
import CONSTANTS,time
def log_arguments(f):
"""
@f: Function to be wrapped with the logging of it's arguments
"""
def logger(*args,**kwargs):
"""
wrapping function, it logs the arguments of the decorated function
"""
if CONSTANTS.LOG_ARGUMENTS:
#print("Function "+f.__name__+" called:")
print("Positional Arguments ")
for a in args:
print(a)
print("keyword arguments ")
for k,v in kwargs.items():
print(k+" = "+v)
return f(*args,**kwargs)
return logger
def time_execution(f):
"""
@f: Function to be wrapped with the logging of it's execution time
"""
def timing_wrapper(*args,**kwargs):
"""
wrapping function, it logs the execution time of the decorated function
"""
if CONSTANTS.TIME_EXECUTION:
start=time.time()
f(*args,**kwargs)
if CONSTANTS.TIME_EXECUTION:
end=time.time()
print("Execution time: "+str(end-start)+" seconds.")
return timing_wrapper
|
[
"time.time"
] |
[((964, 975), 'time.time', 'time.time', ([], {}), '()\n', (973, 975), False, 'import CONSTANTS, time\n'), ((1055, 1066), 'time.time', 'time.time', ([], {}), '()\n', (1064, 1066), False, 'import CONSTANTS, time\n')]
|
'''
util.py
'''
import os.path
import h5py
import numpy as np
import constants
import skimage.io
import skimage.transform
from scipy.io import loadmat
import glob
import os
import cPickle as pickle
import torch
from itertools import izip_longest
from glove import Glove
import torch
import torch.nn as nn
# Makes the directories of they don't already exist
def make_directories():
output_path = constants.SAVE_PATH
if not os.path.exists(output_path):
os.makedirs(output_path)
print("Made output directory")
else:
print("WARNING: starting training with an existing outputs directory")
if not os.path.exists(output_path + 'weights/'):
os.makedirs(output_path + 'weights/')
print("Made weights directory")
if not os.path.exists(output_path + 'images/'):
os.makedirs(output_path + 'images/')
print("Made images directory")
# Loads a map from image file names to 'test', 'train', or 'val'
# Used in other functions to split data
def load_dataset_map():
ids = loadmat('data_constants/setid.mat')
# Flip train and test examples since otherwise there would be 6000 test
train_ids = ids['tstid'][0] - 1
test_ids = ids['trnid'][0] - 1
val_ids = ids['valid'][0] - 1
print(len(train_ids), len(val_ids), len(test_ids), "Train, val, test examples, respectively")
filenames = [name for name in os.listdir('Data/' + constants.ENTIRE_DATASET) if name.endswith('.jpg')]
image_paths = sorted(filenames)
dataset_map = {}
for i, name in enumerate(image_paths):
if i in train_ids:
dataset_map[name] = 'train'
elif i in test_ids:
dataset_map[name] ='test'
elif i in val_ids:
dataset_map[name] ='val'
else:
print("Invalid ID!")
return dataset_map
def load_flowers_capt_dict():
"""Use pickle to load the flowers captions"""
flowers_capt_dict = pickle.load(open( constants.FLOWERS_CAP_DICT, "rb" ))
return flowers_capt_dict
def load_coco_capt_dict():
"""Use pickle to load the MSCOCO captions"""
coco_capt_dict = pickle.load(open(constants.COCO_CAP_DICT, "rb"))
return coco_capt_dict
# Adapted from https://github.com/paarthneekhara/text-to-image
# Takes the directoy and file name of the hdf5 file that contains the word vectors
# Returns a dict from image to list of captions
def load_text_vec(directory, file_name, dataset_map):
h = h5py.File(os.path.join(directory, file_name))
train_captions, val_captions, test_captions = {}, {}, {}
for item in h.iteritems():
name = item[0]
if dataset_map[name] == 'train':
train_captions[name] = np.array(item[1])
elif dataset_map[name] =='val':
val_captions[name] = np.array(item[1])
elif dataset_map[name] =='test':
test_captions[name] = np.array(item[1])
else:
print("Invalid name")
return train_captions, val_captions, test_captions
# Gets images for the main function
def get_images(directory, file_name, save_path):
if os.path.exists(save_path):
image_dicts = torch.load(save_path)
train_image_dict, val_image_dict, test_image_dict = image_dicts
print("Loaded images")
else:
print("Loading images and separating into train/val/test sets")
path = os.path.join(directory, file_name)
filenames = train_captions.keys() + val_captions.keys() + test_captions.keys()
train_image_dict, val_image_dict, test_image_dict = util.load_images(path, filenames, dataset_map)
image_dicts = [train_image_dict, val_image_dict, test_image_dict]
torch.save(image_dicts, save_path)
return train_image_dict, val_image_dict, test_image_dict
# Takes in the directory and a list of file names and returns a dict of file name -> images
def load_images(directory, filenames, dataset_map):
train_image_dict, val_image_dict, test_image_dict = {}, {}, {}
for name in filenames:
image_file = os.path.join(directory + name)
curr_image = skimage.io.imread(image_file)
# Resize image to correct size as float 32
resized_image = skimage.transform.resize(curr_image, (constants.IMAGE_SIZE, constants.IMAGE_SIZE)).astype('float32')
if dataset_map[name] =='train':
train_image_dict[name] = resized_image
elif dataset_map[name] =='val':
val_image_dict[name] = resized_image
elif dataset_map[name] =='test':
test_image_dict[name] = resized_image
else:
print("Invalid name")
return train_image_dict, val_image_dict, test_image_dict
# custom weights initialization called on netG and netD
# from https://github.com/pytorch/examples/blob/master/dcgan/main.py
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Embedding') != -1:
m.weight.data.fill_(1.0)
elif classname.find('LSTM') != -1:
nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0)
def preprocess2(batch_input):
"""Inputs for self.embeddings in TextModel(). Batch_input must be numpy padded"""
batch_size, sent_len = batch_input.shape
offsets = [sent_len * i for i in range(batch_size)]
return batch_input.flatten(), offsets
def preprocess(batch_input):
"""If batch_input isn't numpy"""
glove = Glove()
flatten, offsets = [], []
index = 0
for ex in batch_input:
ex = ex.replace(',', ' ')
words = ex.strip('.').split()
result = []
for w in words:
try:
idx = glove.get_index(w)
result.append(idx)
except:
continue
# words = [glove.get_index(word) for word in words]
offsets.append(index)
flatten.extend(result)
index += len(result)
return torch.LongTensor(flatten), torch.LongTensor(offsets)
# https://github.com/sunshineatnoon/Paper-Implementations/blob/master/BEGAN/began.py
def adjust_learning_rate(optimizer, niter):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = constants.LR * (0.95 ** (niter // constants.LR_DECAY_EVERY))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
# From https://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks
# Iterates over an array in chunks
def grouper(array, n):
args = [iter(array)] * n
return izip_longest(*args)
# Show the generated image improves over time
def print_images(generated):
for img in generated:
image_done = img.data.numpy()
swap_image = np.swapaxes(image_done,1,2)
swap_image = np.swapaxes(swap_image,2,3)
plt.imshow(swap_image[0])
plt.show()
def get_text_description(text_caption_dict, batch_keys):
g_idx = [np.random.randint(len(text_caption_dict[batch_keys[0]])) for i in range(len(batch_keys))]
g_text_des = np.array([text_caption_dict[k][i] for k,i in zip(batch_keys, g_idx)])
# g_text_des = np.expand_dims(g_text_des, axis=0) ONLY NEED FOR 1 DIM
return g_text_des
def choose_wrong_image(image_dict, batch_keys):
wrong_image = []
for k in batch_keys:
wrong_key = np.random.choice(image_dict.keys())
while wrong_key == k:
wrong_key = np.random.choice(image_dict.keys())
wrong_image.append(image_dict[wrong_key])
wrong_image = np.array(wrong_image)
wrong_image = augment_image_batch(wrong_image)
wrong_image = np.swapaxes(wrong_image, 2, 3)
wrong_image = np.swapaxes(wrong_image, 1, 2)
return wrong_image
# Finds the real image for the given batch data
def choose_real_image(image_dict, batch_keys):
real_img = np.array([image_dict[k] for k in batch_keys])
real_img = augment_image_batch(real_img)
real_img = np.swapaxes(real_img, 2, 3)
real_img = np.swapaxes(real_img, 1, 2)
return real_img
def augment_image_batch(images):
batch_size = images.shape[0]
for i in range(batch_size):
curr = images[i, :, :, :]
if np.random.rand() > .5:
curr = np.flip(curr, 1)
images[i, :, :, :] = curr
return images
# https://github.com/sunshineatnoon/Paper-Implementations/blob/master/BEGAN/began.py
def adjust_learning_rate(optimizer, niter):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = constants.LR * (0.95 ** (niter // constants.LR_DECAY_EVERY))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
|
[
"numpy.flip",
"os.makedirs",
"scipy.io.loadmat",
"torch.LongTensor",
"itertools.izip_longest",
"torch.load",
"os.path.exists",
"torch.save",
"torch.nn.init.xavier_uniform",
"numpy.array",
"glove.Glove",
"numpy.swapaxes",
"numpy.random.rand",
"os.path.join",
"os.listdir"
] |
[((1039, 1074), 'scipy.io.loadmat', 'loadmat', (['"""data_constants/setid.mat"""'], {}), "('data_constants/setid.mat')\n", (1046, 1074), False, 'from scipy.io import loadmat\n'), ((3085, 3110), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (3099, 3110), False, 'import os\n'), ((5568, 5575), 'glove.Glove', 'Glove', ([], {}), '()\n', (5573, 5575), False, 'from glove import Glove\n'), ((6708, 6727), 'itertools.izip_longest', 'izip_longest', (['*args'], {}), '(*args)\n', (6720, 6727), False, 'from itertools import izip_longest\n'), ((7674, 7695), 'numpy.array', 'np.array', (['wrong_image'], {}), '(wrong_image)\n', (7682, 7695), True, 'import numpy as np\n'), ((7765, 7795), 'numpy.swapaxes', 'np.swapaxes', (['wrong_image', '(2)', '(3)'], {}), '(wrong_image, 2, 3)\n', (7776, 7795), True, 'import numpy as np\n'), ((7814, 7844), 'numpy.swapaxes', 'np.swapaxes', (['wrong_image', '(1)', '(2)'], {}), '(wrong_image, 1, 2)\n', (7825, 7844), True, 'import numpy as np\n'), ((7979, 8024), 'numpy.array', 'np.array', (['[image_dict[k] for k in batch_keys]'], {}), '([image_dict[k] for k in batch_keys])\n', (7987, 8024), True, 'import numpy as np\n'), ((8085, 8112), 'numpy.swapaxes', 'np.swapaxes', (['real_img', '(2)', '(3)'], {}), '(real_img, 2, 3)\n', (8096, 8112), True, 'import numpy as np\n'), ((8128, 8155), 'numpy.swapaxes', 'np.swapaxes', (['real_img', '(1)', '(2)'], {}), '(real_img, 1, 2)\n', (8139, 8155), True, 'import numpy as np\n'), ((433, 460), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (447, 460), False, 'import os\n'), ((470, 494), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (481, 494), False, 'import os\n'), ((634, 674), 'os.path.exists', 'os.path.exists', (["(output_path + 'weights/')"], {}), "(output_path + 'weights/')\n", (648, 674), False, 'import os\n'), ((684, 721), 'os.makedirs', 'os.makedirs', (["(output_path + 'weights/')"], {}), "(output_path + 'weights/')\n", (695, 721), False, 'import os\n'), ((773, 812), 'os.path.exists', 'os.path.exists', (["(output_path + 'images/')"], {}), "(output_path + 'images/')\n", (787, 812), False, 'import os\n'), ((822, 858), 'os.makedirs', 'os.makedirs', (["(output_path + 'images/')"], {}), "(output_path + 'images/')\n", (833, 858), False, 'import os\n'), ((2459, 2493), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (2471, 2493), False, 'import os\n'), ((3134, 3155), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (3144, 3155), False, 'import torch\n'), ((3356, 3390), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (3368, 3390), False, 'import os\n'), ((3667, 3701), 'torch.save', 'torch.save', (['image_dicts', 'save_path'], {}), '(image_dicts, save_path)\n', (3677, 3701), False, 'import torch\n'), ((4025, 4055), 'os.path.join', 'os.path.join', (['(directory + name)'], {}), '(directory + name)\n', (4037, 4055), False, 'import os\n'), ((6063, 6088), 'torch.LongTensor', 'torch.LongTensor', (['flatten'], {}), '(flatten)\n', (6079, 6088), False, 'import torch\n'), ((6090, 6115), 'torch.LongTensor', 'torch.LongTensor', (['offsets'], {}), '(offsets)\n', (6106, 6115), False, 'import torch\n'), ((6889, 6918), 'numpy.swapaxes', 'np.swapaxes', (['image_done', '(1)', '(2)'], {}), '(image_done, 1, 2)\n', (6900, 6918), True, 'import numpy as np\n'), ((6938, 6967), 'numpy.swapaxes', 'np.swapaxes', (['swap_image', '(2)', '(3)'], {}), '(swap_image, 2, 3)\n', (6949, 6967), 
True, 'import numpy as np\n'), ((1389, 1435), 'os.listdir', 'os.listdir', (["('Data/' + constants.ENTIRE_DATASET)"], {}), "('Data/' + constants.ENTIRE_DATASET)\n", (1399, 1435), False, 'import os\n'), ((2686, 2703), 'numpy.array', 'np.array', (['item[1]'], {}), '(item[1])\n', (2694, 2703), True, 'import numpy as np\n'), ((8320, 8336), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8334, 8336), True, 'import numpy as np\n'), ((8362, 8378), 'numpy.flip', 'np.flip', (['curr', '(1)'], {}), '(curr, 1)\n', (8369, 8378), True, 'import numpy as np\n'), ((2777, 2794), 'numpy.array', 'np.array', (['item[1]'], {}), '(item[1])\n', (2785, 2794), True, 'import numpy as np\n'), ((2870, 2887), 'numpy.array', 'np.array', (['item[1]'], {}), '(item[1])\n', (2878, 2887), True, 'import numpy as np\n'), ((5166, 5198), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['m.weight'], {}), '(m.weight)\n', (5188, 5198), True, 'import torch.nn as nn\n')]
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pinner',
version='0.1.0',
description='',
long_description=long_description,
url='',
author='<NAME>',
author_email='<EMAIL>',
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
'Programming Language :: Python :: 3',
],
package_dir={'': 'src'},
packages=find_packages(where='src'),
python_requires='>=3.5, <4',
install_requires=[
'github-api-v3',
'py-dotenv'
],
extras_require={
'dev': ['check-manifest'],
'test': ['pytest']
},
tests_require=['pytest'],
setup_requires=['pytest-runner'],
entry_points={
'console_scripts': [
'pinner=pinner.main:main',
],
},
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((127, 149), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (139, 149), False, 'from os import path\n'), ((210, 238), 'os.path.join', 'path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (219, 238), False, 'from os import path\n'), ((651, 677), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (664, 677), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python
import re
import logging
import argparse
import requests
from plexapi.myplex import MyPlexAccount
logging.basicConfig(format='%(message)s', level=logging.INFO)
logging.getLogger('plexapi').setLevel(logging.CRITICAL)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("kodi_api_url", type=str, help="Kodi API URL IE: http://192.168.0.190:8080")
parser.add_argument("plex_username", type=str, help="Plex Account Username")
parser.add_argument("plex_password", type=str, help="Plex Account Password")
parser.add_argument("plex_server_name", type=str, help="Plex Server Name IE: media")
def get_json(rsp):
rsp.raise_for_status()
data = rsp.json()
if 'error' in data:
raise Exception('Kodi API Error: %s', data['error']['message'])
return data.get('result', {})
def get_movies(api_url):
payload = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetMovies',
'filter': {'field': 'playcount', 'operator': 'greaterthan', 'value': '0'},
'params': {'properties': ['playcount', 'imdbnumber', 'lastplayed']},
'id': 'libMovies'
}
data = get_json(requests.post(api_url, json=payload))
return dict((m['imdbnumber'], m) for m in data.get('movies', []))
def get_tv(api_url):
tv_shows = {}
payload_tv = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetTVShows',
'params': {'properties': ['uniqueid']},
'id': 'libTVShows'
}
data = get_json(requests.post(api_url, json=payload_tv))
tv_shows_data = dict((m['tvshowid'], m) for m in data.get('tvshows', []))
payload_ep = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetEpisodes',
'params': {'properties': ['season', 'episode', 'uniqueid', 'playcount', 'tvshowid']},
'id': 'libMovies'
}
data = get_json(requests.post(api_url, json=payload_ep))
for ep in data.get('episodes', []):
tvdb_id = tv_shows_data.get(ep['tvshowid'], {}).get('uniqueid', {}).get('unknown')
if not tvdb_id:
continue
if tvdb_id not in tv_shows:
tv_shows[tvdb_id] = {}
tv_show = tv_shows[tvdb_id]
if ep['season'] not in tv_show:
tv_show[ep['season']] = {}
tv_show_season = tv_show[ep['season']]
tv_show_season[ep['episode']] = ep
return tv_shows
if __name__ == '__main__':
args = parser.parse_args()
kodi_api_url = '%s/jsonrpc' % args.kodi_api_url.rstrip('/')
plex = None
try:
account = MyPlexAccount(args.plex_username, args.plex_password)
plex = account.resource(args.plex_server_name).connect()
except Exception as e:
log.exception('Error connecting to Plex %s' % str(e))
exit(1)
# TVShows
try:
log.info('Getting Kodi Episodes List')
kodi_episodes = get_tv(kodi_api_url)
log.info('Getting Plex TVShows')
plex_episodes = plex.library.section('TV Shows').search(unwatched=True, libtype='episode')
log.info('Sorting through Plex Episodes to detect watched from Kodi')
for epsiode in plex_episodes:
# Only support TheTVDB parsed shows
tvdb_match = re.search(r'thetvdb://([0-9]+)/', epsiode.guid)
if tvdb_match:
kodi_ep = kodi_episodes.get(tvdb_match.group(1), {}).get(epsiode.seasonNumber, {}).get(epsiode.index)
if kodi_ep:
if kodi_ep.get('playcount') > 0 and not epsiode.isWatched:
log.info('Marking epsiode %s S%sE%s as watched' %
(epsiode.grandparentTitle, epsiode.seasonNumber, epsiode.index))
epsiode.markWatched()
except Exception as e:
log.exception('Error processing TVShows %s' % str(e))
exit(1)
# Movies
try:
log.info('Getting Kodi Movie List')
kodi_movies = []
kodi_movies = get_movies(kodi_api_url)
log.info('Getting Plex Movies')
plex_movies = plex.library.section('Movies').search(unwatched=True)
log.info('Sorting through Plex Movies to detect watched from Kodi')
for movie in plex_movies:
# Only support IMDB parsed movies
imdb_match = re.search(r'((?:nm|tt)[\d]{7})', movie.guid)
if imdb_match:
imdb_id = imdb_match.group(1)
kodi_movie = kodi_movies.get(imdb_id)
if kodi_movie:
if kodi_movie.get('playcount') > 0 and not movie.isWatched:
log.info('Marking movie %s as watched' % movie.title)
movie.markWatched()
except Exception as e:
log.critical('Error processing Movies %s' % str(e))
exit(1)
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"requests.post",
"plexapi.myplex.MyPlexAccount",
"re.search",
"logging.getLogger"
] |
[((121, 182), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': 'logging.INFO'}), "(format='%(message)s', level=logging.INFO)\n", (140, 182), False, 'import logging\n'), ((245, 272), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (262, 272), False, 'import logging\n'), ((283, 308), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (306, 308), False, 'import argparse\n'), ((183, 211), 'logging.getLogger', 'logging.getLogger', (['"""plexapi"""'], {}), "('plexapi')\n", (200, 211), False, 'import logging\n'), ((1163, 1199), 'requests.post', 'requests.post', (['api_url'], {'json': 'payload'}), '(api_url, json=payload)\n', (1176, 1199), False, 'import requests\n'), ((1495, 1534), 'requests.post', 'requests.post', (['api_url'], {'json': 'payload_tv'}), '(api_url, json=payload_tv)\n', (1508, 1534), False, 'import requests\n'), ((1844, 1883), 'requests.post', 'requests.post', (['api_url'], {'json': 'payload_ep'}), '(api_url, json=payload_ep)\n', (1857, 1883), False, 'import requests\n'), ((2527, 2580), 'plexapi.myplex.MyPlexAccount', 'MyPlexAccount', (['args.plex_username', 'args.plex_password'], {}), '(args.plex_username, args.plex_password)\n', (2540, 2580), False, 'from plexapi.myplex import MyPlexAccount\n'), ((3198, 3244), 're.search', 're.search', (['"""thetvdb://([0-9]+)/"""', 'epsiode.guid'], {}), "('thetvdb://([0-9]+)/', epsiode.guid)\n", (3207, 3244), False, 'import re\n'), ((4259, 4303), 're.search', 're.search', (['"""((?:nm|tt)[\\\\d]{7})"""', 'movie.guid'], {}), "('((?:nm|tt)[\\\\d]{7})', movie.guid)\n", (4268, 4303), False, 'import re\n')]
|
from tftk.image.dataset import Mnist
from tftk.image.dataset import Food101
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.model.classification import SimpleClassificationModel
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder
from tftk import Context
from tftk.image.model.representation import SimpleRepresentationModel, add_projection_layers
from tftk.train.image import ImageTrain
from tftk import ENABLE_SUSPEND_RESUME_TRAINING, ResumeExecutor
import tensorflow as tf
class MovingAverageCallback(tf.keras.callbacks.Callback):
def __init__(self, model):
self.model = model
def on_train_begin(self, logs=None):
print("Starting training")
def on_train_end(self, logs=None):
print("Stop training")
def on_epoch_begin(self, epoch, logs=None):
print("\nStart epoch")
def on_epoch_end(self, epoch, logs=None):
print("\nOn epoch end, updating moving average")
w1 = self.model.get_weights()
w2 = []
for a in w1:
print(type(a))
w2.append( a*0.8 )
self.model.set_weights(w2)
def get_moving_average_callback(model):
m = model
def moving_average(loss, acc):
print("on epoch end")
w1 = m.get_weights()
w2 = []
for a in w1:
print(type(a))
w2.append( a*0.8 )
m.set_weights(w2)
return moving_average
def custom_loss(y_pred, y_true):
y_1, y_2 = y_pred
diff = y_1 - y_2
loss = tf.keras.backend.abs(diff)
return loss
def reinforcement(data):
img = data["image"]
label = data["label"]
return ([img,img],[img,img])
# supervised
def supervised_dataset(dataset:tf.data.Dataset, max_label:int)->tf.data.Dataset:
filtered = dataset.filter(lambda data:data['label'] < max_label)
def supervised_transform(data):
image = data['image']
image = tf.cast(image, tf.float32)
image = image / 255.0
label = data['label']
label = tf.one_hot(label, max_label)
return image, label
return filtered.map(supervised_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def pretext_dataset(dataset:tf.data.Dataset, start_label:int)->tf.data.Dataset:
filtered = dataset.filter(lambda data:data['label'] >= start_label)
def supervised_transform(data):
image = data['image']
image = tf.cast(image, tf.float32)
image = image / 255.0
def random_transform(image):
pass
if __name__ == '__main__':
context = Context.init_context(TRAINING_NAME='')
# ENABLE_SUSPEND_RESUME_TRAINING()
BATCH_SIZE = 500
CLASS_NUM = 10
IMAGE_SIZE = 28
EPOCHS = 2
SHUFFLE_SIZE = 1000
# if IS_SUSPEND_RESUME_TRAIN() == True and IS_ON_COLABOLATORY_WITH_GOOGLE_DRIVE()== True:
# train, train_len = Mnist.get_train_dataset()
# validation, validation_len = Mnist.get_test_dataset()
# train = train.map(ImageDatasetUtil.image_reguralization()).map(ImageDatasetUtil.one_hot(CLASS_NUM))
# validation = validation.map(ImageDatasetUtil.image_reguralization()).map(ImageDatasetUtil.one_hot(CLASS_NUM))
# train = train.map(reinforcement)
# online_model = SimpleRepresentationModel.get_representation_model(input_shape=(28,28,1))
# target_model = SimpleRepresentationModel.get_representation_model(input_shape=(28,28,1))
# print(online_model.layers)
# online_projection_model = add_projection_layers(online_model)
# target_projection_model = add_projection_layers(target_model)
# input_online = online_model.layers[0].input
# input_target = target_model.layers[0].input
# output_online = online_model.layers[-1].output
# output_target = target_model.layers[-1].output
# mearged_model = tf.keras.Model(inputs=[input_online,input_target], outputs=[output_online,output_target])
# mearged_model.summary()
# optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
# callbacks = CallbackBuilder.get_callbacks(tensorboard=False, reduce_lr_on_plateau=True,reduce_patience=5,reduce_factor=0.25,early_stopping_patience=16)
# mearged_model.compile(optimizer=optimizer, loss=custom_loss)
# train = train.take(10)
# y = mearged_model.predict(train)
# print(y)
# optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs)
# online_projection = add_projection_layers(online_model)
# target_projection = add_projection_layers(target_model)
# inputs = [online_projection.input, target_projection.input]
# outputs = [online_projection.output, target_projection.output]
# total_model = tf.keras.Model(inputs=inputs, outputs=outputs)
# optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
# model = SimpleClassificationModel.get_model(input_shape=(IMAGE_SIZE,IMAGE_SIZE,1),classes=CLASS_NUM)
# callbacks = CallbackBuilder.get_callbacks(tensorboard=False, reduce_lr_on_plateau=True,reduce_patience=5,reduce_factor=0.25,early_stopping_patience=16)
# callbacks.append(MovingAverageCallback(model))
# ImageTrain.train_image_classification(train_data=train,train_size=train_len,batch_size=BATCH_SIZE,validation_data=validation,validation_size=validation_len,shuffle_size=SHUFFLE_SIZE,model=model,callbacks=callbacks,optimizer=optimizer,loss="categorical_crossentropy",max_epoch=EPOCHS)
# w1 = model.get_weights()
# # print(type(w1))
# w2 = []
# for a in w1:
# print(type(a))
# w2.append( a*0.8 )
# model.set_weights(w2)
|
[
"tensorflow.cast",
"tensorflow.keras.backend.abs",
"tensorflow.one_hot",
"tftk.Context.init_context"
] |
[((1544, 1570), 'tensorflow.keras.backend.abs', 'tf.keras.backend.abs', (['diff'], {}), '(diff)\n', (1564, 1570), True, 'import tensorflow as tf\n'), ((2593, 2631), 'tftk.Context.init_context', 'Context.init_context', ([], {'TRAINING_NAME': '""""""'}), "(TRAINING_NAME='')\n", (2613, 2631), False, 'from tftk import Context\n'), ((1950, 1976), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (1957, 1976), True, 'import tensorflow as tf\n'), ((2053, 2081), 'tensorflow.one_hot', 'tf.one_hot', (['label', 'max_label'], {}), '(label, max_label)\n', (2063, 2081), True, 'import tensorflow as tf\n'), ((2444, 2470), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2451, 2470), True, 'import tensorflow as tf\n')]
|
# Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import logging
from typing import List
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_element_type import ExtractElementType
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_renditions_element_type import \
ExtractRenditionsElementType
from adobe.pdfservices.operation.pdfops.options.extractpdf.table_structure_type import TableStructureType
class ExtractPDFOptions():
""" An Options Class that defines the options for ExtractPDFOperation.
.. code-block:: python
extract_pdf_options: ExtractPDFOptions = ExtractPDFOptions.builder() \\
.with_elements_to_extract([ExtractElementType.TEXT, ExtractElementType.TABLES]) \\
.with_get_char_info(True) \\
.with_table_structure_format(TableStructureType.CSV) \\
.with_elements_to_extract_renditions([ExtractRenditionsElementType.FIGURES, ExtractRenditionsElementType.TABLES]) \\
.with_include_styling_info(True) \\
.build()
"""
def __init__(self, elements_to_extract, elements_to_extract_renditions, get_char_info, table_output_format,
include_styling_info=None):
self._elements_to_extract = elements_to_extract
self._elements_to_extract_renditions = elements_to_extract_renditions
self._get_char_info = get_char_info
self._table_output_format = table_output_format
self._include_styling_info = include_styling_info
self._logger = logging.getLogger(__name__)
@property
def elements_to_extract(self):
""" List of pdf element types to be extracted in a structured format from input file"""
return self._elements_to_extract
@property
def elements_to_extract_renditions(self):
""" List of pdf element types whose renditions needs to be extracted from input file"""
return self._elements_to_extract_renditions
@property
def get_char_info(self):
""" Boolean specifying whether to add character level bounding boxes to output json """
return self._get_char_info
@property
def table_output_format(self):
""" export table in specified format - currently csv supported """
return self._table_output_format
@property
def include_styling_info(self):
""" Boolean specifying whether to add PDF Elements Styling Info to output json """
return self._include_styling_info
@staticmethod
def builder():
"""Returns a Builder for :class:`ExtractPDFOptions`
:return: The builder class for ExtractPDFOptions
:rtype: ExtractPDFOptions.Builder
"""
return ExtractPDFOptions.Builder()
class Builder:
""" The builder for :class:`ExtractPDFOptions`.
"""
def __init__(self):
self._elements_to_extract = None
self._elements_to_extract_renditions = None
self._table_output_format = None
self._get_char_info = None
self._include_styling_info = None
def _init_elements_to_extract(self):
if not self._elements_to_extract:
self._elements_to_extract = []
def _init_elements_to_extract_renditions(self):
if not self._elements_to_extract_renditions:
self._elements_to_extract_renditions = []
def with_element_to_extract(self, element_to_extract: ExtractElementType):
"""
adds a pdf element type for extracting structured information.
:param element_to_extract: ExtractElementType to be extracted
:type element_to_extract: ExtractElementType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if element_to_extract is None.
"""
if element_to_extract and element_to_extract in ExtractElementType:
self._init_elements_to_extract()
self._elements_to_extract.append(element_to_extract)
else:
raise ValueError("Only ExtractElementType enum is accepted for element_to_extract")
return self
def with_elements_to_extract(self, elements_to_extract: List[ExtractElementType]):
"""
adds a list of pdf element types for extracting structured information.
:param elements_to_extract: List of ExtractElementType to be extracted
:type elements_to_extract: List[ExtractElementType]
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if elements_to_extract is None or empty list.
"""
if elements_to_extract and all(element in ExtractElementType for element in elements_to_extract):
self._init_elements_to_extract()
self._elements_to_extract.extend(elements_to_extract)
else:
raise ValueError("Only ExtractElementType enum List is accepted for elements_to_extract")
return self
def with_element_to_extract_renditions(self, element_to_extract_renditions: ExtractRenditionsElementType):
"""
adds a pdf element type for extracting rendition.
:param element_to_extract_renditions: ExtractRenditionsElementType whose renditions have to be extracted
:type element_to_extract_renditions: ExtractRenditionsElementType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if element_to_extract_renditions is None.
"""
if element_to_extract_renditions and element_to_extract_renditions in ExtractRenditionsElementType:
self._init_elements_to_extract_renditions()
self._elements_to_extract_renditions.append(element_to_extract_renditions)
else:
raise ValueError("Only ExtractRenditionsElementType enum is accepted for element_to_extract_renditions")
return self
def with_elements_to_extract_renditions(self, elements_to_extract_renditions: List[ExtractRenditionsElementType]):
"""
adds a list of pdf element types for extracting rendition.
:param elements_to_extract_renditions: List of ExtractRenditionsElementType whose renditions have to be extracted
:type elements_to_extract_renditions: List[ExtractRenditionsElementType]
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if elements_to_extract is None or empty list.
"""
if elements_to_extract_renditions and all(
element in ExtractRenditionsElementType for element in elements_to_extract_renditions):
self._init_elements_to_extract_renditions()
self._elements_to_extract_renditions.extend(elements_to_extract_renditions)
else:
raise ValueError("Only ExtractRenditionsElementType enum List is accepted for elements_to_extract_renditions")
return self
def with_table_structure_format(self, table_structure: TableStructureType):
"""
adds the table structure format (currently csv only) for extracting structured information.
:param table_structure: TableStructureType to be extracted
:type table_structure: TableStructureType
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
:raises ValueError: if table_structure is None.
"""
if table_structure and table_structure in TableStructureType:
self._table_output_format = table_structure
else:
raise ValueError("Only TableStructureType enum is accepted for table_structure_format")
return self
def with_get_char_info(self, get_char_info: bool):
"""
sets the Boolean specifying whether to add character level bounding boxes to output json
:param get_char_info: Set True to extract character level bounding boxes information
:type get_char_info: bool
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
"""
self._get_char_info = get_char_info
return self
def with_include_styling_info(self, include_styling_info: bool):
"""
sets the Boolean specifying whether to add PDF Elements Styling Info to output json
:param include_styling_info: Set True to extract PDF Elements Styling Info
:type include_styling_info: bool
:return: This Builder instance to add any additional parameters.
:rtype: ExtractPDFOptions.Builder
"""
self._include_styling_info = include_styling_info
return self
def build(self):
return ExtractPDFOptions(self._elements_to_extract, self._elements_to_extract_renditions,
self._get_char_info,
self._table_output_format, self._include_styling_info)
|
[
"logging.getLogger"
] |
[((2087, 2114), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2104, 2114), False, 'import logging\n')]
|
import datetime
import time
from urllib.parse import urlencode
import requests
from pandas import read_csv
from geodataimport.compat import StringIO, binary_type, bytes_to_str
from geodataimport.utils import RemoteDataError, _init_session, _sanitize_dates
class _GeoData(object):
"""
Parameters
----------
symbols : {str, List[str]}
String symbol of like of symbols
start : string, int, date, datetime, Timestamp
Starting date. Parses many different kind of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, int, date, datetime, Timestamp
Ending date
retry_count : int, default 3
Number of times to retry query request.
pause : float, default 0.1
Time, in seconds, of the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
freq : {str, None}
Frequency to use in select readers
"""
_chunk_size = 1024 * 1024
_format = "string"
def __init__(
self,
symbols,
start=None,
end=None,
retry_count=5,
pause=0.1,
timeout=30,
session=None,
freq=None,
asynchronous=False,
**kwargs,
):
self.symbols = symbols
start, end = _sanitize_dates(start or self.default_start_date, end)
self.start = start
self.end = end
if not isinstance(retry_count, int) or retry_count < 0:
raise ValueError("'retry_count' must be integer larger than 0")
self.retry_count = retry_count
self.pause = pause
self.timeout = timeout
self.pause_multiplier = 1
self.session = _init_session(
session, retry=retry_count, asynchronous=asynchronous
)
self.freq = freq
def close(self):
"""Close network session"""
self.session.close()
@property
def default_start_date(self):
"""Default start date for reader. Defaults to 5 years before current date"""
today = datetime.date.today()
return today - datetime.timedelta(days=365 * 5)
@property
def url(self):
"""API URL"""
# must be overridden in subclass
raise NotImplementedError
@property
def params(self):
"""Parameters to use in API calls"""
return None
def _read_one_data(self, url, params):
""" read one data from specified URL """
if self._format == "string":
out = self._read_url_as_StringIO(url, params=params)
elif self._format == "json":
out = self._get_response(url, params=params).json()
else:
raise NotImplementedError(self._format)
return self._read_lines(out)
def _read_url_as_StringIO(self, url, params=None):
"""
Open url (and retry)
"""
response = self._get_response(url, params=params)
text = self._sanitize_response(response)
out = StringIO()
if len(text) == 0:
service = self.__class__.__name__
raise IOError(
"{} request returned no data; check URL for invalid "
"inputs: {}".format(service, self.url)
)
if isinstance(text, binary_type):
out.write(bytes_to_str(text))
else:
out.write(text)
out.seek(0)
return out
@staticmethod
def _sanitize_response(response):
"""
Hook to allow subclasses to clean up response data
"""
return response.content
def _get_response(self, url, params=None, headers=None):
""" send raw HTTP request to get requests.Response from the specified url
Parameters
----------
url : str
target URL
params : dict or None
parameters passed to the URL
"""
# initial attempt + retry
pause = self.pause
last_response_text = ""
for _ in range(self.retry_count + 1):
response = self.session.get(
url, params=params, headers=headers, timeout=self.timeout
)
if response.status_code == requests.codes["ok"]:
return response
if response.encoding:
last_response_text = response.text.encode(response.encoding)
time.sleep(pause)
# Increase time between subsequent requests, per subclass.
pause *= self.pause_multiplier
# Get a new breadcrumb if necessary, in case ours is invalidated
if isinstance(params, list) and "crumb" in params:
params["crumb"] = self._get_crumb(self.retry_count)
# If our output error function returns True, exit the loop.
if self._output_error(response):
break
if params is not None and len(params) > 0:
url = url + "?" + urlencode(params)
msg = "Unable to read URL: {0}".format(url)
if last_response_text:
msg += "\nResponse Text:\n{0}".format(last_response_text)
raise RemoteDataError(msg)
def _output_error(self, out):
"""If necessary, a service can implement an interpreter for any non-200
HTTP responses.
Parameters
----------
out: bytes
The raw output from an HTTP request
Returns
-------
boolean
"""
return False
def _get_crumb(self, *args):
""" To be implemented by subclass """
raise NotImplementedError("Subclass has not implemented method.")
def _read_lines(self, out):
rs = read_csv(out, index_col=0, parse_dates=True, na_values=("-", "null"))[::-1]
# Needed to remove blank space character in header names
rs.columns = list(map(lambda x: x.strip(), rs.columns.values.tolist()))
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
# Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode("unicode_escape").encode(
"ascii", "ignore"
)
except AttributeError:
# Python 3 string has no decode method.
rs.index.name = rs.index.name.encode("ascii", "ignore").decode()
return rs
|
[
"geodataimport.utils._sanitize_dates",
"geodataimport.compat.StringIO",
"pandas.read_csv",
"urllib.parse.urlencode",
"geodataimport.compat.bytes_to_str",
"datetime.date.today",
"geodataimport.utils.RemoteDataError",
"geodataimport.utils._init_session",
"time.sleep",
"datetime.timedelta"
] |
[((1326, 1380), 'geodataimport.utils._sanitize_dates', '_sanitize_dates', (['(start or self.default_start_date)', 'end'], {}), '(start or self.default_start_date, end)\n', (1341, 1380), False, 'from geodataimport.utils import RemoteDataError, _init_session, _sanitize_dates\n'), ((1727, 1795), 'geodataimport.utils._init_session', '_init_session', (['session'], {'retry': 'retry_count', 'asynchronous': 'asynchronous'}), '(session, retry=retry_count, asynchronous=asynchronous)\n', (1740, 1795), False, 'from geodataimport.utils import RemoteDataError, _init_session, _sanitize_dates\n'), ((2081, 2102), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2100, 2102), False, 'import datetime\n'), ((3021, 3031), 'geodataimport.compat.StringIO', 'StringIO', ([], {}), '()\n', (3029, 3031), False, 'from geodataimport.compat import StringIO, binary_type, bytes_to_str\n'), ((5149, 5169), 'geodataimport.utils.RemoteDataError', 'RemoteDataError', (['msg'], {}), '(msg)\n', (5164, 5169), False, 'from geodataimport.utils import RemoteDataError, _init_session, _sanitize_dates\n'), ((2126, 2158), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(365 * 5)'}), '(days=365 * 5)\n', (2144, 2158), False, 'import datetime\n'), ((4400, 4417), 'time.sleep', 'time.sleep', (['pause'], {}), '(pause)\n', (4410, 4417), False, 'import time\n'), ((5695, 5764), 'pandas.read_csv', 'read_csv', (['out'], {'index_col': '(0)', 'parse_dates': '(True)', 'na_values': "('-', 'null')"}), "(out, index_col=0, parse_dates=True, na_values=('-', 'null'))\n", (5703, 5764), False, 'from pandas import read_csv\n'), ((3335, 3353), 'geodataimport.compat.bytes_to_str', 'bytes_to_str', (['text'], {}), '(text)\n', (3347, 3353), False, 'from geodataimport.compat import StringIO, binary_type, bytes_to_str\n'), ((4963, 4980), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (4972, 4980), False, 'from urllib.parse import urlencode\n')]
|
import requests
import params
base_url = params.url
url = base_url+"v2/public/user/create"
user_email_domain = params.email_domain
# User Inputs
first_name = input("Type user's first name: ")
last_name = input("Type user's last name: ")
# Create user creds from inputs
full_name = first_name + " " + last_name
user_name = first_name + "." + last_name + user_email_domain
email_address = first_name + "." + last_name + user_email_domain
payload = {
"authentication_type": "saml",
"access_level": "BASIC_USER",
"username": user_name,
"authentication_server_id": 1,
"name": full_name,
"email": email_address
}
headers = {
"Accept": "application/json",
"content-type": "application/json",
"accept-encoding": "gzip",
"Api-Key": params.api_key
}
response = requests.request("POST", url, json=payload, headers=headers)
print(response.text)
|
[
"requests.request"
] |
[((831, 891), 'requests.request', 'requests.request', (['"""POST"""', 'url'], {'json': 'payload', 'headers': 'headers'}), "('POST', url, json=payload, headers=headers)\n", (847, 891), False, 'import requests\n')]
|
'''
Created on Sep 16, 2010
kNN: k Nearest Neighbors
Input: inX: vector to compare to existing dataset (1xN)
dataSet: size m data set of known vectors (NxM)
labels: data set labels (1xM vector)
k: number of neighbors to use for comparison (should be an odd number)
Output: the most popular class label
@author: pbharrin
'''
'''
k-nearest neighbors example
'''
import matplotlib.pyplot as plt
import operator
from numpy import *
from numpy.ma.core import *
'''
Using the k-nearest neighbors algorithm to improve matching on a dating site. The steps are:
(1) Collect data: a text file is provided.
(2) Prepare data: parse the text file with Python.
(3) Analyze data: draw 2D scatter plots with Matplotlib.
(4) Train the algorithm: this step does not apply to k-NN.
(5) Test the algorithm: use part of Hellen's data as test samples. A test sample differs from a
    non-test sample in that it is already classified; if the predicted class differs from the
    actual class, count it as an error.
(6) Use the algorithm: build a simple command-line program so Hellen can enter some feature data
    and judge whether the other person is a type she likes.
'''
def dating_class_test():
    hoRatio = 0.50  # hold out 50% of the data for testing
    # Load the data: frequent-flyer miles per year, percentage of time spent playing video games, liters of ice cream consumed per week
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
# print(normMat[0:20])
# print(ranges)
# print(minVals)
# exit()
    m = normMat.shape[0]  # number of rows; .shape returns a tuple of (rows, columns)
    numTestVecs = int(m * hoRatio)  # number of rows used for testing
errorCount = 0.0
for i in range(numTestVecs):
        # classify each test row against the non-test rows and their class labels
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)  # predicted class
print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]): errorCount += 1.0  # count an error when the prediction differs from the actual class
print("the total error rate is: %f, error count is %d" % (errorCount / float(numTestVecs), errorCount))
# Classifier: returns the predicted class label for inX
def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]  # number of rows
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet  # difference between the test sample and every training sample
    sqDiffMat = diffMat ** 2  # squared differences
    sqDistances = sqDiffMat.sum(axis=1)  # sum over each row
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createDataSet():
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
# Convert a text file into a NumPy matrix
# Input: path to the text file
# Output: a NumPy matrix of training samples and a class-label vector
def file2matrix(filename):
fr = open(filename)
    numberOfLines = len(fr.readlines())  # number of lines in the file
    returnMat = zeros((numberOfLines, 3))  # create the NumPy matrix, initialized to zeros
    classLabelVector = []  # class-label vector holding the last column of each line
fr = open(filename)
index = 0
for line in fr.readlines():
        line = line.strip()  # strip the trailing newline
        listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:3]  # fill this matrix row
        classLabelVector.append(int(listFromLine[-1]))  # the last field is the class label
index += 1
return returnMat, classLabelVector
# Normalize feature values, e.g. map the flyer-miles values into the [0, 1] range
# newValue = (oldValue-min)/(max-min)
# dataSet: NumPy matrix
# Returns: the normalized NumPy matrix, the (max - min) range row, and the row of minimum values
def autoNorm(dataSet):
minVals = dataSet.min(0)
maxVals = dataSet.max(0)
    ranges = maxVals - minVals  # per-feature range between max and min
normDataSet = zeros(shape(dataSet))
m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m, 1))  # subtract the minimum row from every row
    normDataSet = normDataSet / tile(ranges, (m, 1))  # element-wise divide each row by the (max - min) ranges
return normDataSet, ranges, minVals
# Read the feature values as a NumPy matrix and display them as a scatter plot
def test1():
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
print(datingDataMat[0:20])
print(datingLabels[0:20])
fig = plt.figure()
ax = fig.add_subplot(111)
    # ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2], 15.0 * array(datingLabels), 15.0 * array(datingLabels))  # percentage of time playing video games vs. liters of ice cream per week
    # plt.show()
    ax.scatter(datingDataMat[:, 0], datingDataMat[:, 1], 15.0 * array(datingLabels), 15.0 * array(datingLabels))  # frequent-flyer miles vs. percentage of time playing video games
plt.show()
def test2():
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
print(normMat)
print(minVals)
print(ranges)
print(normMat.shape)
# print("========================================================")
# test1()
# print("========================================================")
# test2()
dating_class_test()
|
[
"matplotlib.pyplot.figure",
"operator.itemgetter",
"matplotlib.pyplot.show"
] |
[((3799, 3811), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3809, 3811), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4151, 4153), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2268), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2265, 2268), False, 'import operator\n')]
|