code (stringlengths 22 to 1.05M) | apis (listlengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
---|---|---|
"""
This script interfaces with the codepost.io API to produce exemplar reports
for ABET accreditation.
For a particular assignment, a report includes an assignment summary
(basic info and stats) as well as the full assessment of three student
examples: an A, a B, and a C submission. The report includes all line-by-line
grader comments (and point deductions) as well as source files. Source files
are formatted in markdown. In the codepost.io web client the comments would
be embedded directly in the source files but for this report they are collected
in the summary.
A, B, and C examples are automatically chosen from all graded submissions.
The top submission is chosen for the A example, while the B/C examples are chosen
to be the closest to an 85%/75% score based on the total number of points of
the assignment.
The report is written to both a markdown-formatted output file and
a PDF version (which is produced from the markdown using pandoc/latex via
a system call, so these are expected to be installed and available).
You can run this script in one of two modes: provide either a single
assignment ID, which will produce a single report for that assignment only, or
a course ID, which will produce (separate) reports for all
assignments in the course. In either case, the IDs must be valid codepost.io
IDs. Optionally, you can provide your own codepost API key via the command
line, otherwise it must be specified in the config.py file.
"""
import argparse
import os
import codepost
from config import config
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--codePostApiKey',
type=str,
default=config.codePostApiKey,
help='Optionally provide a codepost API key to use. By default the API key in the config.py file is used.')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--codePostCourseId',
type=int,
help='Generates ABET reports for *every* assignment in the provided codepost course.')
group.add_argument('--codePostAssignmentId',
type=int,
help='Generates a single ABET report for the provided codepost assignment.')
args = parser.parse_args()
def submissionToMarkdown(submission,title,assignmentPts):
"""
Returns both a summary and source files of the provided submission as
markdown-formatted strings.
"""
details = ""
result = f"""
## {title} Example
* Student(s): {submission.students}
* Score: {submission.grade:.1f} / {assignmentPts:.1f} = {(100*submission.grade/assignmentPts):.2f}%
"""
for fileId in submission.files:
f = codepost.file.retrieve(id=fileId.id)
fileName = f.name
        # some submissions contain non-ASCII characters, so strip them to keep the report clean
fileContents = f.code.encode('utf-8').decode('ascii','ignore')
fileExtension = f.extension
fileGraderCommentIds = [x.id for x in f.comments]
result += f" * Source File: `{fileName}`\n"
details += f"## {title} Example - `{fileName}`\n"
details += f"```{fileExtension}\n{fileContents}\n```\n"
for commentId in fileGraderCommentIds:
c = codepost.comment.retrieve(id=commentId)
cleanText = c.text.replace("\n\n", "\n")
result += f" * Lines {c.startLine:d} - {c.endLine:d} (-{c.pointDelta:.1f}): {cleanText:s}\n"
return result, details
def getAssignmentReport(assignment):
"""
Produces an ABET assignment report (as a markdown-formatted string)
for the given assignment (which is expected to be a codepost API
object) by pulling all relevant data as well as source
    code files (and grader comments) for automatically selected A, B, and C samples
"""
courseId = assignment.course
course = codepost.course.retrieve(id=courseId)
courseName = course.name
coursePeriod = course.period
assignmentName = assignment.name
assignmentPts = assignment.points
assignmentMean = assignment.mean
assignmentMedian = assignment.median
summary = f"""
# {courseName} - {coursePeriod}
## {assignmentName}
* Points: {assignmentPts}
* Mean: {assignmentMean}
* Median: {assignmentMedian}\n\n"""
# find ideal A, B, C samples
submissions = assignment.list_submissions()
aSubmission = submissions[0]
bSubmission = submissions[0]
cSubmission = submissions[0]
# we only expect 1 submission per student since submissions are via our
# scripts, but in any case, find the 3 closest to A=max%, B = 85%, C = 75%
for submission in submissions:
if submission.grade > aSubmission.grade:
aSubmission = submission
if abs(submission.grade / assignmentPts - .85) < abs(bSubmission.grade / assignmentPts - .85):
bSubmission = submission
if abs(submission.grade / assignmentPts - .75) < abs(cSubmission.grade / assignmentPts - .75):
cSubmission = submission
aSummary, aDetail = submissionToMarkdown(aSubmission,"A",assignmentPts)
bSummary, bDetail = submissionToMarkdown(bSubmission,"B",assignmentPts)
cSummary, cDetail = submissionToMarkdown(cSubmission,"C",assignmentPts)
return summary + aSummary + bSummary + cSummary + "\n\n" + aDetail + bDetail + cDetail
def produceCourseReports(courseId):
"""
Produces ABET reports (as both md and pdf files) for all assignments in
the specified course
"""
course = codepost.course.retrieve(id=courseId)
for a in course.assignments:
assignmentId = a.id
produceAssignmentReport(assignmentId)
def produceAssignmentReport(assignmentId):
"""
Produces a single report (as an md and pdf file) for the specified assignment
"""
a = codepost.assignment.retrieve(id=assignmentId)
assignmentName = a.name
baseFileName = assignmentName.replace(" ", "_")
assignmentId = a.id
report = getAssignmentReport(a)
fileNameMd = baseFileName + ".md"
fileNamePdf = baseFileName + ".pdf"
    with open(fileNameMd, "w") as f:
        f.write(report)
os.system("pandoc -s -V geometry:margin=1in -o "+fileNamePdf+" "+fileNameMd)
return None
codePostApiKey = args.codePostApiKey
codepost.configure_api_key(codePostApiKey)
if args.codePostCourseId:
produceCourseReports(args.codePostCourseId)
elif args.codePostAssignmentId:
produceAssignmentReport(args.codePostAssignmentId)
else:
print("ERROR: neither course ID nor assignment ID specified")
|
[
"argparse.ArgumentParser",
"codepost.assignment.retrieve",
"codepost.file.retrieve",
"os.system",
"codepost.course.retrieve",
"codepost.comment.retrieve",
"codepost.configure_api_key"
] |
[((1555, 1658), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (1578, 1658), False, 'import argparse\n'), ((6096, 6138), 'codepost.configure_api_key', 'codepost.configure_api_key', (['codePostApiKey'], {}), '(codePostApiKey)\n', (6122, 6138), False, 'import codepost\n'), ((3811, 3848), 'codepost.course.retrieve', 'codepost.course.retrieve', ([], {'id': 'courseId'}), '(id=courseId)\n', (3835, 3848), False, 'import codepost\n'), ((5376, 5413), 'codepost.course.retrieve', 'codepost.course.retrieve', ([], {'id': 'courseId'}), '(id=courseId)\n', (5400, 5413), False, 'import codepost\n'), ((5653, 5698), 'codepost.assignment.retrieve', 'codepost.assignment.retrieve', ([], {'id': 'assignmentId'}), '(id=assignmentId)\n', (5681, 5698), False, 'import codepost\n'), ((5965, 6051), 'os.system', 'os.system', (["('pandoc -s -V geometry:margin=1in -o ' + fileNamePdf + ' ' + fileNameMd)"], {}), "('pandoc -s -V geometry:margin=1in -o ' + fileNamePdf + ' ' +\n fileNameMd)\n", (5974, 6051), False, 'import os\n'), ((2748, 2784), 'codepost.file.retrieve', 'codepost.file.retrieve', ([], {'id': 'fileId.id'}), '(id=fileId.id)\n', (2770, 2784), False, 'import codepost\n'), ((3235, 3274), 'codepost.comment.retrieve', 'codepost.comment.retrieve', ([], {'id': 'commentId'}), '(id=commentId)\n', (3260, 3274), False, 'import codepost\n')]
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iree.tf.support.tf_utils."""
from absl.testing import parameterized
from iree.tf.support import tf_utils
import numpy as np
import tensorflow as tf
class UtilsTests(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters([('int8_to_i8', np.int8, 'i8'),
('int32_to_i32', np.int32, 'i32'),
('float32_to_f32', np.float32, 'f32'),
('float64_to_f64', np.float64, 'f64')])
def test_to_mlir_type(self, numpy_type, mlir_type):
self.assertEqual(tf_utils.to_mlir_type(numpy_type), mlir_type)
@parameterized.named_parameters([
('single_i32', [np.array([1, 2], dtype=np.int32)], '2xi32=1 2'),
('single_f32', [np.array([1, 2], dtype=np.float32)], '2xf32=1.0 2.0'),
])
def test_save_input_values(self, inputs, inputs_str):
self.assertEqual(tf_utils.save_input_values(inputs), inputs_str)
def test_apply_function(self):
inputs = [1, [2, 3], (4, 5), {'6': 6, '78': [7, 8]}]
expected = [0, [1, 2], (3, 4), {'6': 5, '78': [6, 7]}]
result = tf_utils.apply_function(inputs, lambda x: x - 1)
self.assertEqual(result, expected)
self.assertNotEqual(inputs, expected)
@parameterized.named_parameters([
{
'testcase_name': 'all the same',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': True,
},
{
'testcase_name': 'wrong int',
'array_c': np.array([1, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': False,
},
{
'testcase_name': 'wrong string',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['a', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': False,
},
{
'testcase_name': 'wrong float',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([1.0, 0.1, 0.2]),
'tar_same': False,
},
])
def test_recursive_check_same(self, array_c, array_d, array_e, tar_same):
# yapf: disable
ref = {
'a': 1,
'b': [
{'c': np.array([0, 1, 2])},
{'d': np.array(['0', '1', '2'])},
{'e': np.array([0.0, 0.1, 0.2])}
],
}
tar = {
'a': 1,
'b': [
{'c': array_c},
{'d': array_d},
{'e': array_e}
],
}
# yapf: enable
same, _ = tf_utils.check_same(ref, tar, rtol=1e-6, atol=1e-6)
self.assertEqual(tar_same, same)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"iree.tf.support.tf_utils.check_same",
"iree.tf.support.tf_utils.to_mlir_type",
"iree.tf.support.tf_utils.apply_function",
"numpy.array",
"absl.testing.parameterized.named_parameters",
"iree.tf.support.tf_utils.save_input_values"
] |
[((823, 1008), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["[('int8_to_i8', np.int8, 'i8'), ('int32_to_i32', np.int32, 'i32'), (\n 'float32_to_f32', np.float32, 'f32'), ('float64_to_f64', np.float64, 'f64')\n ]"], {}), "([('int8_to_i8', np.int8, 'i8'), (\n 'int32_to_i32', np.int32, 'i32'), ('float32_to_f32', np.float32, 'f32'),\n ('float64_to_f64', np.float64, 'f64')])\n", (853, 1008), False, 'from absl.testing import parameterized\n'), ((3363, 3377), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3375, 3377), True, 'import tensorflow as tf\n'), ((1704, 1752), 'iree.tf.support.tf_utils.apply_function', 'tf_utils.apply_function', (['inputs', '(lambda x: x - 1)'], {}), '(inputs, lambda x: x - 1)\n', (1727, 1752), False, 'from iree.tf.support import tf_utils\n'), ((3243, 3296), 'iree.tf.support.tf_utils.check_same', 'tf_utils.check_same', (['ref', 'tar'], {'rtol': '(1e-06)', 'atol': '(1e-06)'}), '(ref, tar, rtol=1e-06, atol=1e-06)\n', (3262, 3296), False, 'from iree.tf.support import tf_utils\n'), ((1180, 1213), 'iree.tf.support.tf_utils.to_mlir_type', 'tf_utils.to_mlir_type', (['numpy_type'], {}), '(numpy_type)\n', (1201, 1213), False, 'from iree.tf.support import tf_utils\n'), ((1493, 1527), 'iree.tf.support.tf_utils.save_input_values', 'tf_utils.save_input_values', (['inputs'], {}), '(inputs)\n', (1519, 1527), False, 'from iree.tf.support import tf_utils\n'), ((1943, 1962), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1951, 1962), True, 'import numpy as np\n'), ((1985, 2010), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (1993, 2010), True, 'import numpy as np\n'), ((2033, 2058), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (2041, 2058), True, 'import numpy as np\n'), ((2166, 2185), 'numpy.array', 'np.array', (['[1, 1, 2]'], {}), '([1, 1, 2])\n', (2174, 2185), True, 'import numpy as np\n'), ((2208, 2233), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (2216, 2233), True, 'import numpy as np\n'), ((2256, 2281), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (2264, 2281), True, 'import numpy as np\n'), ((2393, 2412), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2401, 2412), True, 'import numpy as np\n'), ((2435, 2460), 'numpy.array', 'np.array', (["['a', '1', '2']"], {}), "(['a', '1', '2'])\n", (2443, 2460), True, 'import numpy as np\n'), ((2483, 2508), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (2491, 2508), True, 'import numpy as np\n'), ((2619, 2638), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2627, 2638), True, 'import numpy as np\n'), ((2661, 2686), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (2669, 2686), True, 'import numpy as np\n'), ((2709, 2734), 'numpy.array', 'np.array', (['[1.0, 0.1, 0.2]'], {}), '([1.0, 0.1, 0.2])\n', (2717, 2734), True, 'import numpy as np\n'), ((1285, 1317), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'np.int32'}), '([1, 2], dtype=np.int32)\n', (1293, 1317), True, 'import numpy as np\n'), ((1356, 1390), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'np.float32'}), '([1, 2], dtype=np.float32)\n', (1364, 1390), True, 'import numpy as np\n'), ((2937, 2956), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2945, 2956), True, 'import numpy as np\n'), ((2977, 3002), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', 
'2'])\n", (2985, 3002), True, 'import numpy as np\n'), ((3023, 3048), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (3031, 3048), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
texts = []
f = open('preprocess/jull_review.csv', 'r')
for line in f.readlines():
oneline = line.replace("\n", "").split(",")
oneline = list(filter(None, oneline))
texts.append(oneline)
print(len(texts))
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(texts)]
# train one PV-DBOW (dm=0) and one PV-DM (dm=1) model, matching the two saved filenames
doc2vec_model = Doc2Vec(documents, vector_size=100, window=10, min_count=30, workers=4, dm=0)
doc2vec_model.save('doc2vec_v=100_dm0.model')
doc2vec_model = Doc2Vec(documents, vector_size=100, window=10, min_count=30, workers=4, dm=1)
doc2vec_model.save('doc2vec_v=100_dm1.model')
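# Loading and querying a saved model later (illustrative sketch):
#   model = Doc2Vec.load('doc2vec_v=100_dm1.model')
#   vector = model.infer_vector(['some', 'tokenized', 'review'])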
|
[
"gensim.models.doc2vec.Doc2Vec",
"gensim.models.doc2vec.TaggedDocument"
] |
[((404, 475), 'gensim.models.doc2vec.Doc2Vec', 'Doc2Vec', (['documents'], {'vector_size': '(100)', 'window': '(10)', 'min_count': '(30)', 'workers': '(4)'}), '(documents, vector_size=100, window=10, min_count=30, workers=4)\n', (411, 475), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((539, 610), 'gensim.models.doc2vec.Doc2Vec', 'Doc2Vec', (['documents'], {'vector_size': '(100)', 'window': '(10)', 'min_count': '(30)', 'workers': '(4)'}), '(documents, vector_size=100, window=10, min_count=30, workers=4)\n', (546, 610), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((330, 354), 'gensim.models.doc2vec.TaggedDocument', 'TaggedDocument', (['doc', '[i]'], {}), '(doc, [i])\n', (344, 354), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n')]
|
import subprocess, json, sys, time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
with open('config.json', 'r') as f:
CONFIG = json.load(f)
proc = subprocess.Popen([sys.executable, 'server.py'], stdout=sys.stdout)
print('Waiting for server to start.')
time.sleep(4)
options = Options()
options.add_argument(f'--kiosk http://{CONFIG["host"]}:{str(CONFIG["port"])}')
driver = webdriver.Firefox(firefox_options=options)
driver.get(f'http://{CONFIG["host"]}:{str(CONFIG["port"])}')
driver.fullscreen_window()
proc.wait()
|
[
"subprocess.Popen",
"json.load",
"selenium.webdriver.Firefox",
"time.sleep",
"selenium.webdriver.firefox.options.Options"
] |
[((192, 258), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'server.py']"], {'stdout': 'sys.stdout'}), "([sys.executable, 'server.py'], stdout=sys.stdout)\n", (208, 258), False, 'import subprocess, json, sys, time\n'), ((297, 310), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (307, 310), False, 'import subprocess, json, sys, time\n'), ((322, 331), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (329, 331), False, 'from selenium.webdriver.firefox.options import Options\n'), ((420, 462), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'firefox_options': 'options'}), '(firefox_options=options)\n', (437, 462), False, 'from selenium import webdriver\n'), ((171, 183), 'json.load', 'json.load', (['f'], {}), '(f)\n', (180, 183), False, 'import subprocess, json, sys, time\n')]
|
import unittest
from two_sum.solution import Solution
class MyTestCase(unittest.TestCase):
def test_two_sum(self):
s = Solution()
nums = [2,7,11,15]
target = 9
result = s.twoSum(nums, target)
self.assertEqual(result, [0,1])
nums = [-1,-2,-3,-4,-5]
target = -8
result = s.twoSum(nums, target)
self.assertEqual(result, [2,4])
def test_two_sum_two_pass_hash(self):
s = Solution()
nums = [2,7,11,15]
target = 9
result = s.twoSumTwoPassHash(nums, target)
self.assertEqual(result, [0,1])
nums = [-1,-2,-3,-4,-5]
target = -8
result = s.twoSumTwoPassHash(nums, target)
self.assertEqual(result, [2,4])
def test_two_sum_one_pass_hash(self):
s = Solution()
# nums = [2,7,11,15]
# target = 9
# result = s.twoSumOnePassHash(nums, target)
# self.assertEqual(result, [0,1])
#
#
# nums = [-1,-2,-3,-4,-5]
# target = -8
# result = s.twoSumOnePassHash(nums, target)
# self.assertEqual(result, [2,4])
#
nums = [3,3]
target = 6
result = s.twoSumOnePassHash(nums, target)
self.assertEqual(result, [0,1])
|
[
"two_sum.solution.Solution"
] |
[((135, 145), 'two_sum.solution.Solution', 'Solution', ([], {}), '()\n', (143, 145), False, 'from two_sum.solution import Solution\n'), ((463, 473), 'two_sum.solution.Solution', 'Solution', ([], {}), '()\n', (471, 473), False, 'from two_sum.solution import Solution\n'), ((813, 823), 'two_sum.solution.Solution', 'Solution', ([], {}), '()\n', (821, 823), False, 'from two_sum.solution import Solution\n')]
|
# -*- coding: utf-8 -*-
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
import pytest
from spyder.plugins.editor.fallback.actor import FallbackActor
from spyder.plugins.editor.lsp.tests.conftest import qtbot_module
@pytest.fixture(scope='module')
def fallback(qtbot_module, request):
fallback = FallbackActor(None)
qtbot_module.addWidget(fallback)
with qtbot_module.waitSignal(fallback.sig_fallback_ready, timeout=30000):
fallback.start()
def teardown():
fallback.stop()
request.addfinalizer(teardown)
return fallback
|
[
"spyder.plugins.editor.fallback.actor.FallbackActor",
"spyder.plugins.editor.lsp.tests.conftest.qtbot_module.waitSignal",
"pytest.fixture",
"spyder.plugins.editor.lsp.tests.conftest.qtbot_module.addWidget"
] |
[((300, 330), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (314, 330), False, 'import pytest\n'), ((383, 402), 'spyder.plugins.editor.fallback.actor.FallbackActor', 'FallbackActor', (['None'], {}), '(None)\n', (396, 402), False, 'from spyder.plugins.editor.fallback.actor import FallbackActor\n'), ((407, 439), 'spyder.plugins.editor.lsp.tests.conftest.qtbot_module.addWidget', 'qtbot_module.addWidget', (['fallback'], {}), '(fallback)\n', (429, 439), False, 'from spyder.plugins.editor.lsp.tests.conftest import qtbot_module\n'), ((450, 517), 'spyder.plugins.editor.lsp.tests.conftest.qtbot_module.waitSignal', 'qtbot_module.waitSignal', (['fallback.sig_fallback_ready'], {'timeout': '(30000)'}), '(fallback.sig_fallback_ready, timeout=30000)\n', (473, 517), False, 'from spyder.plugins.editor.lsp.tests.conftest import qtbot_module\n')]
|
from pygame.draw import rect as draw_rect
def darken_color(color, factor):
return tuple(int(c * factor) for c in color)
def draw_piece(surf, color, left, top, width, height, size):
padding_factor = 0.025
shadow_factor = 0.085
margin_factor = 0.05
base_color = color
margin_color = darken_color(color, 0.8)
bottom_color = darken_color(color, 0.4)
# Applying padding
padding = int(size * padding_factor)
left, top = left + padding, top + padding
width, height = width - 2 * padding, height - 2 * padding
size = size - 2 * padding
# Applying shadow effect
shadow = int(size * shadow_factor)
top_rect = (left, top, width - shadow, height - shadow)
bottom_rect = (left + shadow, top + shadow, width - shadow, height - shadow)
draw_rect(surf, bottom_color, bottom_rect)
draw_rect(surf, base_color, top_rect)
# Draw margins
draw_rect(surf, margin_color, top_rect, int(size * margin_factor))
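# Example usage (illustrative sketch; assumes an existing pygame Surface named `screen`):
#   draw_piece(screen, color=(200, 60, 60), left=100, top=100, width=40, height=40, size=40)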
|
[
"pygame.draw.rect"
] |
[((796, 838), 'pygame.draw.rect', 'draw_rect', (['surf', 'bottom_color', 'bottom_rect'], {}), '(surf, bottom_color, bottom_rect)\n', (805, 838), True, 'from pygame.draw import rect as draw_rect\n'), ((843, 880), 'pygame.draw.rect', 'draw_rect', (['surf', 'base_color', 'top_rect'], {}), '(surf, base_color, top_rect)\n', (852, 880), True, 'from pygame.draw import rect as draw_rect\n')]
|
#!/usr/bin/env python
"""
Standard BOX 2D module with single joint
"""
import gym_rem2D.morph.module_utility as mu
from gym_rem.utils import Rot
from enum import Enum
import numpy as np
from Controller import m_controller
import random
import math
from gym_rem2D.morph import abstract_module
from gym_rem2D.morph import simple_module as sm
import Box2D as B2D
from Box2D.b2 import (edgeShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
class Connection(Enum):
"""Available connections for standard 2D module"""
left = (1.,0.,0.)
right = (-1.,0.,0.)
top = (0.,1.0,0.)
class Circular2D(abstract_module.Module):
"""Standard 2D module"""
def __init__(self, theta=0, size=(0.1,0.1, 0.0)):
self.theta = theta % 2 # double check
self.size = np.array(size)
assert self.size.shape == (3,), "Size must be a 3 element vector! : this is a 2D module but takes in a three dimensional size vector for now. Third entry is ignored"
self.connection_axis = np.array([0., 0., 1.])
self.orientation = Rot.from_axis(self.connection_axis,
-self.theta * (np.pi / 2.))
# NOTE: The fudge factor is to avoid colliding with the plane once
# spawned
self.position = np.array([0., self.size[2] / 2. + 0.002, 0.]) # uses only x and y
self._children = {}
self.controller = m_controller.Controller()
# relative scales
self.radius = 0.25
self.angle = math.pi/2
self.type = "CIRCLE"
self.MIN_RADIUS = 0.25
self.MAX_RADIUS = 0.5
self.MIN_ANGLE = math.pi/4
self.MAX_ANGLE = math.pi*2
self.torque = 50
#self.joint = None # needs joint
def limitWH(self):
"""Limit morphology to bounds"""
if self.radius > self.MAX_RADIUS:
self.radius = self.MAX_RADIUS
elif self.radius < self.MIN_RADIUS:
self.radius = self.MIN_RADIUS
if self.angle >self.MAX_ANGLE:
self.angle = self.MAX_ANGLE
elif self.angle < self.MIN_ANGLE:
self.angle = self.MIN_ANGLE
def mutate(self, MORPH_MUTATION_RATE,MUTATION_RATE,MUT_SIGMA):
"""
To mutate the shape and controller stored in the modules.
"""
#return
if random.uniform(0,1) < MORPH_MUTATION_RATE:
self.radius = random.gauss(self.radius, MUT_SIGMA)
if random.uniform(0,1) < MORPH_MUTATION_RATE:
self.angle = random.gauss(self.angle,MUT_SIGMA * math.pi)
self.limitWH()
if self.controller is not None:
self.controller.mutate(MUTATION_RATE,MUT_SIGMA, self.angle)
def setMorph(self,val1, val2, val3):
# values are between -1 and 1
self.radius = val1 + 1.5
		# val2 is not used, since the circle is parameterized by its radius alone
self.angle = self.MIN_ANGLE +(((val3 + 1.0)*0.5) * (self.MAX_ANGLE-self.MIN_ANGLE))
# limit values
self.limitWH()
def __setitem__(self, key, module):
if not isinstance(key, Connection):
raise TypeError("Key: '{}' is not a Connection type".format(key))
if key in self._children:
raise ModuleAttached()
if key not in self.available:
raise ConnectionObstructed()
# Add module as a child
self._children[key] = module
# Calculate connection point
direction = self.orientation.rotate(np.array(key.value))
position = self.position + (direction * self.size) / 2.
# Update parent pointer of module
module.update(self, position, direction)
def update(self, parent=None, pos=None, direction=None):
# Update own orientation first in case we have been previously
# connected
self.orientation = Rot.from_axis(self.connection_axis,
-self.theta * (np.pi / 2.))
# Update position in case parent is None
self.position = np.array([0., 0., self.size[2] / 2. + 0.002])
# Reset connection in case parent is None
self.connection = None
# Call super to update orientation
super().update(parent, pos, direction)
# If parent is not None we need to update position and connection point
if self.parent is not None:
# Update center position for self
# NOTE: We add a little fudge factor to avoid overlap
self.position = pos + (direction * self.size * 1.01) / 2.
# Calculate connection points for joint
conn = np.array([0., 0., -self.size[2] / 2.])
parent_conn = parent.orientation.T.rotate(pos - parent.position)
self.connection = (parent_conn, conn)
# Update potential children
self.update_children()
def update_children(self):
for conn in self._children:
direction = self.orientation.rotate(np.array(conn.value))
position = self.position + (direction * self.size) / 2.
self._children[conn].update(self, position, direction)
def spawn(self):
orient = self.orientation.as_quat()
cuid = B2D.b2CircleShape
cuid.m_p.Set(self.position)
if (self.parent):
self.joint = B2D.b2RevoluteJoint()
return cuid
def get_global_position_of_connection_site(self,con=None, parent_component = None):
if con is None:
con = Connection.left # get intersection of rectangle from width and height
local_position = [] # 2d array
local_angle = (con.value[0] * (self.angle)) # positive for left, negative for right
# position relative to y directional vector
if parent_component:
local_angle+=parent_component.angle
x = math.cos(local_angle+ math.pi/2)*self.radius
y = math.sin(local_angle+ math.pi/2)*self.radius
local_position.append(x)
local_position.append(y)
if parent_component is None:
return local_position,local_angle
global_position = [local_position[0]+parent_component.position[0],
local_position[1]+parent_component.position[1]]
return global_position, local_angle
def create(self,world,TERRAIN_HEIGHT,module=None,node=None,connection_site=None, p_c=None, module_list=None, position = None):
# get module height and width
if p_c is not None and connection_site is None:
raise("When you want to attach a new component to a parent component, you have to supply",
"a connection_site object with it. This connection_site object defines where to anchor",
"the joint in between to components")
n_radius = self.radius
angle = 0
pos = [7,10,0];
if position is not None:
pos = position
if (p_c is not None):
local_pos_x =math.cos(connection_site.orientation.x+ math.pi/2) * n_radius
local_pos_y =math.sin(connection_site.orientation.x+ math.pi/2) * n_radius
pos[0] = (local_pos_x) + connection_site.position.x
pos[1] = (local_pos_y) + connection_site.position.y
# This module will create one component that will be temporarily stored in ncomponent
new_component = None
# This module will create one joint (if a parent component is present) that will be temporarily stored in njoint
njoint = None
components = []
joints = []
if connection_site:
angle += connection_site.orientation.x
if (pos[1] - n_radius < TERRAIN_HEIGHT): #TODO CHANGE TO TERRAIN_HEIGT OR DO CHECK ELSEWHERE
if node is not None:
node.component = None
return components,joints
else:
fixture = fixtureDef(
shape=B2D.b2CircleShape(radius =n_radius),
density=1,
friction=0.1,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001
)
new_component = world.CreateDynamicBody(
position=(pos[0],pos[1]),
angle = angle,
fixtures = fixture)
color = [255,255,255]
if node is not None and module_list is not None:
color = world.cmap(node.type/len(module_list))
elif node is not None and module_list is None:
print("Note: cannot assign a color to the module since the 'module_list' is not passed as an argument")
# move to component creator
new_component.color1 = (color[0],color[1],color[2])
new_component.color2 = (color[0],color[1],color[2])
components.append(new_component)
if node is not None:
node.component = [new_component]
if connection_site is not None:
joint = mu.create_joint(world, p_c,new_component,connection_site, angle, self.torque)
joints.append(joint)
return components, joints
|
[
"gym_rem.utils.Rot.from_axis",
"gym_rem2D.morph.module_utility.create_joint",
"random.uniform",
"Box2D.b2RevoluteJoint",
"math.sin",
"numpy.array",
"math.cos",
"Box2D.b2CircleShape",
"random.gauss",
"Controller.m_controller.Controller"
] |
[((770, 784), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (778, 784), True, 'import numpy as np\n'), ((981, 1006), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (989, 1006), True, 'import numpy as np\n'), ((1025, 1089), 'gym_rem.utils.Rot.from_axis', 'Rot.from_axis', (['self.connection_axis', '(-self.theta * (np.pi / 2.0))'], {}), '(self.connection_axis, -self.theta * (np.pi / 2.0))\n', (1038, 1089), False, 'from gym_rem.utils import Rot\n'), ((1201, 1249), 'numpy.array', 'np.array', (['[0.0, self.size[2] / 2.0 + 0.002, 0.0]'], {}), '([0.0, self.size[2] / 2.0 + 0.002, 0.0])\n', (1209, 1249), True, 'import numpy as np\n'), ((1309, 1334), 'Controller.m_controller.Controller', 'm_controller.Controller', ([], {}), '()\n', (1332, 1334), False, 'from Controller import m_controller\n'), ((3344, 3408), 'gym_rem.utils.Rot.from_axis', 'Rot.from_axis', (['self.connection_axis', '(-self.theta * (np.pi / 2.0))'], {}), '(self.connection_axis, -self.theta * (np.pi / 2.0))\n', (3357, 3408), False, 'from gym_rem.utils import Rot\n'), ((3480, 3528), 'numpy.array', 'np.array', (['[0.0, 0.0, self.size[2] / 2.0 + 0.002]'], {}), '([0.0, 0.0, self.size[2] / 2.0 + 0.002])\n', (3488, 3528), True, 'import numpy as np\n'), ((2065, 2085), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2079, 2085), False, 'import random\n'), ((2125, 2161), 'random.gauss', 'random.gauss', (['self.radius', 'MUT_SIGMA'], {}), '(self.radius, MUT_SIGMA)\n', (2137, 2161), False, 'import random\n'), ((2167, 2187), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2181, 2187), False, 'import random\n'), ((2226, 2271), 'random.gauss', 'random.gauss', (['self.angle', '(MUT_SIGMA * math.pi)'], {}), '(self.angle, MUT_SIGMA * math.pi)\n', (2238, 2271), False, 'import random\n'), ((3027, 3046), 'numpy.array', 'np.array', (['key.value'], {}), '(key.value)\n', (3035, 3046), True, 'import numpy as np\n'), ((3985, 4026), 'numpy.array', 'np.array', (['[0.0, 0.0, -self.size[2] / 2.0]'], {}), '([0.0, 0.0, -self.size[2] / 2.0])\n', (3993, 4026), True, 'import numpy as np\n'), ((4576, 4597), 'Box2D.b2RevoluteJoint', 'B2D.b2RevoluteJoint', ([], {}), '()\n', (4595, 4597), True, 'import Box2D as B2D\n'), ((5029, 5064), 'math.cos', 'math.cos', (['(local_angle + math.pi / 2)'], {}), '(local_angle + math.pi / 2)\n', (5037, 5064), False, 'import math\n'), ((5080, 5115), 'math.sin', 'math.sin', (['(local_angle + math.pi / 2)'], {}), '(local_angle + math.pi / 2)\n', (5088, 5115), False, 'import math\n'), ((7637, 7716), 'gym_rem2D.morph.module_utility.create_joint', 'mu.create_joint', (['world', 'p_c', 'new_component', 'connection_site', 'angle', 'self.torque'], {}), '(world, p_c, new_component, connection_site, angle, self.torque)\n', (7652, 7716), True, 'import gym_rem2D.morph.module_utility as mu\n'), ((4286, 4306), 'numpy.array', 'np.array', (['conn.value'], {}), '(conn.value)\n', (4294, 4306), True, 'import numpy as np\n'), ((5990, 6043), 'math.cos', 'math.cos', (['(connection_site.orientation.x + math.pi / 2)'], {}), '(connection_site.orientation.x + math.pi / 2)\n', (5998, 6043), False, 'import math\n'), ((6069, 6122), 'math.sin', 'math.sin', (['(connection_site.orientation.x + math.pi / 2)'], {}), '(connection_site.orientation.x + math.pi / 2)\n', (6077, 6122), False, 'import math\n'), ((6803, 6837), 'Box2D.b2CircleShape', 'B2D.b2CircleShape', ([], {'radius': 'n_radius'}), '(radius=n_radius)\n', (6820, 6837), True, 'import Box2D as B2D\n')]
|
#!/usr/bin/env python3
import requests
import base64
import re
from levels_credentials import credentials
level_url = credentials[5]["url"]
level_username = credentials[5]["level"]
level_password = credentials[5]["password"]
next_level_url = credentials[6]["url"]
next_level_username = credentials[6]["level"]
credentials = "%s:%s" % (level_username, level_password)
auth_creds = base64.b64encode(credentials.encode("ascii"))
heads = {"Authorization": "Basic %s" % auth_creds.decode("ascii"), "Referer": next_level_url}
cooks = {"loggedin": "1"}
response = requests.get(level_url, headers=heads, cookies=cooks)
data = response.text
strings = re.split(r'\n|:|\s|<|>', data)
next_password = strings[strings.index(next_level_username) + 2]
print(next_password)
|
[
"levels_credentials.credentials.encode",
"re.split",
"requests.get"
] |
[((562, 615), 'requests.get', 'requests.get', (['level_url'], {'headers': 'heads', 'cookies': 'cooks'}), '(level_url, headers=heads, cookies=cooks)\n', (574, 615), False, 'import requests\n'), ((648, 678), 're.split', 're.split', (['"""\n|:|\\\\s|<|>"""', 'data'], {}), "('\\n|:|\\\\s|<|>', data)\n", (656, 678), False, 'import re\n'), ((401, 428), 'levels_credentials.credentials.encode', 'credentials.encode', (['"""ascii"""'], {}), "('ascii')\n", (419, 428), False, 'from levels_credentials import credentials\n')]
|
import warnings
import sc2
from sharpy.plans.require.require_base import RequireBase
class Gas(RequireBase):
"""Require that a specific number of minerals are "in the bank"."""
def __init__(self, vespene_requirement: int):
assert vespene_requirement is not None and isinstance(vespene_requirement, int)
super().__init__()
self.vespene_requirement = vespene_requirement
def check(self) -> bool:
if self.ai.vespene > self.vespene_requirement:
return True
return False
class RequiredGas(Gas):
def __init__(self, vespene_requirement: int):
warnings.warn("'RequiredGas' is deprecated, use 'Gas' instead", DeprecationWarning, 2)
super().__init__(vespene_requirement)
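# Illustrative usage (sketch): the deprecated alias still works but warns on construction.
#   requirement = RequiredGas(100)   # emits DeprecationWarning, otherwise identical to Gas(100)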
|
[
"warnings.warn"
] |
[((621, 711), 'warnings.warn', 'warnings.warn', (['"""\'RequiredGas\' is deprecated, use \'Gas\' instead"""', 'DeprecationWarning', '(2)'], {}), '("\'RequiredGas\' is deprecated, use \'Gas\' instead",\n DeprecationWarning, 2)\n', (634, 711), False, 'import warnings\n')]
|
import logging
import os
# set the default logging level to info
logging.basicConfig(level=logging.INFO)
ROOT_SRC_DIR = os.path.dirname(os.path.abspath(__file__))
USERNAME = os.environ.get('APP_USERNAME', 'admin')
PASSWORD = os.environ.get('APP_PASSWORD', '<PASSWORD>')
WORKER_NUM_CPUS = os.environ.get('WORKER_NUM_CPUS', .25)
SASL_USERNAME = os.environ.get('SASL_USERNAME', None)
SASL_PASSWORD = os.environ.get('SASL_PASSWORD', None)
SECURITY_PROTOCOL = os.environ.get('SECURITY_PROTOCOL', 'PLAINTEXT')
SASL_MECHANISM = os.environ.get('SASL_MECHANISM')
WORKER_CONFIG_PATH = os.environ.get('WORKER_CONFIG_PATH', '/../config/consumer_config.json')
RAY_HEAD_ADDRESS = os.environ.get('RAY_HEAD_ADDRESS', 'auto')
LOCAL_MODE = os.environ.get('LOCAL_MODE', 'Y')
|
[
"os.environ.get",
"os.path.abspath",
"logging.basicConfig"
] |
[((66, 105), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (85, 105), False, 'import logging\n'), ((176, 215), 'os.environ.get', 'os.environ.get', (['"""APP_USERNAME"""', '"""admin"""'], {}), "('APP_USERNAME', 'admin')\n", (190, 215), False, 'import os\n'), ((227, 271), 'os.environ.get', 'os.environ.get', (['"""APP_PASSWORD"""', '"""<PASSWORD>"""'], {}), "('APP_PASSWORD', '<PASSWORD>')\n", (241, 271), False, 'import os\n'), ((291, 330), 'os.environ.get', 'os.environ.get', (['"""WORKER_NUM_CPUS"""', '(0.25)'], {}), "('WORKER_NUM_CPUS', 0.25)\n", (305, 330), False, 'import os\n'), ((347, 384), 'os.environ.get', 'os.environ.get', (['"""SASL_USERNAME"""', 'None'], {}), "('SASL_USERNAME', None)\n", (361, 384), False, 'import os\n'), ((401, 438), 'os.environ.get', 'os.environ.get', (['"""SASL_PASSWORD"""', 'None'], {}), "('SASL_PASSWORD', None)\n", (415, 438), False, 'import os\n'), ((459, 507), 'os.environ.get', 'os.environ.get', (['"""SECURITY_PROTOCOL"""', '"""PLAINTEXT"""'], {}), "('SECURITY_PROTOCOL', 'PLAINTEXT')\n", (473, 507), False, 'import os\n'), ((525, 557), 'os.environ.get', 'os.environ.get', (['"""SASL_MECHANISM"""'], {}), "('SASL_MECHANISM')\n", (539, 557), False, 'import os\n'), ((579, 650), 'os.environ.get', 'os.environ.get', (['"""WORKER_CONFIG_PATH"""', '"""/../config/consumer_config.json"""'], {}), "('WORKER_CONFIG_PATH', '/../config/consumer_config.json')\n", (593, 650), False, 'import os\n'), ((670, 712), 'os.environ.get', 'os.environ.get', (['"""RAY_HEAD_ADDRESS"""', '"""auto"""'], {}), "('RAY_HEAD_ADDRESS', 'auto')\n", (684, 712), False, 'import os\n'), ((726, 759), 'os.environ.get', 'os.environ.get', (['"""LOCAL_MODE"""', '"""Y"""'], {}), "('LOCAL_MODE', 'Y')\n", (740, 759), False, 'import os\n'), ((138, 163), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (153, 163), False, 'import os\n')]
|
import numpy as np
from multiprocessing import Pool
from ..bbox import bbox_overlaps
# https://zhuanlan.zhihu.com/p/34655990
def calc_PR_curve(pred, label):
    pos = label[label == 1]  # positive samples
    threshold = np.sort(pred)[::-1]  # pred holds each sample's positive-class score; sort descending
label = label[pred.argsort()[::-1]]
precision = []
recall = []
tp = 0
fp = 0
    ap = 0  # average precision
for i in range(len(threshold)):
if label[i] == 1:
tp += 1
recall.append(tp / len(pos))
precision.append(tp / (tp + fp))
            # approximate the area under the PR curve
ap += (recall[i] - recall[i - 1]) * precision[i]
else:
fp += 1
recall.append(tp / len(pos))
precision.append(tp / (tp + fp))
return precision, recall, ap
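# Quick illustrative usage of calc_PR_curve (not part of the original module; values are made up):
#   pred = np.array([0.9, 0.7, 0.4, 0.2])
#   label = np.array([1, 0, 1, 0])
#   precision, recall, ap = calc_PR_curve(pred, label)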
def tpfp_voc(det_bboxes, gt_bboxes, iou_thr=0.5):
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
    # tp and fp are indexed by detections, not by the number of gt boxes
tp = np.zeros(num_dets, dtype=np.float32)
fp = np.zeros(num_dets, dtype=np.float32)
    # if there are no gt boxes, every detection is a false positive, so fp is set to 1 everywhere
if gt_bboxes.shape[0] == 0:
fp[...] = 1
return tp, fp
if num_dets == 0:
return tp, fp
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes).numpy()
# print(ious)
    # for each detection, find the best-matching gt IoU
    ious_max = ious.max(axis=1)
    # for each detection, find the index of the best-matching gt
    ious_argmax = ious.argmax(axis=1)
    # sort detections by confidence score in descending order
    sort_inds = np.argsort(-det_bboxes[:, -1])
    gt_covered = np.zeros(num_gts, dtype=bool)
    # when several detections match one gt, only the highest-scoring one above the IoU
    # threshold counts as tp; all the other detections count as fp
for i in sort_inds:
        # a detection matches a gt if their IoU exceeds the threshold
        if ious_max[i] >= iou_thr:
            matched_gt = ious_argmax[i]
            # each gt bbox is matched only once, to the highest-scoring detection (not by IoU)
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[i] = 1
else:
fp[i] = 1
else:
fp[i] = 1
return tp, fp
def _average_precision(recalls, precisions, mode='voc2007'):
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'voc2012':  # after smoothing, this is the standard area-under-PR-curve computation
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
        # vectorized, efficient formulation
        for i in range(mpre.shape[1] - 1, 0, -1):
            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])  # within each interval precision is the running maximum (horizontal segments)
        for i in range(num_scales):
            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]  # indices where recall changes, i.e. the interval boundaries on the x axis
            ap[i] = np.sum(
                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])  # sum of the area of each segment
    elif mode == 'voc2007':  # 11-point method, which needs the smoothing step
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
'Unrecognized mode, only "area" and "11points" are supported')
return ap
# code ref from mmdetection
def voc_eval_map(results, annotations, iou_thr=0.5, name='voc2007', nproc=4):
"""
    :param results: list[list]; the outer list indexes images and the inner list indexes classes.
        With 20 classes the inner list has length 20; each entry is an n x 5 numpy array holding
        that image's detections for that class in xyxy+conf format.
    :param annotations: same structure as results
    :param iou_thr: IoU threshold for counting a detection as a TP; VOC uses 0.5 by default
    :param name: which evaluation protocol to use; voc2007 is the 11-point method,
        voc2012 the standard PR-curve area computation
    :return:
"""
assert len(results) == len(annotations)
    num_imgs = len(results)  # number of images
num_classes = len(results[0]) # positive class num
pool = Pool(nproc)
eval_results = []
for i in range(num_classes):
cls_dets = [img_res[i] for img_res in results]
cls_gts = [img_res[i] for img_res in annotations]
tpfp = pool.starmap(
tpfp_voc,
zip(cls_dets, cls_gts, [iou_thr for _ in range(num_imgs)]))
        # tp/fp flags for every detected bbox of this class
        tp, fp = tuple(zip(*tpfp))
        # count the gt bboxes
        num_gts = 0
        for j, bbox in enumerate(cls_gts):
            num_gts += bbox.shape[0]
        # stack the detections of this class from all images
        cls_dets = np.vstack(cls_dets)
        num_dets = cls_dets.shape[0]  # number of detected bboxes
        # tp and fp are now known for every detection;
        # compute precision and recall below in a vectorized way
        sort_inds = np.argsort(-cls_dets[:, -1])  # sort by confidence score, descending
        # note: this is a vectorized, faster version of calc_PR_curve in c3_pr_roc.py
tp = np.hstack(tp)[sort_inds][None]
fp = np.hstack(fp)[sort_inds][None]
tp = np.cumsum(tp, axis=1)
fp = np.cumsum(fp, axis=1)
eps = np.finfo(np.float32).eps
recalls = tp / np.maximum(num_gts, eps)
precisions = tp / np.maximum((tp + fp), eps)
recalls = recalls[0, :]
precisions = precisions[0, :]
# print('recalls', recalls, 'precisions', precisions)
ap = _average_precision(recalls, precisions, name)[0]
eval_results.append({
'num_gts': num_gts,
'num_dets': num_dets,
'recall': recalls,
'precision': precisions,
'ap': ap
})
pool.close()
aps = []
for cls_result in eval_results:
if cls_result['num_gts'] > 0:
aps.append(cls_result['ap'])
mean_ap = np.array(aps).mean().item() if aps else 0.0
return mean_ap
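# Illustrative call (shapes only; mirrors the docstring, not taken from the original code):
#   results and annotations are lists over images of lists over classes of (n, 5) xyxy+conf arrays
#   mean_ap = voc_eval_map(results, annotations, iou_thr=0.5, name='voc2007')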
|
[
"numpy.maximum",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"numpy.sort",
"numpy.cumsum",
"numpy.finfo",
"numpy.where",
"numpy.arange",
"numpy.array",
"multiprocessing.Pool",
"numpy.vstack"
] |
[((934, 970), 'numpy.zeros', 'np.zeros', (['num_dets'], {'dtype': 'np.float32'}), '(num_dets, dtype=np.float32)\n', (942, 970), True, 'import numpy as np\n'), ((980, 1016), 'numpy.zeros', 'np.zeros', (['num_dets'], {'dtype': 'np.float32'}), '(num_dets, dtype=np.float32)\n', (988, 1016), True, 'import numpy as np\n'), ((1421, 1451), 'numpy.argsort', 'np.argsort', (['(-det_bboxes[:, -1])'], {}), '(-det_bboxes[:, -1])\n', (1431, 1451), True, 'import numpy as np\n'), ((1469, 1498), 'numpy.zeros', 'np.zeros', (['num_gts'], {'dtype': 'bool'}), '(num_gts, dtype=bool)\n', (1477, 1498), True, 'import numpy as np\n'), ((2182, 2220), 'numpy.zeros', 'np.zeros', (['num_scales'], {'dtype': 'np.float32'}), '(num_scales, dtype=np.float32)\n', (2190, 2220), True, 'import numpy as np\n'), ((3858, 3869), 'multiprocessing.Pool', 'Pool', (['nproc'], {}), '(nproc)\n', (3862, 3869), False, 'from multiprocessing import Pool\n'), ((210, 223), 'numpy.sort', 'np.sort', (['pred'], {}), '(pred)\n', (217, 223), True, 'import numpy as np\n'), ((2281, 2327), 'numpy.zeros', 'np.zeros', (['(num_scales, 1)'], {'dtype': 'recalls.dtype'}), '((num_scales, 1), dtype=recalls.dtype)\n', (2289, 2327), True, 'import numpy as np\n'), ((2343, 2388), 'numpy.ones', 'np.ones', (['(num_scales, 1)'], {'dtype': 'recalls.dtype'}), '((num_scales, 1), dtype=recalls.dtype)\n', (2350, 2388), True, 'import numpy as np\n'), ((2404, 2437), 'numpy.hstack', 'np.hstack', (['(zeros, recalls, ones)'], {}), '((zeros, recalls, ones))\n', (2413, 2437), True, 'import numpy as np\n'), ((2453, 2490), 'numpy.hstack', 'np.hstack', (['(zeros, precisions, zeros)'], {}), '((zeros, precisions, zeros))\n', (2462, 2490), True, 'import numpy as np\n'), ((4392, 4411), 'numpy.vstack', 'np.vstack', (['cls_dets'], {}), '(cls_dets)\n', (4401, 4411), True, 'import numpy as np\n'), ((4543, 4571), 'numpy.argsort', 'np.argsort', (['(-cls_dets[:, -1])'], {}), '(-cls_dets[:, -1])\n', (4553, 4571), True, 'import numpy as np\n'), ((4746, 4767), 'numpy.cumsum', 'np.cumsum', (['tp'], {'axis': '(1)'}), '(tp, axis=1)\n', (4755, 4767), True, 'import numpy as np\n'), ((4781, 4802), 'numpy.cumsum', 'np.cumsum', (['fp'], {'axis': '(1)'}), '(fp, axis=1)\n', (4790, 4802), True, 'import numpy as np\n'), ((2590, 2628), 'numpy.maximum', 'np.maximum', (['mpre[:, i - 1]', 'mpre[:, i]'], {}), '(mpre[:, i - 1], mpre[:, i])\n', (2600, 2628), True, 'import numpy as np\n'), ((2792, 2852), 'numpy.sum', 'np.sum', (['((mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])'], {}), '((mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])\n', (2798, 2852), True, 'import numpy as np\n'), ((4817, 4837), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (4825, 4837), True, 'import numpy as np\n'), ((4865, 4889), 'numpy.maximum', 'np.maximum', (['num_gts', 'eps'], {}), '(num_gts, eps)\n', (4875, 4889), True, 'import numpy as np\n'), ((4916, 4940), 'numpy.maximum', 'np.maximum', (['(tp + fp)', 'eps'], {}), '(tp + fp, eps)\n', (4926, 4940), True, 'import numpy as np\n'), ((2708, 2745), 'numpy.where', 'np.where', (['(mrec[i, 1:] != mrec[i, :-1])'], {}), '(mrec[i, 1:] != mrec[i, :-1])\n', (2716, 2745), True, 'import numpy as np\n'), ((2982, 3010), 'numpy.arange', 'np.arange', (['(0)', '(1 + 0.001)', '(0.1)'], {}), '(0, 1 + 0.001, 0.1)\n', (2991, 3010), True, 'import numpy as np\n'), ((4658, 4671), 'numpy.hstack', 'np.hstack', (['tp'], {}), '(tp)\n', (4667, 4671), True, 'import numpy as np\n'), ((4702, 4715), 'numpy.hstack', 'np.hstack', (['fp'], {}), '(fp)\n', (4711, 4715), True, 'import numpy as 
np\n'), ((5492, 5505), 'numpy.array', 'np.array', (['aps'], {}), '(aps)\n', (5500, 5505), True, 'import numpy as np\n')]
|
import cloudpickle
import numpy as np
import torch
from typing import List, Tuple
from scipy.linalg import eigh
def save_checkpoint(state, filename='checkpoint.pkl'):
data = cloudpickle.dumps(state)
with open(filename, 'wb') as fi:
fi.write(data)
def load_checkpoint(filename='checkpoint.pkl'):
with open(filename, 'rb') as fi:
return cloudpickle.load(fi)
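# Example round-trip (illustrative only; the checkpoint contents are arbitrary):
#   save_checkpoint({'epoch': 3, 'weights': np.zeros(10)}, 'checkpoint.pkl')
#   state = load_checkpoint('checkpoint.pkl')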
|
[
"cloudpickle.dumps",
"cloudpickle.load"
] |
[((180, 204), 'cloudpickle.dumps', 'cloudpickle.dumps', (['state'], {}), '(state)\n', (197, 204), False, 'import cloudpickle\n'), ((367, 387), 'cloudpickle.load', 'cloudpickle.load', (['fi'], {}), '(fi)\n', (383, 387), False, 'import cloudpickle\n')]
|
from django.contrib import admin
from .models import School,Student
# Register your models here.
admin.site.register(School)
admin.site.register(Student)
|
[
"django.contrib.admin.site.register"
] |
[((100, 127), 'django.contrib.admin.site.register', 'admin.site.register', (['School'], {}), '(School)\n', (119, 127), False, 'from django.contrib import admin\n'), ((129, 157), 'django.contrib.admin.site.register', 'admin.site.register', (['Student'], {}), '(Student)\n', (148, 157), False, 'from django.contrib import admin\n')]
|
"""
B-Complement
Input:
part point clouds: B x P x N x 3
Output:
R and T: B x P x (3 + 4)
Losses:
Center L2 Loss, Rotation L2 Loss, Rotation Chamfer-Distance Loss
"""
import torch
from torch import nn
import torch.nn.functional as F
import sys, os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
from cd.chamfer import chamfer_distance
from quaternion import qrot
import ipdb
from scipy.optimize import linear_sum_assignment
# PointNet Front-end
class PartPointNet(nn.Module):
def __init__(self, feat_len):
super(PartPointNet, self).__init__()
self.conv1 = nn.Conv1d(3, 64, 1)
self.conv2 = nn.Conv1d(64, 64, 1)
self.conv3 = nn.Conv1d(64, 64, 1)
self.conv4 = nn.Conv1d(64, 128, 1)
#self.conv5 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.bn3 = nn.BatchNorm1d(64)
self.bn4 = nn.BatchNorm1d(128)
#self.bn5 = nn.BatchNorm1d(1024)
self.mlp1 = nn.Linear(128, feat_len)
self.bn6 = nn.BatchNorm1d(feat_len)
"""
Input: B x N x 3
Output: B x F
"""
def forward(self, x):
x = x.permute(0, 2, 1)
x = torch.relu(self.bn1(self.conv1(x)))
x = torch.relu(self.bn2(self.conv2(x)))
x = torch.relu(self.bn3(self.conv3(x)))
x = torch.relu(self.bn4(self.conv4(x)))
#x = torch.relu(self.bn5(self.conv5(x)))
x = x.max(dim=-1)[0]
x = torch.relu(self.bn6(self.mlp1(x)))
return x
# PointNet Back-end
class PoseDecoder(nn.Module):
def __init__(self, feat_len):
super(PoseDecoder, self).__init__()
self.mlp1 = nn.Linear(feat_len, 512)
self.mlp2 = nn.Linear(512, 256)
self.trans = nn.Linear(256, 3)
self.quat = nn.Linear(256, 4)
self.quat.bias.data.zero_()
"""
Input: B x (2F + P + 16)
Output: B x 7
"""
def forward(self, feat):
feat = torch.relu(self.mlp1(feat))
feat = torch.relu(self.mlp2(feat))
trans = torch.tanh(self.trans(feat)) # consider to remove torch.tanh if not using PartNet normalization
quat_bias = feat.new_tensor([[[1.0, 0.0, 0.0, 0.0]]])
quat = self.quat(feat).add(quat_bias)
quat = quat / (1e-12 + quat.pow(2).sum(dim=-1, keepdim=True)).sqrt()
out = torch.cat([trans, quat.squeeze(0)], dim=-1)
return out
class Network(nn.Module):
def __init__(self, conf):
super(Network, self).__init__()
self.conf = conf
self.part_pointnet = PartPointNet(conf.feat_len)
self.pose_decoder = PoseDecoder(2 * conf.feat_len + conf.max_num_part + 16)
"""
Input: B x P x N x 3, B x P, B x P x P, B x 7
Output: B x P x (3 + 4)
"""
def forward(self,seq, part_pcs, part_valids, instance_label, gt_part_pose):
batch_size = part_pcs.shape[0]
num_part = part_pcs.shape[1]
num_point = part_pcs.shape[2]
pred_part_poses = np.zeros((batch_size, num_part, 7))
pred_part_poses = torch.tensor(pred_part_poses).to(self.conf.device)
# generate random_noise
random_noise = np.random.normal(loc=0.0, scale=1.0, size=[batch_size, num_part, 16]).astype(
np.float32) # B x P x 16
random_noise = torch.tensor(random_noise).to(self.conf.device)
for iter in range(num_part):
select_ind = seq[:,iter].int().tolist()
batch_ind = [i for i in range(len(select_ind))]
if iter == 0:
cur_pred_pose = gt_part_pose # B x 7
pred_part_poses= pred_part_poses.float()
pred_part_poses[batch_ind,select_ind,:] = cur_pred_pose
cur_pred_center = cur_pred_pose[:, :3].unsqueeze(1).repeat(1, num_point, 1) # B x N x 3
cur_pred_qrot = cur_pred_pose[:, 3:].unsqueeze(1).repeat(1, num_point, 1) # B x N x 4
cur_part = cur_pred_center + qrot(cur_pred_qrot, part_pcs[batch_ind,select_ind, :, :])# B x N x 3
cur_part = cur_part.unsqueeze(1) # B x 1 x N x 3
cur_shape = cur_part # B x batch_ind,select_ind x N x 3
else:
cur_shape_feat = self.part_pointnet(cur_shape.view(batch_size, -1, 3)) # B x F
cur_part_feat = self.part_pointnet(part_pcs[batch_ind,select_ind, :, :])# B x F
cat_feat = torch.cat([cur_shape_feat, cur_part_feat, instance_label[batch_ind,select_ind, :].contiguous(), random_noise[batch_ind,select_ind, :].contiguous()], dim=-1) # B x (2F + P + 16)
cur_pred_pose = self.pose_decoder(cat_feat) # B x 7
pred_part_poses[batch_ind,select_ind, :] = cur_pred_pose
cur_pred_center = cur_pred_pose[:, :3].unsqueeze(1).repeat(1, num_point, 1) # B x N x 3
cur_pred_qrot = cur_pred_pose[:, 3:].unsqueeze(1).repeat(1, num_point, 1) # B x N x 4
cur_part = cur_pred_center + qrot(cur_pred_qrot, part_pcs[batch_ind,select_ind, :, :]) # B x N x 3
cur_part = cur_part.unsqueeze(1) # B x 1 x N x 3
cur_shape = torch.cat([cur_shape, cur_part], dim=1) # B x select_ind x N x 3
pred_part_poses = pred_part_poses.double() * part_valids.unsqueeze(2).double()
return pred_part_poses.float()
"""
Input: * x N x 3, * x 3, * x 4, * x 3, * x 4,
Output: *, * (two lists)
"""
def linear_assignment(self, pts, centers1, quats1, centers2, quats2):
cur_part_cnt = pts.shape[0]
num_point = pts.shape[1]
with torch.no_grad():
cur_quats1 = quats1.unsqueeze(1).repeat(1, num_point, 1)
cur_centers1 = centers1.unsqueeze(1).repeat(1, num_point, 1)
cur_pts1 = qrot(cur_quats1, pts) + cur_centers1
cur_quats2 = quats2.unsqueeze(1).repeat(1, num_point, 1)
cur_centers2 = centers2.unsqueeze(1).repeat(1, num_point, 1)
cur_pts2 = qrot(cur_quats2, pts) + cur_centers2
cur_pts1 = cur_pts1.unsqueeze(1).repeat(1, cur_part_cnt, 1, 1).view(-1, num_point, 3)
cur_pts2 = cur_pts2.unsqueeze(0).repeat(cur_part_cnt, 1, 1, 1).view(-1, num_point, 3)
dist1, dist2 = chamfer_distance(cur_pts1, cur_pts2, transpose=False)
dist_mat = (dist1.mean(1) + dist2.mean(1)).view(cur_part_cnt, cur_part_cnt)
rind, cind = linear_sum_assignment(dist_mat.cpu().numpy())
return rind, cind
"""
Input: B x P x 3, B x P x 3, B x P
Output: B
"""
def get_trans_l2_loss(self, trans1, trans2, valids):
loss_per_data = (trans1 - trans2).pow(2).sum(dim=-1)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data
"""
Input: B x P x N x 3, B x P x 4, B x P x 4, B x P
Output: B
"""
def get_rot_l2_loss(self, pts, quat1, quat2, valids):
batch_size = pts.shape[0]
num_point = pts.shape[2]
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
loss_per_data = (pts1 - pts2).pow(2).sum(-1).mean(-1)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data
"""
Input: B x P x N x 3, B x P x 4, B x P x 4, B x P
Output: B
"""
def get_rot_cd_loss(self, pts, quat1, quat2, valids, device):
batch_size = pts.shape[0]
num_point = pts.shape[2]
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
dist1, dist2 = chamfer_distance(pts1.view(-1, num_point, 3), pts2.view(-1, num_point, 3), transpose=False)
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.view(batch_size, -1)
loss_per_data = loss_per_data.to(device)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data #
def get_total_cd_loss(self, pts, quat1, quat2, valids, center1, center2, device):
batch_size = pts.shape[0]
num_part = pts.shape[1]
num_point = pts.shape[2]
center1 = center1.unsqueeze(2).repeat(1,1,num_point,1)
center2 = center2.unsqueeze(2).repeat(1,1,num_point,1)
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center1
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center2
dist1, dist2 = chamfer_distance(pts1.view(-1, num_point, 3), pts2.view(-1, num_point, 3), transpose=False)
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.view(batch_size, -1)
thre = 0.01
loss_per_data = loss_per_data.to(device)
acc = [[0 for i in range(num_part)]for j in range(batch_size)]
for i in range(batch_size):
for j in range(num_part):
if loss_per_data[i,j] < thre and valids[i,j]:
acc[i][j] = 1
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data , acc
def get_shape_cd_loss(self, pts, quat1, quat2, valids, center1, center2, device):
batch_size = pts.shape[0]
num_part = pts.shape[1]
num_point = pts.shape[2]
center1 = center1.unsqueeze(2).repeat(1,1,num_point,1)
center2 = center2.unsqueeze(2).repeat(1,1,num_point,1)
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center1
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center2
pts1 = pts1.view(batch_size,num_part*num_point,3)
pts2 = pts2.view(batch_size,num_part*num_point,3)
dist1, dist2 = chamfer_distance(pts1, pts2, transpose=False)
        valids = valids.unsqueeze(2).repeat(1, 1, num_point).view(batch_size, -1)  # tile per point (was hard-coded 1000)
dist1 = dist1 * valids
dist2 = dist2 * valids
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.to(device)
return loss_per_data
    def get_sym_point(self, point, x, y, z):
        # operate on a copy so repeated calls do not flip the caller's contact point in place
        point = point.clone()
        if x:
            point[0] = - point[0]
        if y:
            point[1] = - point[1]
        if z:
            point[2] = - point[2]
        return point.tolist()
def get_possible_point_list(self, point, sym):
        # NOTE: this overrides the per-part symmetry info passed in, treating every part as fully symmetric
        sym = torch.tensor([1.0, 1.0, 1.0])
point_list = []
if sym.equal(torch.tensor([0.0, 0.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
elif sym.equal(torch.tensor([1.0, 0.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
elif sym.equal(torch.tensor([0.0, 1.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
elif sym.equal(torch.tensor([0.0, 0.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
elif sym.equal(torch.tensor([1.0, 1.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 1, 1, 0))
elif sym.equal(torch.tensor([1.0, 0.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 1, 0, 1))
elif sym.equal(torch.tensor([0.0, 1.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 0, 1, 1))
else:
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 1, 1, 0))
point_list.append(self.get_sym_point(point, 1, 0, 1))
point_list.append(self.get_sym_point(point, 0, 1, 1))
point_list.append(self.get_sym_point(point, 1, 1, 1))
return point_list
def get_min_l2_dist(self, list1, list2, center1, center2, quat1, quat2):
list1 = torch.tensor(list1) # m x 3
list2 = torch.tensor(list2) # n x 3
len1 = list1.shape[0]
len2 = list2.shape[0]
center1 = center1.unsqueeze(0).repeat(len1, 1)
center2 = center2.unsqueeze(0).repeat(len2, 1)
quat1 = quat1.unsqueeze(0).repeat(len1, 1)
quat2 = quat2.unsqueeze(0).repeat(len2, 1)
list1 = list1.to(self.conf.device)
list2 = list2.to(self.conf.device)
list1 = center1 + qrot(quat1, list1)
list2 = center2 + qrot(quat2, list2)
mat1 = list1.unsqueeze(1).repeat(1, len2, 1)
mat2 = list2.unsqueeze(0).repeat(len1, 1, 1)
mat = (mat1 - mat2) * (mat1 - mat2)
mat = mat.sum(dim=-1)
return mat.min()
"""
Contact point loss metric
Date: 2020/5/22
Input B x P x 3, B x P x 4, B x P x P x 4, B x P x 3
    Output B
"""
def get_contact_point_loss(self, center, quat, contact_points, sym_info):
batch_size = center.shape[0]
num_part = center.shape[1]
contact_point_loss = torch.zeros(batch_size)
total_num = 0
count = 0
for b in range(batch_size):
sum_loss = 0
for i in range(num_part):
for j in range(num_part):
if contact_points[b, i, j, 0]:
contact_point_1 = contact_points[b, i, j, 1:]
contact_point_2 = contact_points[b, j, i, 1:]
sym1 = sym_info[b, i]
sym2 = sym_info[b, j]
point_list_1 = self.get_possible_point_list(contact_point_1, sym1)
point_list_2 = self.get_possible_point_list(contact_point_2, sym2)
dist = self.get_min_l2_dist(point_list_1, point_list_2, center[b, i, :], center[b, j, :],
quat[b, i, :], quat[b, j, :]) # 1
if dist < 0.01:
count += 1
total_num += 1
sum_loss += dist
contact_point_loss[b] = sum_loss
return contact_point_loss, count, total_num
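# --- Illustrative sketch (appended; not part of the original class) ---
# A dependency-free illustration of the symmetry expansion performed by
# get_possible_point_list above: a contact point is mirrored across every axis
# whose flag in `sym` is set. The helper name below is hypothetical and exists
# only for this sketch.
def expand_symmetric_points(point, sym):
    # point: [x, y, z]; sym: [sx, sy, sz], where 1 means reflection across that axis is allowed
    candidates = [list(point)]
    for axis, allowed in enumerate(sym):
        if allowed:
            # reflect every candidate found so far across this axis
            candidates += [[-v if i == axis else v for i, v in enumerate(c)]
                           for c in candidates]
    return candidates
# expand_symmetric_points([1.0, 2.0, 3.0], [1, 1, 0]) yields the same four
# combinations as the (1, 1, 0) branch of get_possible_point_list.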
|
[
"torch.mean",
"os.path.abspath",
"cd.chamfer.chamfer_distance",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d",
"numpy.zeros",
"quaternion.qrot",
"torch.cat",
"numpy.random.normal",
"torch.nn.Linear",
"torch.zeros",
"torch.no_grad",
"os.path.join",
"torch.tensor"
] |
[((355, 380), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (370, 380), False, 'import sys, os\n'), ((398, 432), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""../utils"""'], {}), "(BASE_DIR, '../utils')\n", (410, 432), False, 'import sys, os\n'), ((718, 737), 'torch.nn.Conv1d', 'nn.Conv1d', (['(3)', '(64)', '(1)'], {}), '(3, 64, 1)\n', (727, 737), False, 'from torch import nn\n'), ((759, 779), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(64)', '(1)'], {}), '(64, 64, 1)\n', (768, 779), False, 'from torch import nn\n'), ((801, 821), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(64)', '(1)'], {}), '(64, 64, 1)\n', (810, 821), False, 'from torch import nn\n'), ((843, 864), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(128)', '(1)'], {}), '(64, 128, 1)\n', (852, 864), False, 'from torch import nn\n'), ((931, 949), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (945, 949), False, 'from torch import nn\n'), ((969, 987), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (983, 987), False, 'from torch import nn\n'), ((1007, 1025), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (1021, 1025), False, 'from torch import nn\n'), ((1045, 1064), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (1059, 1064), False, 'from torch import nn\n'), ((1127, 1151), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'feat_len'], {}), '(128, feat_len)\n', (1136, 1151), False, 'from torch import nn\n'), ((1171, 1195), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['feat_len'], {}), '(feat_len)\n', (1185, 1195), False, 'from torch import nn\n'), ((1808, 1832), 'torch.nn.Linear', 'nn.Linear', (['feat_len', '(512)'], {}), '(feat_len, 512)\n', (1817, 1832), False, 'from torch import nn\n'), ((1853, 1872), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (1862, 1872), False, 'from torch import nn\n'), ((1895, 1912), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(3)'], {}), '(256, 3)\n', (1904, 1912), False, 'from torch import nn\n'), ((1934, 1951), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(4)'], {}), '(256, 4)\n', (1943, 1951), False, 'from torch import nn\n'), ((3152, 3187), 'numpy.zeros', 'np.zeros', (['(batch_size, num_part, 7)'], {}), '((batch_size, num_part, 7))\n', (3160, 3187), True, 'import numpy as np\n'), ((10047, 10092), 'cd.chamfer.chamfer_distance', 'chamfer_distance', (['pts1', 'pts2'], {'transpose': '(False)'}), '(pts1, pts2, transpose=False)\n', (10063, 10092), False, 'from cd.chamfer import chamfer_distance\n'), ((10680, 10709), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (10692, 10709), False, 'import torch\n'), ((13035, 13054), 'torch.tensor', 'torch.tensor', (['list1'], {}), '(list1)\n', (13047, 13054), False, 'import torch\n'), ((13080, 13099), 'torch.tensor', 'torch.tensor', (['list2'], {}), '(list2)\n', (13092, 13099), False, 'import torch\n'), ((14099, 14122), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (14110, 14122), False, 'import torch\n'), ((5777, 5792), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5790, 5792), False, 'import torch\n'), ((6424, 6477), 'cd.chamfer.chamfer_distance', 'chamfer_distance', (['cur_pts1', 'cur_pts2'], {'transpose': '(False)'}), '(cur_pts1, cur_pts2, transpose=False)\n', (6440, 6477), False, 'from cd.chamfer import chamfer_distance\n'), ((8005, 8029), 'torch.mean', 'torch.mean', (['dist1'], {'dim': '(1)'}), '(dist1, dim=1)\n', (8015, 8029), False, 'import torch\n'), ((8032, 
8056), 'torch.mean', 'torch.mean', (['dist2'], {'dim': '(1)'}), '(dist2, dim=1)\n', (8042, 8056), False, 'import torch\n'), ((8894, 8918), 'torch.mean', 'torch.mean', (['dist1'], {'dim': '(1)'}), '(dist1, dim=1)\n', (8904, 8918), False, 'import torch\n'), ((8921, 8945), 'torch.mean', 'torch.mean', (['dist2'], {'dim': '(1)'}), '(dist2, dim=1)\n', (8931, 8945), False, 'import torch\n'), ((10253, 10277), 'torch.mean', 'torch.mean', (['dist1'], {'dim': '(1)'}), '(dist1, dim=1)\n', (10263, 10277), False, 'import torch\n'), ((10280, 10304), 'torch.mean', 'torch.mean', (['dist2'], {'dim': '(1)'}), '(dist2, dim=1)\n', (10290, 10304), False, 'import torch\n'), ((10755, 10784), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10767, 10784), False, 'import torch\n'), ((13493, 13511), 'quaternion.qrot', 'qrot', (['quat1', 'list1'], {}), '(quat1, list1)\n', (13497, 13511), False, 'from quaternion import qrot\n'), ((13538, 13556), 'quaternion.qrot', 'qrot', (['quat2', 'list2'], {}), '(quat2, list2)\n', (13542, 13556), False, 'from quaternion import qrot\n'), ((3214, 3243), 'torch.tensor', 'torch.tensor', (['pred_part_poses'], {}), '(pred_part_poses)\n', (3226, 3243), False, 'import torch\n'), ((3320, 3389), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '[batch_size, num_part, 16]'}), '(loc=0.0, scale=1.0, size=[batch_size, num_part, 16])\n', (3336, 3389), True, 'import numpy as np\n'), ((3459, 3485), 'torch.tensor', 'torch.tensor', (['random_noise'], {}), '(random_noise)\n', (3471, 3485), False, 'import torch\n'), ((5308, 5347), 'torch.cat', 'torch.cat', (['[cur_shape, cur_part]'], {'dim': '(1)'}), '([cur_shape, cur_part], dim=1)\n', (5317, 5347), False, 'import torch\n'), ((5960, 5981), 'quaternion.qrot', 'qrot', (['cur_quats1', 'pts'], {}), '(cur_quats1, pts)\n', (5964, 5981), False, 'from quaternion import qrot\n'), ((6163, 6184), 'quaternion.qrot', 'qrot', (['cur_quats2', 'pts'], {}), '(cur_quats2, pts)\n', (6167, 6184), False, 'from quaternion import qrot\n'), ((10876, 10905), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (10888, 10905), False, 'import torch\n'), ((4121, 4179), 'quaternion.qrot', 'qrot', (['cur_pred_qrot', 'part_pcs[batch_ind, select_ind, :, :]'], {}), '(cur_pred_qrot, part_pcs[batch_ind, select_ind, :, :])\n', (4125, 4179), False, 'from quaternion import qrot\n'), ((5143, 5201), 'quaternion.qrot', 'qrot', (['cur_pred_qrot', 'part_pcs[batch_ind, select_ind, :, :]'], {}), '(cur_pred_qrot, part_pcs[batch_ind, select_ind, :, :])\n', (5147, 5201), False, 'from quaternion import qrot\n'), ((11063, 11092), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (11075, 11092), False, 'import torch\n'), ((11250, 11279), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (11262, 11279), False, 'import torch\n'), ((11437, 11466), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 0.0])\n', (11449, 11466), False, 'import torch\n'), ((11756, 11785), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0, 1.0]'], {}), '([1.0, 0.0, 1.0])\n', (11768, 11785), False, 'import torch\n'), ((12075, 12104), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 1.0]'], {}), '([0.0, 1.0, 1.0])\n', (12087, 12104), False, 'import torch\n')]
|
import pytest
from dmwmclient import Client
@pytest.mark.asyncio
async def test_cycle():
client = Client()
dynamo = client.dynamo
cycle = await dynamo.latest_cycle()
assert type(cycle) is dict
assert set(cycle.keys()) == {'cycle', 'partition_id', 'timestamp', 'comment'}
@pytest.mark.asyncio
async def test_detail():
client = Client()
dynamo = client.dynamo
df = await dynamo.site_detail('T2_PK_NCP', 34069)
assert set(df.columns) == {'condition', 'condition_id', 'decision', 'name', 'site', 'size'}
assert df.sum()['size'] == 99787.18272119202
|
[
"dmwmclient.Client"
] |
[((104, 112), 'dmwmclient.Client', 'Client', ([], {}), '()\n', (110, 112), False, 'from dmwmclient import Client\n'), ((355, 363), 'dmwmclient.Client', 'Client', ([], {}), '()\n', (361, 363), False, 'from dmwmclient import Client\n')]
|
import configparser
import os
import logging
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../config.ini')
config = configparser.ConfigParser()
with open(filename) as config_file:
    config.read_file(config_file)
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
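# --- Illustrative usage (appended; assumes config.ini was readable above) ---
def log_config_summary():
    logger.info('loaded config sections: %s', ', '.join(config.sections()))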
|
[
"os.path.dirname",
"logging.StreamHandler",
"logging.Formatter",
"configparser.ConfigParser",
"os.path.join",
"logging.getLogger"
] |
[((56, 81), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (71, 81), False, 'import os\n'), ((93, 131), 'os.path.join', 'os.path.join', (['dirname', '"""../config.ini"""'], {}), "(dirname, '../config.ini')\n", (105, 131), False, 'import os\n'), ((142, 169), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (167, 169), False, 'import configparser\n'), ((213, 232), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (230, 232), False, 'import logging\n'), ((243, 266), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (264, 266), False, 'import logging\n'), ((279, 351), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""'], {}), "('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n", (296, 351), False, 'import logging\n')]
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to wait for operation completion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.data_fusion import datafusion as df
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.data_fusion import operation_poller
from googlecloudsdk.command_lib.data_fusion import resource_args
class Wait(base.SilentCommand):
"""Wait for asynchronous operation to complete.
## EXAMPLES
To wait for operation 'my-operation' in project 'my-project' and location
'my-location', run:
$ {command} --project=my-project --location=my-location my-operation
"""
WAIT_CEILING_MS = 60 * 20 * 1000
@staticmethod
def Args(parser):
resource_args.AddOperationResourceArg(parser, 'The operation to wait for.')
def Run(self, args):
datafusion = df.Datafusion()
operation_ref = args.CONCEPTS.operation.Parse()
req = datafusion.messages.DatafusionProjectsLocationsOperationsGetRequest(
name=operation_ref.RelativeName())
operation = datafusion.client.projects_locations_operations.Get(req)
waiter.WaitFor(
operation_poller.OperationPoller(),
operation.name,
'Waiting for [{}] to complete.'.format(operation.name),
wait_ceiling_ms=self.WAIT_CEILING_MS)
|
[
"googlecloudsdk.command_lib.data_fusion.resource_args.AddOperationResourceArg",
"googlecloudsdk.command_lib.data_fusion.operation_poller.OperationPoller",
"googlecloudsdk.api_lib.data_fusion.datafusion.Datafusion"
] |
[((1422, 1497), 'googlecloudsdk.command_lib.data_fusion.resource_args.AddOperationResourceArg', 'resource_args.AddOperationResourceArg', (['parser', '"""The operation to wait for."""'], {}), "(parser, 'The operation to wait for.')\n", (1459, 1497), False, 'from googlecloudsdk.command_lib.data_fusion import resource_args\n'), ((1539, 1554), 'googlecloudsdk.api_lib.data_fusion.datafusion.Datafusion', 'df.Datafusion', ([], {}), '()\n', (1552, 1554), True, 'from googlecloudsdk.api_lib.data_fusion import datafusion as df\n'), ((1833, 1867), 'googlecloudsdk.command_lib.data_fusion.operation_poller.OperationPoller', 'operation_poller.OperationPoller', ([], {}), '()\n', (1865, 1867), False, 'from googlecloudsdk.command_lib.data_fusion import operation_poller\n')]
|
# -*- coding: utf-8 -*-
"""groupby procedure for recipes"""
import fnmatch
import logging
from typing import List
from .. helpers import debuggable, mkfunc
from .. model.ingredient import DataPointIngredient
from .. model.chef import Chef
logger = logging.getLogger('groupby')
@debuggable
def groupby(chef: Chef, ingredients: List[DataPointIngredient], result, **options) -> DataPointIngredient:
"""group ingredient data by column(s) and run aggregate function
.. highlight:: yaml
Procedure format:
::
procedure: groupby
ingredients: # list of ingredient id
- ingredient_id
result: str # new ingredient id
options:
groupby: str or list # column(s) to group
aggregate: dict # function block
transform: dict # function block
filter: dict # function block
The function block should have below format:
::
aggregate:
column1: func_name1
column2: func_name2
or
::
        aggregate:
column1:
function: func_name
param1: foo
param2: baz
    Wildcards are supported in the column names, so ``aggregate: {"*": "sum"}`` will run on every
    indicator in the ingredient.
Keyword Args
------------
groupby : `str` or `list`
the column(s) to group, can be a list or a string
insert_key : `dict`
        manually insert keys into the result. This is useful when we want to add back the
aggregated column and set them to one value. For example ``geo: global`` inserts
the ``geo`` column with all values are "global"
aggregate
transform
    filter : `dict`, optional
        the function to run. Only one of `aggregate`, `transform` and `filter` should be supplied.
Note
----
- Only one of ``aggregate``, ``transform`` or ``filter`` can be used in one procedure.
- Any columns not mentioned in groupby or functions are dropped.
"""
    assert len(ingredients) == 1, "procedure only supports 1 ingredient for now."
# ingredient = chef.dag.get_node(ingredients[0]).evaluate()
ingredient = ingredients[0]
logger.info("groupby: " + ingredient.id)
data = ingredient.get_data()
by = options.pop('groupby')
if 'insert_key' in options:
insert_key = options.pop('insert_key')
else:
insert_key = dict()
# only one of aggregate/transform/filter should be in options.
assert len(list(options.keys())) == 1
comp_type = list(options.keys())[0]
assert comp_type in ['aggregate', 'transform', 'filter']
if comp_type == 'aggregate': # only aggregate should change the key of ingredient
if isinstance(by, list):
newkey = ','.join(by)
else:
newkey = by
by = [by]
logger.debug("changing the key to: " + str(newkey))
else:
newkey = ingredient.key
        if not isinstance(by, list):
            by = [by]
newdata = dict()
for name_tmpl, func in options[comp_type].items():
func = mkfunc(func)
indicator_names = fnmatch.filter(data.keys(), name_tmpl)
for k in indicator_names:
df = data[k].compute()
if comp_type == 'aggregate':
newdata[k] = (df.groupby(by=by).agg({k: func})
.reset_index().dropna())
if comp_type == 'transform':
df = df.set_index(ingredient.key)
levels = [df.index.names.index(x) for x in by]
newdata[k] = (df.groupby(level=levels)[k].transform(func)
.reset_index().dropna())
if comp_type == 'filter':
df = df.set_index(ingredient.key)
levels = [df.index.names.index(x) for x in by]
newdata[k] = (df.groupby(level=levels)[k].filter(func)
.reset_index().dropna())
for col, val in insert_key.items():
newdata[k][col] = val
newkey = newkey + ',' + col
return DataPointIngredient.from_procedure_result(result, newkey, data_computed=newdata)
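# --- Illustrative sketch (appended; not part of the procedure module) ---
# A small standalone illustration of what an ``aggregate`` block in the
# docstring above does conceptually. The column names and values are made up,
# and pandas is assumed to be available; the real procedure works on the
# ingredient's computed dataframes instead.
def _groupby_aggregate_sketch():
    import pandas as pd
    df = pd.DataFrame({
        "geo": ["usa", "usa", "swe", "swe"],
        "year": [2000, 2001, 2000, 2001],
        "population": [1, 2, 3, 4],
    })
    # groupby: geo / aggregate: {population: sum}  ->  the key collapses to "geo"
    return df.groupby(by=["geo"]).agg({"population": "sum"}).reset_index().dropna()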
|
[
"logging.getLogger"
] |
[((254, 282), 'logging.getLogger', 'logging.getLogger', (['"""groupby"""'], {}), "('groupby')\n", (271, 282), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
import pickle
import numpy as np
from Conv_Bidrect_LSTM import CBLSTM
import tensorflow as tf
def load_data(normal_stat=False):
if normal_stat:
filepath = "./data/data_normal.p"
else:
filepath = "./data/data_seq.p"
with open(filepath, mode='rb') as f:
x = pickle.load(f, encoding='latin1')
    return x[0], x[1], x[2], x[3]  # return train_x, train_y, test_x, test_y
if __name__ == '__main__':
train_x, train_y, test_x, test_y = load_data()
print(train_x.shape)
print(train_y.shape)
l = 20 # time steps
d = 70 # data length
k = 50 # filter number
m = 4 # filter size
s = 2 # pool size
batch_size = 30 # batch size
train_x = train_x.reshape([-1, l, d, 1])
test_x = test_x.reshape([-1, l, d, 1])
model = CBLSTM(MODEL_TYPE = 'Regression',
FILTER_NUM = k,
FILTER_SIZE = m,
POOL_SIZE = s,
INPUT_LENGTH = d,
TIME_STEP = l,
CELL_UNITS = [50, 100],
FULL_UNITS = [100, 200],
KEEP_PROB = 0.5,
OUTPUT_NUM = 1, )
model.train_model(train_x = train_x,
train_y = train_y,
test_x = test_x,
test_y = test_y,
batch_size = batch_size,
num_epochs = 100,
num_threads = 4, )
|
[
"pickle.load",
"Conv_Bidrect_LSTM.CBLSTM"
] |
[((821, 1006), 'Conv_Bidrect_LSTM.CBLSTM', 'CBLSTM', ([], {'MODEL_TYPE': '"""Regression"""', 'FILTER_NUM': 'k', 'FILTER_SIZE': 'm', 'POOL_SIZE': 's', 'INPUT_LENGTH': 'd', 'TIME_STEP': 'l', 'CELL_UNITS': '[50, 100]', 'FULL_UNITS': '[100, 200]', 'KEEP_PROB': '(0.5)', 'OUTPUT_NUM': '(1)'}), "(MODEL_TYPE='Regression', FILTER_NUM=k, FILTER_SIZE=m, POOL_SIZE=s,\n INPUT_LENGTH=d, TIME_STEP=l, CELL_UNITS=[50, 100], FULL_UNITS=[100, 200\n ], KEEP_PROB=0.5, OUTPUT_NUM=1)\n", (827, 1006), False, 'from Conv_Bidrect_LSTM import CBLSTM\n'), ((318, 351), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (329, 351), False, 'import pickle\n')]
|
#!/usr/bin/env python
# Programmer(s): <NAME>.
# This file is part of the 'exphydro.lumped' package.
from hydroutils import Parameter
######################################################################
class ExphydroParameters(object):
def __init__(self):
""" Each parameter set contains a random realisation of all six
EXP-HYDRO parameters as well as default values of Nash-Sutcliffe
and Kling-Gupta efficiencies
"""
self.f = Parameter(0, 0.1)
self.smax = Parameter(100.0, 1500.0)
self.qmax = Parameter(10.0, 50.0)
self.ddf = Parameter(0.0, 5.0)
self.mint = Parameter(-3.0, 0.0)
self.maxt = Parameter(0.0, 3.0)
self.objval = -9999 # This is the objective function value
# ----------------------------------------------------------------
def assignvalues(self, f, smax, qmax, ddf, mint, maxt):
""" This method is used to manually assign parameter values,
which are given by the user as input arguments.
"""
self.f.value = f
self.smax.value = smax
self.qmax.value = qmax
self.ddf.value = ddf
self.mint.value = mint
self.maxt.value = maxt
# ----------------------------------------------------------------
def updateparameters(self, param1, param2, w):
""" This method is used for PSO algorithm.
Each parameter in the model has to do the following
two things:
(1) Update its velocity
(2) Update its value
"""
# Update parameter velocities
self.f.updatevelocity(param1.f, param2.f, w)
self.ddf.updatevelocity(param1.ddf, param2.ddf, w)
self.smax.updatevelocity(param1.smax, param2.smax, w)
self.qmax.updatevelocity(param1.qmax, param2.qmax, w)
self.mint.updatevelocity(param1.mint, param2.mint, w)
self.maxt.updatevelocity(param1.maxt, param2.maxt, w)
# Update parameter values
self.f.updatevalue()
self.ddf.updatevalue()
self.smax.updatevalue()
self.qmax.updatevalue()
self.mint.updatevalue()
self.maxt.updatevalue()
######################################################################
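# --- Illustrative sketch (appended; not part of the package) ---
# The particle-swarm update performed by updateparameters() above, written out
# for a single scalar parameter without the hydroutils.Parameter wrapper. The
# inertia weight w, the coefficients c1/c2 and the uniform random factors are
# the textbook PSO formulation and are assumptions here; the real constants
# live inside hydroutils.Parameter.
def pso_update(value, velocity, personal_best, global_best, w, c1=2.0, c2=2.0):
    """Return the updated (value, velocity) pair for one scalar parameter."""
    import random
    r1, r2 = random.random(), random.random()
    velocity = (w * velocity
                + c1 * r1 * (personal_best - value)
                + c2 * r2 * (global_best - value))
    return value + velocity, velocity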
|
[
"hydroutils.Parameter"
] |
[((501, 518), 'hydroutils.Parameter', 'Parameter', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (510, 518), False, 'from hydroutils import Parameter\n'), ((540, 564), 'hydroutils.Parameter', 'Parameter', (['(100.0)', '(1500.0)'], {}), '(100.0, 1500.0)\n', (549, 564), False, 'from hydroutils import Parameter\n'), ((586, 607), 'hydroutils.Parameter', 'Parameter', (['(10.0)', '(50.0)'], {}), '(10.0, 50.0)\n', (595, 607), False, 'from hydroutils import Parameter\n'), ((628, 647), 'hydroutils.Parameter', 'Parameter', (['(0.0)', '(5.0)'], {}), '(0.0, 5.0)\n', (637, 647), False, 'from hydroutils import Parameter\n'), ((669, 689), 'hydroutils.Parameter', 'Parameter', (['(-3.0)', '(0.0)'], {}), '(-3.0, 0.0)\n', (678, 689), False, 'from hydroutils import Parameter\n'), ((711, 730), 'hydroutils.Parameter', 'Parameter', (['(0.0)', '(3.0)'], {}), '(0.0, 3.0)\n', (720, 730), False, 'from hydroutils import Parameter\n')]
|
import torch
import pyro
from pyro.nn import pyro_method
from pyro.distributions import Normal, Bernoulli, TransformedDistribution
from pyro.distributions.conditional import ConditionalTransformedDistribution
from deepscm.distributions.transforms.affine import ConditionalAffineTransform
from pyro.nn import DenseNN
from deepscm.experiments.medical.ukbb.sem_vi.base_sem_experiment import BaseVISEM, MODEL_REGISTRY
class ConditionalVISEM(BaseVISEM):
context_dim = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
# ventricle_volume flow
ventricle_volume_net = DenseNN(2, [8, 16], param_dims=[1, 1], nonlinearity=torch.nn.LeakyReLU(.1))
self.ventricle_volume_flow_components = ConditionalAffineTransform(context_nn=ventricle_volume_net, event_dim=0)
self.ventricle_volume_flow_transforms = [self.ventricle_volume_flow_components, self.ventricle_volume_flow_constraint_transforms]
# brain_volume flow
brain_volume_net = DenseNN(2, [8, 16], param_dims=[1, 1], nonlinearity=torch.nn.LeakyReLU(.1))
self.brain_volume_flow_components = ConditionalAffineTransform(context_nn=brain_volume_net, event_dim=0)
self.brain_volume_flow_transforms = [self.brain_volume_flow_components, self.brain_volume_flow_constraint_transforms]
@pyro_method
def pgm_model(self):
sex_dist = Bernoulli(logits=self.sex_logits).to_event(1)
_ = self.sex_logits
sex = pyro.sample('sex', sex_dist)
age_base_dist = Normal(self.age_base_loc, self.age_base_scale).to_event(1)
age_dist = TransformedDistribution(age_base_dist, self.age_flow_transforms)
age = pyro.sample('age', age_dist)
age_ = self.age_flow_constraint_transforms.inv(age)
# pseudo call to thickness_flow_transforms to register with pyro
_ = self.age_flow_components
brain_context = torch.cat([sex, age_], 1)
brain_volume_base_dist = Normal(self.brain_volume_base_loc, self.brain_volume_base_scale).to_event(1)
brain_volume_dist = ConditionalTransformedDistribution(brain_volume_base_dist, self.brain_volume_flow_transforms).condition(brain_context)
brain_volume = pyro.sample('brain_volume', brain_volume_dist)
# pseudo call to intensity_flow_transforms to register with pyro
_ = self.brain_volume_flow_components
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
ventricle_context = torch.cat([age_, brain_volume_], 1)
ventricle_volume_base_dist = Normal(self.ventricle_volume_base_loc, self.ventricle_volume_base_scale).to_event(1)
ventricle_volume_dist = ConditionalTransformedDistribution(ventricle_volume_base_dist, self.ventricle_volume_flow_transforms).condition(ventricle_context) # noqa: E501
ventricle_volume = pyro.sample('ventricle_volume', ventricle_volume_dist)
# pseudo call to intensity_flow_transforms to register with pyro
_ = self.ventricle_volume_flow_components
return age, sex, ventricle_volume, brain_volume
@pyro_method
def model(self):
age, sex, ventricle_volume, brain_volume = self.pgm_model()
ventricle_volume_ = self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
z = pyro.sample('z', Normal(self.z_loc, self.z_scale).to_event(1))
latent = torch.cat([z, ventricle_volume_, brain_volume_], 1)
x_dist = self._get_transformed_x_dist(latent)
x = pyro.sample('x', x_dist)
return x, z, age, sex, ventricle_volume, brain_volume
@pyro_method
def guide(self, x, age, sex, ventricle_volume, brain_volume):
with pyro.plate('observations', x.shape[0]):
hidden = self.encoder(x)
ventricle_volume_ = self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
hidden = torch.cat([hidden, ventricle_volume_, brain_volume_], 1)
latent_dist = self.latent_encoder.predict(hidden)
z = pyro.sample('z', latent_dist)
return z
MODEL_REGISTRY[ConditionalVISEM.__name__] = ConditionalVISEM
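# --- Illustrative sketch (appended; not part of the SEM) ---
# A minimal standalone version of the top of the causal graph in pgm_model():
# sex ~ Bernoulli, age ~ Normal, and a downstream variable whose location
# depends on both. All names, shapes and coefficients below are made up for
# illustration; the real model uses conditional affine flows rather than a
# hand-written linear relationship.
def _toy_pgm():
    sex = pyro.sample('toy_sex', Bernoulli(probs=torch.tensor(0.5)))
    age = pyro.sample('toy_age', Normal(torch.tensor(65.0), torch.tensor(10.0)))
    brain_volume = pyro.sample(
        'toy_brain_volume',
        Normal(1200.0 - 2.0 * age + 50.0 * sex, torch.tensor(30.0)))
    return sex, age, brain_volume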
|
[
"deepscm.distributions.transforms.affine.ConditionalAffineTransform",
"pyro.plate",
"pyro.distributions.TransformedDistribution",
"pyro.distributions.Normal",
"torch.cat",
"pyro.sample",
"pyro.distributions.Bernoulli",
"pyro.distributions.conditional.ConditionalTransformedDistribution",
"torch.nn.LeakyReLU"
] |
[((731, 803), 'deepscm.distributions.transforms.affine.ConditionalAffineTransform', 'ConditionalAffineTransform', ([], {'context_nn': 'ventricle_volume_net', 'event_dim': '(0)'}), '(context_nn=ventricle_volume_net, event_dim=0)\n', (757, 803), False, 'from deepscm.distributions.transforms.affine import ConditionalAffineTransform\n'), ((1118, 1186), 'deepscm.distributions.transforms.affine.ConditionalAffineTransform', 'ConditionalAffineTransform', ([], {'context_nn': 'brain_volume_net', 'event_dim': '(0)'}), '(context_nn=brain_volume_net, event_dim=0)\n', (1144, 1186), False, 'from deepscm.distributions.transforms.affine import ConditionalAffineTransform\n'), ((1465, 1493), 'pyro.sample', 'pyro.sample', (['"""sex"""', 'sex_dist'], {}), "('sex', sex_dist)\n", (1476, 1493), False, 'import pyro\n'), ((1597, 1661), 'pyro.distributions.TransformedDistribution', 'TransformedDistribution', (['age_base_dist', 'self.age_flow_transforms'], {}), '(age_base_dist, self.age_flow_transforms)\n', (1620, 1661), False, 'from pyro.distributions import Normal, Bernoulli, TransformedDistribution\n'), ((1677, 1705), 'pyro.sample', 'pyro.sample', (['"""age"""', 'age_dist'], {}), "('age', age_dist)\n", (1688, 1705), False, 'import pyro\n'), ((1901, 1926), 'torch.cat', 'torch.cat', (['[sex, age_]', '(1)'], {}), '([sex, age_], 1)\n', (1910, 1926), False, 'import torch\n'), ((2209, 2255), 'pyro.sample', 'pyro.sample', (['"""brain_volume"""', 'brain_volume_dist'], {}), "('brain_volume', brain_volume_dist)\n", (2220, 2255), False, 'import pyro\n'), ((2492, 2527), 'torch.cat', 'torch.cat', (['[age_, brain_volume_]', '(1)'], {}), '([age_, brain_volume_], 1)\n', (2501, 2527), False, 'import torch\n'), ((2856, 2910), 'pyro.sample', 'pyro.sample', (['"""ventricle_volume"""', 'ventricle_volume_dist'], {}), "('ventricle_volume', ventricle_volume_dist)\n", (2867, 2910), False, 'import pyro\n'), ((3479, 3530), 'torch.cat', 'torch.cat', (['[z, ventricle_volume_, brain_volume_]', '(1)'], {}), '([z, ventricle_volume_, brain_volume_], 1)\n', (3488, 3530), False, 'import torch\n'), ((3599, 3623), 'pyro.sample', 'pyro.sample', (['"""x"""', 'x_dist'], {}), "('x', x_dist)\n", (3610, 3623), False, 'import pyro\n'), ((3784, 3822), 'pyro.plate', 'pyro.plate', (['"""observations"""', 'x.shape[0]'], {}), "('observations', x.shape[0])\n", (3794, 3822), False, 'import pyro\n'), ((4078, 4134), 'torch.cat', 'torch.cat', (['[hidden, ventricle_volume_, brain_volume_]', '(1)'], {}), '([hidden, ventricle_volume_, brain_volume_], 1)\n', (4087, 4134), False, 'import torch\n'), ((4215, 4244), 'pyro.sample', 'pyro.sample', (['"""z"""', 'latent_dist'], {}), "('z', latent_dist)\n", (4226, 4244), False, 'import pyro\n'), ((659, 682), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (677, 682), False, 'import torch\n'), ((1050, 1073), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1068, 1073), False, 'import torch\n'), ((1375, 1408), 'pyro.distributions.Bernoulli', 'Bernoulli', ([], {'logits': 'self.sex_logits'}), '(logits=self.sex_logits)\n', (1384, 1408), False, 'from pyro.distributions import Normal, Bernoulli, TransformedDistribution\n'), ((1519, 1565), 'pyro.distributions.Normal', 'Normal', (['self.age_base_loc', 'self.age_base_scale'], {}), '(self.age_base_loc, self.age_base_scale)\n', (1525, 1565), False, 'from pyro.distributions import Normal, Bernoulli, TransformedDistribution\n'), ((1961, 2025), 'pyro.distributions.Normal', 'Normal', (['self.brain_volume_base_loc', 'self.brain_volume_base_scale'], 
{}), '(self.brain_volume_base_loc, self.brain_volume_base_scale)\n', (1967, 2025), False, 'from pyro.distributions import Normal, Bernoulli, TransformedDistribution\n'), ((2066, 2164), 'pyro.distributions.conditional.ConditionalTransformedDistribution', 'ConditionalTransformedDistribution', (['brain_volume_base_dist', 'self.brain_volume_flow_transforms'], {}), '(brain_volume_base_dist, self.\n brain_volume_flow_transforms)\n', (2100, 2164), False, 'from pyro.distributions.conditional import ConditionalTransformedDistribution\n'), ((2566, 2638), 'pyro.distributions.Normal', 'Normal', (['self.ventricle_volume_base_loc', 'self.ventricle_volume_base_scale'], {}), '(self.ventricle_volume_base_loc, self.ventricle_volume_base_scale)\n', (2572, 2638), False, 'from pyro.distributions import Normal, Bernoulli, TransformedDistribution\n'), ((2683, 2789), 'pyro.distributions.conditional.ConditionalTransformedDistribution', 'ConditionalTransformedDistribution', (['ventricle_volume_base_dist', 'self.ventricle_volume_flow_transforms'], {}), '(ventricle_volume_base_dist, self.\n ventricle_volume_flow_transforms)\n', (2717, 2789), False, 'from pyro.distributions.conditional import ConditionalTransformedDistribution\n'), ((3415, 3447), 'pyro.distributions.Normal', 'Normal', (['self.z_loc', 'self.z_scale'], {}), '(self.z_loc, self.z_scale)\n', (3421, 3447), False, 'from pyro.distributions import Normal, Bernoulli, TransformedDistribution\n')]
|
"""Helpers for Bayesian Modelling.
"""
import inspect
class NoOpContext:
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
pass
class Model(NoOpContext):
"""Definition of a model
Usage::
with bayes.Model() as model:
@model.observe
def _(s):
s.x = tf.placeholder(dtype=floatx, shape=[None, 1], name='x')
s.y = tf.placeholder(dtype=floatx, shape=[None], name='y')
@model.define
def _(s, lam=0.5):
s.p.w = tf.distributions.Normal(loc=0.0, scale=1.0 / lam)
s.p.y = tf.distributions.Normal(loc=tf.squeeze(s.x @ s.w[:, None]), scale=1.0)
@model.inference
def _(s):
_, n_features = get_shape(s.x)
s.q.w = tf.distributions.Normal(
loc=tf.get_variable('w_loc', shape=[n_features], dtype=floatx),
scale=tf.nn.softplus(tf.get_variable('w_scale', shape=[n_features], dtype=floatx)),
)
"""
def __init__(self):
self._scope = {"observed": {}}
self._observed = None
self._definition = None
self._inference = None
self._built = False
def observe(self, func):
self._observed = func
return func
def define(self, func):
self._definition = func
return func
def inference(self, func):
self._inference = func
return func
# TODO: deprecate
def __getitem__(self, key):
self._ensure_observed()
if isinstance(key, tuple):
return tuple(self._scope["observed"][k] for k in key)
return self._scope["observed"][key]
def get(self, *args, **kwargs):
kwargs.setdefault("ensure_loss", "loss" in args)
scope = self.build(**kwargs)
res = []
for k in args:
if k == "loss":
res.append(scope["loss"])
elif k in scope["observed"]:
res.append(scope["observed"][k])
elif k in scope["latent"]:
res.append(scope["latent"][k])
else:
raise KeyError(f"cannot get {k}")
return res[0] if len(res) == 1 else tuple(res)
def build(self, scope=None, latent_strategy=None, ensure_loss=True):
import tensorflow as tf
if scope is None:
scope = {}
if latent_strategy is None:
latent_strategy = sample_latent
self._ensure_observed()
scope = dict(self._scope, **scope)
scope = Scope(scope, latent_strategy=latent_strategy)
with tf.variable_scope("inference", reuse=tf.AUTO_REUSE):
self._inference(scope)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
scope._scope["loss"] = self._definition(scope)
if ensure_loss and scope._scope["loss"] is None:
scope._scope["loss"] = _build_kl_loss(scope._scope)
return scope.get()
def _ensure_observed(self):
if self._built:
return
if self._observed is None:
self._built = True
return
with DictWrapper(self._scope["observed"]) as s:
self._observed(s)
self._built = True
class DictWrapper(NoOpContext):
def __init__(self, d):
super().__setattr__("_target", d)
def __setattr__(self, k, v):
self._target[k] = v
def __getattr__(self, k):
try:
return self._target[k]
except KeyError:
raise AttributeError(k)
class Scope:
def __init__(self, scope, latent_strategy=None):
if latent_strategy is None:
latent_strategy = sample_latent
self._scope = dict(scope)
self._scope.setdefault("q", {})
self._scope.setdefault("p", {})
self._scope.setdefault("latent", {})
self._latent_strategy = latent_strategy
def __getattr__(self, key):
scope = self._scope
if key in scope["latent"]:
return scope["latent"][key]
if key in scope["observed"]:
return scope["observed"][key]
if key in scope["q"]:
self._latent_strategy(scope, key)
return scope["latent"][key]
raise AttributeError(key)
def get(self):
return self._scope
@property
def p(self):
return DictWrapper(self._scope["p"])
@property
def q(self):
return DictWrapper(self._scope["q"])
def build(model, *defs, latent_strategy=None):
scope = model.build(latent_strategy=latent_strategy)
if not defs:
return scope
res = []
for f in defs:
spec = inspect.getfullargspec(f)
# TODO: raise error for unsupported features
args = [_lookup_dist(scope, arg) for arg in spec.args]
res.append(f(*args))
if len(defs) == 1:
return res[0]
return tuple(res)
def _lookup_dist(scope, k):
return scope["p"][k] if k in scope["observed"] else scope["q"][k]
def sample_prior(scope, key):
scope["latent"][key] = scope["p"][key].sample()
def sample_latent(scope, key):
scope["latent"][key] = scope["q"][key].sample()
def sample_latent_no_grad(scope, key):
import tensorflow as tf
scope["latent"][key] = tf.stop_gradient(scope["q"][key].sample())
def average_latent(scope, key):
scope["latent"][key] = scope["q"][key].mean()
def build_reparam_loss(model):
import tensorflow as tf
# TODO: raise warning if non-re-parametrizable
scope = (
model if isinstance(model, dict) else model.build(latent_strategy=sample_latent)
)
loss = tf.reduce_mean(scope["loss"])
return loss, loss
def build_score_loss(model, var_list=None):
import tensorflow as tf
scope = (
model
if isinstance(model, dict)
else model.build(latent_strategy=sample_latent_no_grad)
)
if var_list is None:
var_list = tf.trainable_variables()
grad_q = 0
for k, q in scope["q"].items():
v = scope["latent"][k]
grad_q += q.log_prob(v)
return (
tf.reduce_mean(scope["loss"]),
tf.reduce_mean(scope["loss"] + tf.stop_gradient(scope["loss"]) * grad_q),
)
def relax_bernoulli(p, temperature=1.0):
"""Create a relaxed sample from a Bernoulli distribution.
:param tf.distributions.Bernoulli p:
the bernoulli distribution from which to sample
:param float temperature:
the temperature used for relaxed quantities
:returns:
a triple of sampled variable, relaxed variable and relaxed variable
conditioned on the non-relaxed variable.
"""
import tensorflow as tf
u = tf.random_uniform(tf.shape(p.probs))
z = tf.log(p.probs / (1.0 - p.probs)) + tf.log(u / (1.0 - u))
b = tf.cast(z > 0, dtype=z.dtype)
b = tf.stop_gradient(b)
b_relaxed = tf.sigmoid(z / temperature)
nu = tf.random_uniform(tf.shape(b))
nu_cond = (nu * (1 - p.probs)) * (1 - b) + (1 - p.probs * nu) * b
z_cond = tf.log(p.probs / (1.0 - p.probs)) + tf.log(nu_cond / (1.0 - nu_cond))
b_cond_relaxed = tf.sigmoid(z_cond / temperature)
return b, b_relaxed, b_cond_relaxed
def relax_categorical(p, temperature=1.0):
"""Create a relaxed sample from a OneHotCategorical distribution.
:param Union[tf.distributions.Mutltinomial,tf.contrib.distributions.OneHotCategorical] p:
the categorical distribution from which to sample. If specified as a
Multinomial, the total count has to be 1.
:param float temperature:
the temperature used for relaxed quantities
:returns:
a triple of sampled variable, relaxed variable and relaxed variable
conditioned on the non-relaxed variable.
"""
import tensorflow as tf
if isinstance(p, tf.distributions.Multinomial):
control_deps = [
tf.assert_equal(p.total_count, 1.0, message="can only relax categoricals")
]
event_size = tf.shape(p.probs)[-1]
else:
control_deps = []
event_size = p.event_size
z = tf.log(p.probs) - tf.log(-tf.log(tf.random_uniform(tf.shape(p.probs))))
b = tf.argmax(z, axis=-1)
b = tf.one_hot(b, event_size)
with tf.control_dependencies(control_deps):
b = tf.stop_gradient(b)
b_relaxed = tf.nn.softmax(z / temperature)
alpha = (1.0 - p.probs) / p.probs
theta_b = tf.reduce_sum(p.probs * b, keep_dims=True, axis=-1)
u_i_exp = 1 - b
u_b_exp = b + (1 - b) * p.probs / theta_b
u_b = tf.random_uniform(tf.shape(p.probs)) ** (1.0 / (1.0 + alpha))
u_b = tf.reduce_sum(u_b * b, keep_dims=True, axis=-1)
u_i = tf.random_uniform(tf.shape(p.probs))
u_cond = (u_i ** u_i_exp) * (u_b ** u_b_exp)
z_cond = tf.log(p.probs) - tf.log(-tf.log(u_cond))
b_cond_relaxed = tf.nn.softmax(z_cond / temperature)
return b, b_relaxed, b_cond_relaxed
def build_relax_loss(model):
"""Build the RELAX loss.
    Described in <NAME> et al., "Backpropagation through the Void:
Optimizing control variates for black-box gradient estimation", 2017,
found at ``https://arxiv.org/abs/1711.00123``.
:param Model model:
the model to build the REBAR loss for
:returns:
a pair of loss and train loss
"""
import tensorflow as tf
scope = model.build(latent_strategy=relax_latent_strategy)
scope_cond_relaxed = dict(
scope, latent=scope["latent_cond_relaxed"].copy(), p={}, loss=None
)
scope_cond_relaxed = model.build(
scope=scope_cond_relaxed, latent_strategy=raise_latent_strategy
)
scope_relaxed = dict(scope, latent=scope["latent_relaxed"].copy(), p={}, loss=None)
scope_relaxed = model.build(
scope=scope_relaxed, latent_strategy=raise_latent_strategy
)
grad_q = 0
for k, q in scope["q"].items():
v = scope["latent"][k]
grad_q += q.log_prob(v)
loss = (
scope["loss"]
+ tf.stop_gradient(scope["loss"] - scope_cond_relaxed["loss"]) * grad_q
+ scope_relaxed["loss"]
- scope_cond_relaxed["loss"]
)
return tf.reduce_mean(scope["loss"]), tf.reduce_mean(loss)
def relax_latent_strategy(scope, key):
import tensorflow as tf
p = scope["q"][key]
if isinstance(p, tf.distributions.Bernoulli):
v, v_relaxed, v_cond_relaxed = relax_bernoulli(p)
elif isinstance(p, tf.distributions.Multinomial):
v, v_relaxed, v_cond_relaxed = relax_categorical(p)
elif isinstance(p, tf.contrib.distributions.OneHotCategorical):
v, v_relaxed, v_cond_relaxed = relax_categorical(p)
elif isinstance(p, tf.distributions.Categorical):
raise NotImplementedError(
"use Multinomial with total_count = 1 or OneHotCategorical"
)
else:
v = v_relaxed = v_cond_relaxed = p.sample()
v = tf.stop_gradient(v)
scope.setdefault("latent", {})[key] = v
scope.setdefault("latent_relaxed", {})[key] = v_relaxed
scope.setdefault("latent_cond_relaxed", {})[key] = v_cond_relaxed
def raise_latent_strategy(scope, key):
"""Raise for non-existing latent variables"""
raise RuntimeError(f"latent variable {key} does not exit")
def _build_kl_loss(scope):
loss = 0
for k, p in scope["p"].items():
if k in scope["latent"]:
v = scope["latent"][k]
else:
v = scope["observed"][k]
loss += p.log_prob(v)
for q in scope["q"].values():
loss += q.entropy()
return -loss
|
[
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.reduce_sum",
"inspect.getfullargspec",
"tensorflow.trainable_variables",
"tensorflow.control_dependencies",
"tensorflow.argmax",
"tensorflow.stop_gradient",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.log",
"tensorflow.sigmoid",
"tensorflow.assert_equal"
] |
[((5711, 5740), 'tensorflow.reduce_mean', 'tf.reduce_mean', (["scope['loss']"], {}), "(scope['loss'])\n", (5725, 5740), True, 'import tensorflow as tf\n'), ((6879, 6908), 'tensorflow.cast', 'tf.cast', (['(z > 0)'], {'dtype': 'z.dtype'}), '(z > 0, dtype=z.dtype)\n', (6886, 6908), True, 'import tensorflow as tf\n'), ((6917, 6936), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['b'], {}), '(b)\n', (6933, 6936), True, 'import tensorflow as tf\n'), ((6954, 6981), 'tensorflow.sigmoid', 'tf.sigmoid', (['(z / temperature)'], {}), '(z / temperature)\n', (6964, 6981), True, 'import tensorflow as tf\n'), ((7198, 7230), 'tensorflow.sigmoid', 'tf.sigmoid', (['(z_cond / temperature)'], {}), '(z_cond / temperature)\n', (7208, 7230), True, 'import tensorflow as tf\n'), ((8247, 8268), 'tensorflow.argmax', 'tf.argmax', (['z'], {'axis': '(-1)'}), '(z, axis=-1)\n', (8256, 8268), True, 'import tensorflow as tf\n'), ((8277, 8302), 'tensorflow.one_hot', 'tf.one_hot', (['b', 'event_size'], {}), '(b, event_size)\n', (8287, 8302), True, 'import tensorflow as tf\n'), ((8401, 8431), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(z / temperature)'], {}), '(z / temperature)\n', (8414, 8431), True, 'import tensorflow as tf\n'), ((8485, 8536), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(p.probs * b)'], {'keep_dims': '(True)', 'axis': '(-1)'}), '(p.probs * b, keep_dims=True, axis=-1)\n', (8498, 8536), True, 'import tensorflow as tf\n'), ((8687, 8734), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(u_b * b)'], {'keep_dims': '(True)', 'axis': '(-1)'}), '(u_b * b, keep_dims=True, axis=-1)\n', (8700, 8734), True, 'import tensorflow as tf\n'), ((8911, 8946), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(z_cond / temperature)'], {}), '(z_cond / temperature)\n', (8924, 8946), True, 'import tensorflow as tf\n'), ((4744, 4769), 'inspect.getfullargspec', 'inspect.getfullargspec', (['f'], {}), '(f)\n', (4766, 4769), False, 'import inspect\n'), ((6016, 6040), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6038, 6040), True, 'import tensorflow as tf\n'), ((6178, 6207), 'tensorflow.reduce_mean', 'tf.reduce_mean', (["scope['loss']"], {}), "(scope['loss'])\n", (6192, 6207), True, 'import tensorflow as tf\n'), ((6786, 6803), 'tensorflow.shape', 'tf.shape', (['p.probs'], {}), '(p.probs)\n', (6794, 6803), True, 'import tensorflow as tf\n'), ((6813, 6846), 'tensorflow.log', 'tf.log', (['(p.probs / (1.0 - p.probs))'], {}), '(p.probs / (1.0 - p.probs))\n', (6819, 6846), True, 'import tensorflow as tf\n'), ((6849, 6870), 'tensorflow.log', 'tf.log', (['(u / (1.0 - u))'], {}), '(u / (1.0 - u))\n', (6855, 6870), True, 'import tensorflow as tf\n'), ((7010, 7021), 'tensorflow.shape', 'tf.shape', (['b'], {}), '(b)\n', (7018, 7021), True, 'import tensorflow as tf\n'), ((7106, 7139), 'tensorflow.log', 'tf.log', (['(p.probs / (1.0 - p.probs))'], {}), '(p.probs / (1.0 - p.probs))\n', (7112, 7139), True, 'import tensorflow as tf\n'), ((7142, 7175), 'tensorflow.log', 'tf.log', (['(nu_cond / (1.0 - nu_cond))'], {}), '(nu_cond / (1.0 - nu_cond))\n', (7148, 7175), True, 'import tensorflow as tf\n'), ((8166, 8181), 'tensorflow.log', 'tf.log', (['p.probs'], {}), '(p.probs)\n', (8172, 8181), True, 'import tensorflow as tf\n'), ((8313, 8350), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['control_deps'], {}), '(control_deps)\n', (8336, 8350), True, 'import tensorflow as tf\n'), ((8364, 8383), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['b'], {}), '(b)\n', (8380, 8383), True, 'import tensorflow as tf\n'), 
((8764, 8781), 'tensorflow.shape', 'tf.shape', (['p.probs'], {}), '(p.probs)\n', (8772, 8781), True, 'import tensorflow as tf\n'), ((8847, 8862), 'tensorflow.log', 'tf.log', (['p.probs'], {}), '(p.probs)\n', (8853, 8862), True, 'import tensorflow as tf\n'), ((10205, 10234), 'tensorflow.reduce_mean', 'tf.reduce_mean', (["scope['loss']"], {}), "(scope['loss'])\n", (10219, 10234), True, 'import tensorflow as tf\n'), ((10236, 10256), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (10250, 10256), True, 'import tensorflow as tf\n'), ((2682, 2733), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""inference"""'], {'reuse': 'tf.AUTO_REUSE'}), "('inference', reuse=tf.AUTO_REUSE)\n", (2699, 2733), True, 'import tensorflow as tf\n'), ((2784, 2831), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('model', reuse=tf.AUTO_REUSE)\n", (2801, 2831), True, 'import tensorflow as tf\n'), ((7958, 8032), 'tensorflow.assert_equal', 'tf.assert_equal', (['p.total_count', '(1.0)'], {'message': '"""can only relax categoricals"""'}), "(p.total_count, 1.0, message='can only relax categoricals')\n", (7973, 8032), True, 'import tensorflow as tf\n'), ((8064, 8081), 'tensorflow.shape', 'tf.shape', (['p.probs'], {}), '(p.probs)\n', (8072, 8081), True, 'import tensorflow as tf\n'), ((8633, 8650), 'tensorflow.shape', 'tf.shape', (['p.probs'], {}), '(p.probs)\n', (8641, 8650), True, 'import tensorflow as tf\n'), ((8873, 8887), 'tensorflow.log', 'tf.log', (['u_cond'], {}), '(u_cond)\n', (8879, 8887), True, 'import tensorflow as tf\n'), ((6248, 6279), 'tensorflow.stop_gradient', 'tf.stop_gradient', (["scope['loss']"], {}), "(scope['loss'])\n", (6264, 6279), True, 'import tensorflow as tf\n'), ((10048, 10108), 'tensorflow.stop_gradient', 'tf.stop_gradient', (["(scope['loss'] - scope_cond_relaxed['loss'])"], {}), "(scope['loss'] - scope_cond_relaxed['loss'])\n", (10064, 10108), True, 'import tensorflow as tf\n'), ((10951, 10970), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['v'], {}), '(v)\n', (10967, 10970), True, 'import tensorflow as tf\n'), ((8217, 8234), 'tensorflow.shape', 'tf.shape', (['p.probs'], {}), '(p.probs)\n', (8225, 8234), True, 'import tensorflow as tf\n')]
|
from base64 import b64encode
from ipykernel.comm import Comm
from IPython import get_ipython
import io
from io import BytesIO
import urllib.request, urllib.parse, urllib.error
_comm=None
def is_notebook():
iPython=get_ipython()
if iPython is None or not iPython.config:
return False
return 'IPKernelApp' in iPython.config
def connect():
"""
establish connection to frontend notebook
"""
if not is_notebook():
print('Python session is not running in a Notebook Kernel')
return
global _comm
kernel=get_ipython().kernel
kernel.comm_manager.register_target('tdb',handle_comm_opened)
# initiate connection to frontend.
_comm=Comm(target_name='tdb',data={})
# bind recv handler
_comm.on_msg(None)
def send_action(action, params=None):
"""
helper method for sending actions
"""
data={"msg_type":"action", "action":action}
if params is not None:
data['params']=params
_comm.send(data)
def send_fig(fig,name):
"""
sends figure to frontend
"""
imgdata = BytesIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.parse.quote(b64encode(imgdata.getvalue()))
send_action("update_plot",params={"src":uri, "name":name})
# handler messages
def handle_comm_opened(msg):
# this won't appear in the notebook
print('comm opened')
print(msg)
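# --- Illustrative usage sketch (appended; illustration only) ---
# Sending a matplotlib figure to the frontend with the helpers above. This only
# works inside a running notebook kernel (is_notebook() must return True), so it
# is wrapped in a function rather than executed at import time.
def demo_send_fig():
    import matplotlib.pyplot as plt
    connect()
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    send_fig(fig, 'demo')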
|
[
"IPython.get_ipython",
"io.BytesIO",
"ipykernel.comm.Comm"
] |
[((219, 232), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (230, 232), False, 'from IPython import get_ipython\n'), ((646, 678), 'ipykernel.comm.Comm', 'Comm', ([], {'target_name': '"""tdb"""', 'data': '{}'}), "(target_name='tdb', data={})\n", (650, 678), False, 'from ipykernel.comm import Comm\n'), ((986, 995), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (993, 995), False, 'from io import BytesIO\n'), ((519, 532), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (530, 532), False, 'from IPython import get_ipython\n')]
|
from pathlib import Path
from typing import MutableMapping, Any
from coveo_styles.styles import ExitWithFailure
import toml
from toml import TomlDecodeError
def load_toml_from_path(toml_path: Path) -> MutableMapping[str, Any]:
"""Loads a toml from path or raise ExitWithFailure on failure."""
return _load_toml_from_content(toml_path.read_text(), toml_path)
def _load_toml_from_content(toml_content: str, toml_path: Path) -> MutableMapping[str, Any]:
try:
return toml.loads(toml_content)
except TomlDecodeError as ex:
lineno, colno = ex.lineno, ex.colno # type: ignore
raise ExitWithFailure(suggestions=f"{toml_path}:{lineno}:{colno} parse error") from ex
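# --- Illustrative usage sketch (appended; the file name below is hypothetical) ---
def _example_load() -> MutableMapping[str, Any]:
    # Raises ExitWithFailure with a "<path>:<line>:<col> parse error" suggestion
    # when the file is not valid TOML.
    return load_toml_from_path(Path("pyproject.toml"))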
|
[
"toml.loads",
"coveo_styles.styles.ExitWithFailure"
] |
[((488, 512), 'toml.loads', 'toml.loads', (['toml_content'], {}), '(toml_content)\n', (498, 512), False, 'import toml\n'), ((621, 693), 'coveo_styles.styles.ExitWithFailure', 'ExitWithFailure', ([], {'suggestions': 'f"""{toml_path}:{lineno}:{colno} parse error"""'}), "(suggestions=f'{toml_path}:{lineno}:{colno} parse error')\n", (636, 693), False, 'from coveo_styles.styles import ExitWithFailure\n')]
|
from vector import Vector
from movement import movements
def dest_rank(text):
i = len(text) -1
while i >= 0:
if text[i].isdigit():
return ord(text[i]) - 48 - 1
else: i -= 1
raise Exception("No number found in " + text + ".")
def dest_file(text):
i = len(text) - 1
while i >= 0:
if text[i].islower():
return ord(text[i]) - 96 - 1
else: i -= 1
raise Exception("No lowercase char found in " + text + ".")
def convert_file(c):
return ord(c) - 96 - 1
def convert_rank(c):
return ord(c) - 48 - 1
def search_pieces(board, type, file=None, rank=None):
squares = []
file_range = []
rank_range = []
if file != None:
file_range = [file]
else: file_range = range(8)
if rank != None:
rank_range = [rank]
else: rank_range = range(8)
for rank in rank_range:
for file in file_range:
if board.squares[file][rank] == type:
                squares.append([file, rank])
return squares
def clear_path(board, orig, dest, vector):
square = orig[:]
i = 1
while i < 8: # safety measure to prevent infinite loop
square[0] += vector.dx
square[1] += vector.dy
if square[0] == dest[0] and square[1] == dest[1]:
return True
if board.squares[square[0]][square[1]] == ' ':
i += 1
else: return False
def origin_hint(move):
offset = 1 if 'x' in move else 0 # capture
if len(move) < (4 + offset) or move[2 + offset].isdigit():
return None
else: return move[1]
# //http://en.wikipedia.org/wiki/Portable_Game_Notation
def read_algebraic(board, move):
input = None
if move[0] == 'O': # castle
if board.side_to_move == -1:
if move == "O-O": # short
board.squares[5][7] = 'r'
board.squares[6][7] = 'k'
board.squares[7][7] = ' '
board.squares[4][7] = ' '
input = ([4, 7], [6, 7])
elif move == "O-O-O": # long
board.squares[3][7] = 'r'
board.squares[2][7] = 'k'
board.squares[0][7] = ' '
board.squares[4][7] = ' '
input = ([4, 7], [2, 7])
else:
if move == "O-O": # short
board.squares[5][0] = 'R'
board.squares[6][0] = 'K'
board.squares[7][0] = ' '
board.squares[4][0] = ' '
input = ([4, 0], [6, 0])
elif move == "O-O-O": # long
board.squares[3][0] = 'R'
board.squares[2][0] = 'K'
board.squares[0][0] = ' '
board.squares[4][0] = ' '
input = ([4, 0], [2, 0])
else: # not castle
# if 'x' in move: # capture
# captured = self.squares[destfile][destrank]
# if captured == None:
# raise Exception("No piece to capture on " + dest.ToString() + ".")
# self.squares[destfile][destrank] = ' '
destrank = dest_rank(move)
destfile = dest_file(move)
origfile = None
origrank = None
if move[0].islower(): # pawn move
pawns = None
origfile = convert_file(move[0])
if board.side_to_move == 1:
pawns = search_pieces(board, 'P', file=origfile)
else: pawns = search_pieces(board, 'p', file=origfile)
if move[1] == 'x': # capture
origrank = destrank - board.side_to_move
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
else: # not a capture
if len(pawns) == 1: # only one pawn in file
origfile = pawns[0][0]
origrank = pawns[0][1]
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
else: # find pawn closest to destination
i = 1
while i < 8:
origrank = destrank - i * board.side_to_move
if board.squares[origfile][origrank] != ' ':
break
i += 1
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
if move.find('=') != -1: # promotion
pos = move.index('=')
if board.side_to_move == 1:
board.squares[destfile][destrank] = move[pos+1]
else: board.squares[destfile][destrank] = move[pos+1].lower()
else: # piece move
if board.side_to_move == 1:
squares = search_pieces(board, move[0])
else: squares = search_pieces(board, move[0].lower())
if len(squares) == 1: # only one piece
origfile = squares[0][0]
origrank = squares[0][1]
else: # find origin square
orig = None
hint = origin_hint(move)
if not hint: # only one candidate piece
for file, rank in squares:
vector = Vector.create(file, rank, destfile, destrank)
squares = search_pieces(board, move[0])
piece = board.squares[file][rank]
if movements[piece].is_sliding:
vector.normalize()
for v in movements[piece].vectors:
# print move, board.side_to_move, '.', v.dx, v.dy, '|', vector.dx, vector.dy
if v == vector and clear_path(board, [file, rank], [destfile, destrank], v):
orig = [file, rank]
break
else: # several candidates pieces
if hint.isdigit(): # hint is rank
for square in squares:
if square[1] == convert_rank(hint):
orig = square
break
else: # hint is file
for square in squares:
if square[0] == convert_file(hint):
orig = square
break
origfile = orig[0]
origrank = orig[1]
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
board.switch_turn()
# print move, board.to_fen()
return input
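# --- Illustrative examples (appended; not part of the original module) ---
# How the coordinate helpers above map algebraic notation to 0-based indices.
def _coordinate_examples():
    assert dest_file("Nf3") == 5 and dest_rank("Nf3") == 2    # f-file, 3rd rank
    assert convert_file("a") == 0 and convert_rank("8") == 7  # a-file, 8th rank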
|
[
"vector.Vector.create"
] |
[((5106, 5151), 'vector.Vector.create', 'Vector.create', (['file', 'rank', 'destfile', 'destrank'], {}), '(file, rank, destfile, destrank)\n', (5119, 5151), False, 'from vector import Vector\n')]
|
import unittest
import os
import matplotlib.pyplot as plt
import numpy as np
from lorenz.lorenz import make_dataset, plot_3d
import lorenz.dataset
class TestDataset(unittest.TestCase):
def setUp(self):
self.path = os.path.join(os.path.split(os.path.split(os.path.dirname(__file__))[0])[0], 'data', 'lorenz.h5')
def test_random_iterator_1d(self):
rng = np.random.RandomState(1729)
dataset = lorenz.dataset.Dataset(self.path, view='1d')
for b in dataset.random_iterator(4, 100):
plt.plot(np.linspace(0,1,b.shape[1]), b[:,:,0].T)
plt.show()
#plot_3d(b)
def test_random_iterator_3d(self):
rng = np.random.RandomState(1729)
dataset = lorenz.dataset.Dataset(self.path, view='3d')
for b in dataset.random_iterator(4, 100):
plot_3d(b)
|
[
"matplotlib.pyplot.show",
"lorenz.lorenz.plot_3d",
"os.path.dirname",
"numpy.random.RandomState",
"numpy.linspace"
] |
[((380, 407), 'numpy.random.RandomState', 'np.random.RandomState', (['(1729)'], {}), '(1729)\n', (401, 407), True, 'import numpy as np\n'), ((684, 711), 'numpy.random.RandomState', 'np.random.RandomState', (['(1729)'], {}), '(1729)\n', (705, 711), True, 'import numpy as np\n'), ((595, 605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (603, 605), True, 'import matplotlib.pyplot as plt\n'), ((837, 847), 'lorenz.lorenz.plot_3d', 'plot_3d', (['b'], {}), '(b)\n', (844, 847), False, 'from lorenz.lorenz import make_dataset, plot_3d\n'), ((542, 571), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'b.shape[1]'], {}), '(0, 1, b.shape[1])\n', (553, 571), True, 'import numpy as np\n'), ((270, 295), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (285, 295), False, 'import os\n')]
|
# Library file on the Computer.
# Must be in the same directory as any file using its functions.
import socket
import struct
import sys
from threading import Thread, Event
from binascii import crc_hqx
class CompTalk:
def __init__(self, host):
# Variables that define the communication
self.buffer = 1024
self.CRCValue = 0x61
# The __init__ mainly searches for and establishes the connection
port = 12345 # Arbitrary, will be reassigned by the connection.
print('Attempting to connect using ', host)
try:
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind((host, port))
except:
sys.exit('Client IP Address was not valid. Check that the correct IP address was entered')
try:
print('Waiting for connection from host')
soc.listen(1)
self.conn, addr = soc.accept()
except:
            print('Connection request timed out.')
print('Connected by ', addr[0])
print('Press [ctrl + C] on Pi to stop\n')
self.dataStream = []
def _flatten( self, array):
# Takes a multi-dimensional array and flattens it to 1 dimension
return sum( ([x] if not isinstance(x, list) else self._flatten(x) for x in array), [] )
def _convert2list( self, list):
# Unpacks and structures the sent data into a list of the correct number of rows/columns/dimensions
dim = []
# Extract the dimensions of the array
# Format: [Number of Dimensions, Length of Dim1, Length of Dim2, ...Length of DimN, ...Data to be unpacked...]
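        # Worked example (hypothetical payload, not from the original sample):
        #   list = [2, 2, 3, 1, 2, 3, 4, 5, 6] -> 2 dimensions of lengths [2, 3],
        #   values [1, 2, 3, 4, 5, 6], so the method returns [[1, 2, 3], [4, 5, 6]]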
dimLength = list[0]
for i in range(dimLength):
# Add 1 to skip the first element which defines dim length
dim.append(list[i + 1])
values = list[dimLength+1:]
        # Define an iterator and build the structure of the remaining data based on the dimensions extracted
self._iterator = 0
return self._recursiveBuild( dim, values)
def _recursiveBuild( self, dimensions, data):
final = []
# If there's still remaining dimensions, must continue unpacking
if (len(dimensions) > 1):
for i in range(dimensions[0]):
final.append(self._recursiveBuild( dimensions[1:], data))
# If you have all the dimensions, begin building the data
else:
self._iterator += dimensions[0]
return data[self._iterator-dimensions[0]:self._iterator]
# Once finished, return the resulting array
return final
def _unpackFmt( self, data):
# Unpacks the format string for a packet
fmtString = ""
numPackets = struct.unpack("I", data[:4])[0]
        # Wait to receive all of the packets
while(numPackets > 1):
            d = self._recvAndCheck()
            if not d: return 0
data = data + d
numPackets -= 1
# combine the data into one string
for i in range(4, len(data)):
fmtString = str(fmtString + chr(data[i]))
        # Commas will denote new packets, so split based on those
return fmtString.split(',')
def _unpackData( self, formatStr, data):
        # Unpacks the received raw data based on the format string
dataSize = { 'i':4, 'f':4, 's':1, '?':1 }
numPackets = len(formatStr)
content = []
p = 0 # Packet number
d = 0
while(p < numPackets):
length = 0
firstElement = True
isList = False
isString = False
i = 0 # index in format string
            d = 0 # index in data received
# Iterate through all expected packets
while (i < len(formatStr[p])):
                # Since analyzed 1 digit at a time, this accounts for 2+ digit numbers
if (formatStr[p][i] == '-'):
break
if (formatStr[p][i] == '0'):
break
if (formatStr[p][i].isdigit()):
length = 10 * length + int(formatStr[p][i])
isList = True
# If not a digit then a data type was identified and something needs to be unpacked
else:
if (length == 0):
length = 1
if (formatStr[p][i] == 's'):
isString = True
string = ''
# append all of the characters for this entry to 1 string variable
for temp in range(length):
string = str(string + chr(data[p][d]))
d += 1 # move to next data entry
if (isList and firstElement and (formatStr[p-1][-1] == '-')):
content[-1] = str(content[-1] + string)
else:
content.append(string)
else:
# Append the next length of data to the resulting content
for temp in range(length):
content.append( struct.unpack(formatStr[p][i], data[p][d:(d+dataSize[formatStr[p][i]])])[0])
d += dataSize[formatStr[p][i]]
length = 0
firstElement = False
i += 1
p += 1
if (isList):
final = self._convert2list(content)
elif isString:
final = ''
for t in content:
final += t
else:
final = content[0]
return final
def _recvAndCheck( self):
        # Checks the sync byte to make sure the packet was fully received.
# Send a response accordingly
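        # Packet layout implied by the code below: up to self.buffer payload bytes
        # followed by a 2-byte sync word (0x55AA packed as an unsigned short).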
d = self.conn.recv(self.buffer + 2)
if (struct.unpack('H', d[-2:])[0] == 0x55AA):
self.conn.sendall(b"Valid.")
return d[:-2]
else:
self.conn.sendall(b"Invalid.")
            raise packetException('Communication Error: Packet could not be validated')
def getData( self, showRawData=False):
        # Waits for and receives all data in a communication attempt
#try:
# Wait for the data
data = self._recvAndCheck()
# Get the format string
if not data: return 0
formatString = self._unpackFmt( data)
        # Receive the rest of the packets if any, as identified in the format string
payload = []
for i in range(len(formatString)):
            d = self._recvAndCheck()
            if not d: return 0
payload.append( d)
# Unpack the data
content = self._unpackData( formatString, payload)
# Print raw data if requested by the user
if (showRawData):
print("\nBuffer Size: ", self.buffer, "\nFormat: ")
try:
[print(f) for f in formatString]
except:
print(formatString)
print("Recieved:")
try:
[print(str(c)) for c in content]
except:
print(content)
return content
#except packetException:
# print('Listening for resent data...')
# self.getData( showRawData=showRawData)
def streamData( self, showRawData=False):
# Creates a continuously refreshing data stream
self.dataBuffer = []
self.dataStream = []
self.receiveEvt = Event()
self.streaming = True
self.listen = Thread(target=self._waitForStream)
self.listen.daemon = True
self.listen.start()
return 1
def _waitForStream( self):
# Waits for the next communication in a data stream
print('Listening for data...')
try:
while self.streaming:
d = self.getData()
# print(d)
self.dataStream.append(d)
        except KeyboardInterrupt:
            return
        except BrokenPipeError:
            return
class packetException(Exception):
pass
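# Hedged usage sketch (not part of the original sample; the IP address is
# hypothetical and a Pi-side sender speaking the same packet format is assumed):
#   comm = CompTalk('192.168.1.10')
#   payload = comm.getData(showRawData=True)   # one blocking receive
#   comm.streamData()                           # background thread fills comm.dataStream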
|
[
"threading.Thread",
"socket.socket",
"struct.unpack",
"threading.Event",
"sys.exit"
] |
[((7089, 7096), 'threading.Event', 'Event', ([], {}), '()\n', (7094, 7096), False, 'from threading import Thread, Event\n'), ((7149, 7183), 'threading.Thread', 'Thread', ([], {'target': 'self._waitForStream'}), '(target=self._waitForStream)\n', (7155, 7183), False, 'from threading import Thread, Event\n'), ((576, 625), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (589, 625), False, 'import socket\n'), ((2638, 2666), 'struct.unpack', 'struct.unpack', (['"""I"""', 'data[:4]'], {}), "('I', data[:4])\n", (2651, 2666), False, 'import struct\n'), ((676, 776), 'sys.exit', 'sys.exit', (['"""Client IP Address was not valid. Check that the correct IP address was entered"""'], {}), "(\n 'Client IP Address was not valid. Check that the correct IP address was entered'\n )\n", (684, 776), False, 'import sys\n'), ((5478, 5504), 'struct.unpack', 'struct.unpack', (['"""H"""', 'd[-2:]'], {}), "('H', d[-2:])\n", (5491, 5504), False, 'import struct\n'), ((4833, 4905), 'struct.unpack', 'struct.unpack', (['formatStr[p][i]', 'data[p][d:d + dataSize[formatStr[p][i]]]'], {}), '(formatStr[p][i], data[p][d:d + dataSize[formatStr[p][i]]])\n', (4846, 4905), False, 'import struct\n')]
|
from mongomock import MongoClient
from repository.repository_factory import RepositoryFactory
class MongoMockRepository(RepositoryFactory):
__data_source = None
def get_data_source(self):
        if MongoMockRepository.__data_source is None:
MongoMockRepository.__data_source = MongoClient()
return MongoMockRepository.__data_source[self.args.mongo_database_name]
def __init__(self, args):
self.args = args
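# Minimal usage sketch (assumes an 'args' object exposing 'mongo_database_name'):
#   repo = MongoMockRepository(args)
#   db = repo.get_data_source()  # the same in-memory MongoClient is reused on later calls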
|
[
"mongomock.MongoClient"
] |
[((302, 315), 'mongomock.MongoClient', 'MongoClient', ([], {}), '()\n', (313, 315), False, 'from mongomock import MongoClient\n')]
|
# Copyright (c) 2020 Room 525 Research Group, Zhejiang University.
# All Rights Reserved.
"""Defination of Role Makers."""
from __future__ import print_function
import paddle.fluid as fluid
import os
import time
import random
import numpy as np
from PIL import Image  # assumed import for the Image calls in the Open_KS_* helpers below
__all__ = [
'Role', 'RoleMakerBase', 'MPISymetricRoleMaker', 'UserDefinedRoleMaker',
'UserDefinedCollectiveRoleMaker', 'PaddleCloudRoleMaker', 'GeneralRoleMaker', "Open_KS_read", "Open_KS_ImageNet",
"Open_KS_Read_Character", "Open_KS_Character"
]
class Role:
WORKER = 1
SERVER = 2
class Open_KS_ImageNet:
"""
A single image class.
Loading and using the Mini-ImageNet dataset.
To use these APIs, you should prepare a directory that
contains three sub-directories: train, test, and val.
Each of these three directories should contain one
sub-directory per WordNet ID.
"""
def __init__(self, dir_path):
self.dir_path = dir_path
self._cache = {}
def sample(self, num_images):
"""
Sample images (as numpy arrays) from the class.
Returns:
A sequence of 84x84x3 numpy arrays.
Each pixel ranges from 0 to 1.
"""
names = [f for f in os.listdir(self.dir_path) if f.endswith('.jpg')]
random.shuffle(names)
images = []
for name in names[:num_images]:
images.append(self._read_image(name))
return images
def _read_image(self, name):
if name in self._cache:
return self._cache[name].astype('float32') / 0xff
with open(os.path.join(self.dir_path, name), 'rb') as in_file:
img = Image.open(in_file).resize((84, 84)).convert('RGB')
self._cache[name] = np.array(img)
return self._read_image(name)
class Open_KS_read:
def read_dataset(data_dir):
"""
Read the Mini-ImageNet dataset.
Args:
data_dir: directory containing Mini-ImageNet.
Returns:
A tuple (train, val, test) of sequences of
ImageNetClass instances.
"""
        return tuple(Open_KS_read._read_classes(os.path.join(data_dir, x)) for x in ['train', 'val', 'test'])
def _read_classes(dir_path):
"""
Read the WNID directories in a directory.
"""
        return [Open_KS_ImageNet(os.path.join(dir_path, f)) for f in os.listdir(dir_path)
if f.startswith('n')]
class Open_KS_Character:
"""
A single character class.
"""
def __init__(self, dir_path, rotation=0):
self.dir_path = dir_path
self.rotation = rotation
self._cache = {}
def sample(self, num_images):
"""
Sample images (as numpy arrays) from the class.
Returns:
A sequence of 28x28 numpy arrays.
Each pixel ranges from 0 to 1.
"""
names = [f for f in os.listdir(self.dir_path) if f.endswith('.png')]
random.shuffle(names)
images = []
for name in names[:num_images]:
images.append(self._read_image(os.path.join(self.dir_path, name)))
return images
def _read_image(self, path):
if path in self._cache:
return self._cache[path]
with open(path, 'rb') as in_file:
img = Image.open(in_file).resize((28, 28)).rotate(self.rotation)
self._cache[path] = np.array(img).astype('float32')
return self._cache[path]
class Open_KS_Read_Character:
def read_dataset(data_dir):
"""
Iterate over the characters in a data directory.
Args:
data_dir: a directory of alphabet directories.
Returns:
An iterable over Characters.
The dataset is unaugmented and not split up into
training and test sets.
"""
for alphabet_name in sorted(os.listdir(data_dir)):
alphabet_dir = os.path.join(data_dir, alphabet_name)
if not os.path.isdir(alphabet_dir):
continue
for char_name in sorted(os.listdir(alphabet_dir)):
if not char_name.startswith('character'):
continue
                yield Open_KS_Character(os.path.join(alphabet_dir, char_name), 0)
def split_dataset(dataset, num_train=1200):
"""
Split the dataset into a training and test set.
Args:
dataset: an iterable of Characters.
Returns:
A tuple (train, test) of Character sequences.
"""
all_data = list(dataset)
random.shuffle(all_data)
return all_data[:num_train], all_data[num_train:]
def augment_dataset(dataset):
"""
Augment the dataset by adding 90 degree rotations.
Args:
dataset: an iterable of Characters.
Returns:
An iterable of augmented Characters.
"""
for character in dataset:
for rotation in [0, 90, 180, 270]:
                yield Open_KS_Character(character.dir_path, rotation=rotation)
class RoleMakerBase(object):
"""
RoleMakerBase is a base class for assigning a role to current process
in distributed training.
A paddle developer can implement RoleMakerBase to design a role maker
for worker or pserver assignment.
"""
def __init__(self):
self._worker_endpoints = []
self._server_endpoints = []
self._role_is_generated = False
self._role = None
self._current_id = -1
def is_worker(self):
"""
return is_worker() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def is_server(self):
"""
return is_server() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def is_first_worker(self):
"""
Check whether the node is the first instance of worker.
Returns:
bool: True if this is the first node of worker,
False if not.
"""
raise NotImplementedError("Please implement this method in child class")
def worker_num(self):
"""
Get current total worker number.
Returns:
int: worker number
"""
raise NotImplementedError("Please implement this method in child class")
def worker_index(self):
"""
Get current worker id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def server_index(self):
"""
Get current server id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def get_trainer_endpoints(self):
"""
return trainer endpoints
"""
return self._worker_endpoints
def get_pserver_endpoints(self):
"""
return pserver endpoints
"""
return self._server_endpoints
def to_string(self):
return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format(
self._role, self._current_id, self._worker_endpoints,
self._server_endpoints)
def all_gather(self, input):
"""
all gather between trainers and pservers
Args:
input(int|float): input value
Returns:
return a list of values
"""
print("warning: RoleMakerBase does not have all gather.")
return None
def all_reduce_worker(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
print("warning: RoleMakerBase does not have all reduce worker.")
def barrier_worker(self):
"""
barrier between trainers if current role is TRAINER
"""
print("warning: RoleMakerBase does not have barrier worker.")
def barrier_all(self):
"""
barrier between trainers if current role is PSERVER
"""
print("warning: RoleMakerBase does not have barrier all.")
class MPIRoleMaker(RoleMakerBase):
"""
MPIRoleMaker is a MPI-API based role maker which is a counter-part of K8SRoleMaker
mpi4py will be used if a developer inherits MPIRoleMaker
"""
def __init__(self):
"""Init."""
super(MPIRoleMaker, self).__init__()
from mpi4py import MPI
self.MPI = MPI
self._comm = MPI.COMM_WORLD
self._node_type_comm = None
self._ips = None
self._ip = None
def _get_rank(self):
"""Return rank."""
self._rank = self._comm.Get_rank()
return self._rank
def _get_size(self):
"""Return size."""
self._size = self._comm.Get_size()
return self._size
def _all_gather(self, obj):
"""
all_gather(obj) will call MPI's allgather function
"""
self._barrier_all()
return self._comm.allgather(obj)
def _worker_gather(self, obj):
"""
worker_gather(obj) will call MPI's allgather function
"""
if self.is_worker():
self._node_type_comm.barrier()
return self._node_type_comm.allgather(obj)
return None
def _barrier_all(self):
"""
barrier_all() will call MPI's barrier_all function
"""
self._comm.barrier()
def _finalize(self):
"""
finalize the current MPI instance.
"""
self.MPI.Finalize()
def _get_ips(self):
"""
collect current distributed job's ip list
"""
if not self._ips:
self._ips = self._comm.allgather(self.get_local_ip())
return self._ips
def get_local_ip(self):
"""Return get local ip."""
import socket
self._ip = socket.gethostbyname(socket.gethostname())
return self._ip
def generate_role(self):
"""
generate_role() should be called to identify current process's role
"""
raise NotImplementedError("Please implement this method in child class")
class MPISymetricRoleMaker(MPIRoleMaker):
"""
MPISymetricRoleMaker is designed for worker and server assignment
under MPI. Typically, a worker and a server node will be appointed
on each physical node. This role maker can be only used under MPI.
"""
def __init__(self):
"""Init."""
super(MPISymetricRoleMaker, self).__init__()
self._node_type = None
self._proc_per_node = 2
self._pserver_rand_port = 0
def _check_role_generation(self):
"""Check whether role has been generated."""
if not self._role_is_generated:
raise NameError("generate_role() should be called first")
return True
def all_gather(self, input):
"""
all gather between trainers and pservers
Args:
input(int|float): input value
Returns:
return a list of values
"""
if not self._role_is_generated:
self.generate_role()
return self._all_gather(input)
def all_reduce_worker(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self._role_is_generated:
self.generate_role()
if not self.is_worker():
print("warning: current role is not worker in all_reduce_worker")
return
self._all_reduce(input, output, mode)
def barrier_worker(self):
"""
barrier between trainers if current role is TRAINER
"""
if not self._role_is_generated:
self.generate_role()
if self.is_worker():
self._node_type_comm.barrier()
else:
print("warning: current role is not worker in barrier_worker")
def barrier_all(self):
"""
barrier between trainers if current role is PSERVER
"""
if not self._role_is_generated:
self.generate_role()
self._comm.barrier()
def is_first_worker(self):
"""
return whether current process is the first worker assigned by role maker
"""
if self._check_role_generation():
return self.is_worker() and 0 == self.worker_index()
return False
def get_pserver_endpoints(self):
"""
get pserver endpoints
Returns:
endpoints(list): pserver endpoints
"""
if self._pserver_rand_port <= 0:
import random
random.seed(self._server_num())
# port will be randomly generated from 60001 to 63999
# random seed is server num so that all nodes will get
# the same port
self._pserver_rand_port = random.randint(60001, 64000)
endpoints = [
x + ":" + str(self._pserver_rand_port)
for x in self._server_endpoints
]
return endpoints
def worker_num(self):
return self._worker_num()
def is_worker(self):
"""
return whether current process is worker assigned by role maker
"""
if self._check_role_generation():
return self._node_type == 1
return False
def is_server(self):
"""
return whether current process is server assigned by role maker
"""
if self._check_role_generation():
return self._node_type == 0
return False
def _worker_num(self):
"""
return the current number of worker
"""
if self._check_role_generation():
return self._get_size() / self._proc_per_node
return 0
def _server_num(self):
"""
return the current number of server
"""
if self._check_role_generation():
return self._get_size() / self._proc_per_node
else:
self.generate_role()
return self._get_size() / self._proc_per_node
def worker_index(self):
"""
return the index of worker
"""
if self._check_role_generation():
return self._rank / self._proc_per_node
else:
self.generate_role()
return self._get_size() / 2
def server_index(self):
"""
return the index of server
"""
if self._check_role_generation():
return self._rank / self._proc_per_node
else:
self.generate_role()
return self._get_size() / self._proc_per_node
def _all_reduce(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self._role_is_generated:
self.generate_role()
if mode == "sum":
mode = self.MPI.SUM
elif mode == "max":
mode = self.MPI.MAX
elif mode == "min":
mode = self.MPI.MIN
else:
raise ValueError("unknown mode: %s" % mode)
self._node_type_comm.Allreduce(input, output, op=mode)
def _barrier_worker(self):
"""
barrier all workers in current distributed job
"""
if self._check_role_generation():
if self.is_worker():
self._node_type_comm.barrier()
else:
raise Exception("You should check role generation first")
def _barrier_server(self):
"""
barrier all servers in current distributed job
"""
if self._check_role_generation():
if self.is_server():
self._node_type_comm.barrier()
else:
raise Exception("You should check role generation first")
def generate_role(self):
"""
generate currently process's role
"""
if not self._role_is_generated:
# TODO(guru4elephant): only allow to be called once
self._worker_endpoints = self._get_ips()[1::2]
self._server_endpoints = self._get_ips()[::2]
if 0 == self._get_rank() % self._proc_per_node % 2:
self._node_type = 0
else:
self._node_type = 1
self._node_type_comm = self._comm.Split(self._node_type)
self._role_is_generated = True
else:
raise Exception("You should check role generation first")
class PaddleCloudRoleMaker(RoleMakerBase):
"""
role maker for paddle cloud,
base class is RoleMakerBase
"""
def __init__(self, is_collective=False):
super(PaddleCloudRoleMaker, self).__init__()
self._role_is_generated = False
self._is_collective = is_collective
def generate_role(self):
"""Generate role."""
if not self._role_is_generated:
if not self._is_collective:
try:
# Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set
# format: string(ip:port), eg. 127.0.0.1:6001
eplist = os.environ["PADDLE_PSERVERS_IP_PORT_LIST"].split(
",")
# note that, we usually assign the same port to different ips
# if we run parameter server training in local mode
# port should be different in environment variables
trainers_num = int(os.environ["PADDLE_TRAINERS_NUM"])
training_role = os.environ["TRAINING_ROLE"]
if training_role not in ["TRAINER", "PSERVER"]:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER")
if training_role == "TRAINER":
role = Role.WORKER
current_id = int(os.environ["PADDLE_TRAINER_ID"])
elif training_role == "PSERVER":
role = Role.SERVER
cur_ip = os.environ["POD_IP"]
curr_port = os.environ["PADDLE_PORT"]
curr_endpoint = ":".join([cur_ip, curr_port])
current_id = eplist.index(curr_endpoint)
else:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER")
except ValueError as ve:
raise ValueError(
"something wrong with PaddleCloud, please check environment"
)
self._trainers_num = trainers_num
self._server_endpoints = eplist
self._role = role
self._current_id = current_id
else:
self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
self._training_role = os.getenv("PADDLE_TRAINING_ROLE",
"TRAINER")
assert (self._training_role == "TRAINER")
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
self._current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
assert self._worker_endpoints is not None, "can't find PADDLE_TRAINER_ENDPOINTS"
self._worker_endpoints = self._worker_endpoints.split(",")
self._trainers_num = len(self._worker_endpoints)
self._role_is_generated = True
def get_pserver_endpoints(self):
if not self._role_is_generated:
self.generate_role()
return self._server_endpoints
def is_worker(self):
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER
def is_server(self):
if not self._role_is_generated:
self.generate_role()
return self._role == Role.SERVER
def is_first_worker(self):
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER and self._current_id == 0
def worker_index(self):
if not self._role_is_generated:
self.generate_role()
return self._current_id
def server_index(self):
if not self._role_is_generated:
self.generate_role()
return self._current_id
def worker_num(self):
if not self._role_is_generated:
self.generate_role()
return self._trainers_num
class GeneralRoleMaker(RoleMakerBase):
"""
This role maker is for general use, you can set os.environ to customize:
PADDLE_PSERVERS_IP_PORT_LIST : all pservers' ip:port, separated by ','
PADDLE_TRAINER_ENDPOINTS : all trainers' ip:port, separated by ','
TRAINING_ROLE : TRAINER or PSERVER
PADDLE_TRAINER_ID : current trainer id (only for trainer),
it is index in PADDLE_TRAINER_ENDPOINTS
PADDLE_PSERVER_ID : current pserver id (only for pserver)
it is index in PADDLE_PSERVERS_IP_PORT_LIST
"""
def __init__(self, **kwargs):
super(RoleMakerBase, self).__init__()
self._role_is_generated = False
self._hdfs_name = kwargs.get("hdfs_name", "")
self._hdfs_ugi = kwargs.get("hdfs_ugi", "")
self._hdfs_path = kwargs.get("path", "")
self._iface = self.__get_default_iface()
# this environment variable can be empty
self._prefix = os.getenv("SYS_JOB_ID", "")
def generate_role(self):
"""
generate role for general role maker
"""
if not self._role_is_generated:
eplist = os.environ["PADDLE_PSERVERS_IP_PORT_LIST"].split(",")
training_role = os.environ["TRAINING_ROLE"]
worker_endpoints = os.environ["PADDLE_TRAINER_ENDPOINTS"].split(",")
trainers_num = len(worker_endpoints)
if training_role not in ["TRAINER", "PSERVER"]:
raise ValueError("TRAINING_ROLE must be PSERVER or TRAINER")
if training_role == "TRAINER":
role = Role.WORKER
current_id = int(os.environ["PADDLE_TRAINER_ID"])
self._node_type = 1
self._cur_endpoint = worker_endpoints[current_id]
gloo = fluid.core.Gloo()
gloo.init(current_id,
len(worker_endpoints),
self._hdfs_path.rstrip("/") + "/trainer",
self._hdfs_name, self._hdfs_ugi, self._iface,
self._prefix)
self._node_type_comm = gloo
elif training_role == "PSERVER":
role = Role.SERVER
if os.environ.get("PADDLE_PSERVER_ID") is not None:
current_id = int(os.environ["PADDLE_PSERVER_ID"])
cur_endpoint = eplist[current_id]
else:
                    # this is for compatibility with paddlecloud
cur_ip = os.environ["POD_IP"]
cur_port = os.environ["PADDLE_PORT"]
cur_endpoint = ":".join([cur_ip, cur_port])
current_id = eplist.index(cur_endpoint)
self._node_type = 0
self._cur_endpoint = cur_endpoint
gloo = fluid.core.Gloo()
gloo.init(current_id,
len(eplist),
self._hdfs_path.rstrip("/") + "/pserver",
self._hdfs_name, self._hdfs_ugi, self._iface,
self._prefix)
self._node_type_comm = gloo
gloo = fluid.core.Gloo()
all_list = worker_endpoints + eplist
gloo.init(
all_list.index(self._cur_endpoint),
len(all_list),
self._hdfs_path.rstrip("/") + "/all", self._hdfs_name,
self._hdfs_ugi, self._iface, self._prefix)
self._all_comm = gloo
self._trainers_num = trainers_num
self._server_endpoints = eplist
self._role = role
self._current_id = current_id
self._rank = all_list.index(self._cur_endpoint)
self._size = len(all_list)
self._worker_endpoints = worker_endpoints
self._role_is_generated = True
def all_gather(self, input):
"""
all gather between trainers and pservers
Args:
input(int|float): input value
Returns:
return a list of values
"""
return self._all_gather(input)
def all_reduce_worker(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self.is_worker():
return
self._all_reduce(input, output, mode)
def barrier_worker(self):
"""
barrier between trainers if current role is TRAINER
"""
self._barrier_worker()
def barrier_all(self):
"""
barrier between trainers if current role is PSERVER
"""
self._barrier_all()
def get_local_endpoint(self):
"""
get local endpoint of current process
"""
if not self._role_is_generated:
self.generate_role()
return self._cur_endpoint
def get_trainer_endpoints(self):
"""
get endpoint of all trainers
"""
if not self._role_is_generated:
self.generate_role()
return self._worker_endpoints
def get_pserver_endpoints(self):
"""
get endpoint of all pservers
"""
if not self._role_is_generated:
self.generate_role()
return self._server_endpoints
def is_worker(self):
"""
whether current process is worker
"""
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER
def is_server(self):
"""
whether current process is server
"""
if not self._role_is_generated:
self.generate_role()
return self._role == Role.SERVER
def is_first_worker(self):
"""
whether current process is worker of rank 0
"""
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER and self._current_id == 0
def worker_index(self):
"""
get index of current worker
"""
if not self._role_is_generated:
self.generate_role()
return self._current_id
def server_index(self):
"""
get index of current server
"""
if not self._role_is_generated:
self.generate_role()
return self._current_id
def worker_num(self):
"""
        return the current number of worker
"""
if not self._role_is_generated:
self.generate_role()
return self._worker_num()
def server_num(self):
"""
return the current number of server
"""
if not self._role_is_generated:
self.generate_role()
return self._server_num()
def _barrier_worker(self):
"""
barrier all workers in current distributed job
"""
if not self._role_is_generated:
self.generate_role()
if self.is_worker():
self._node_type_comm.barrier()
def _barrier_all(self):
"""
barrier all workers and servers in current distributed job
"""
if not self._role_is_generated:
self.generate_role()
self._all_comm.barrier()
def _barrier_server(self):
"""
barrier all servers in current distributed job
"""
if not self._role_is_generated:
self.generate_role()
if self.is_server():
self._node_type_comm.barrier()
def _worker_num(self):
"""
return the current number of worker
"""
if not self._role_is_generated:
self.generate_role()
return self._trainers_num
def _server_num(self):
"""
return the current number of server
"""
if not self._role_is_generated:
self.generate_role()
return len(self._server_endpoints)
def _finalize(self):
"""Default do nothing."""
pass
def _all_reduce(self, input, output, mode="sum"):
"""
all reduce between all workers
Args:
input(list|numpy.array): array of one dim
output(list|numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self._role_is_generated:
self.generate_role()
input_list = [i for i in input]
ans = self._node_type_comm.all_reduce(input_list, mode)
for i in range(len(ans)):
output[i] = ans[i]
def _all_gather(self, obj):
"""
gather between all workers and pservers
"""
if not self._role_is_generated:
self.generate_role()
self._barrier_all()
return self._all_comm.all_gather(obj)
def _worker_gather(self, obj):
"""
gather between all workers
"""
if not self._role_is_generated:
self.generate_role()
if not self.is_worker():
return None
self._barrier_worker()
return self._node_type_comm.all_gather(obj)
def _get_rank(self):
"""
get current rank in all workers and pservers
"""
if not self._role_is_generated:
self.generate_role()
return self._rank
def _get_size(self):
"""
get total num of all workers and pservers
"""
if not self._role_is_generated:
self.generate_role()
return self._size
def __get_default_iface(self):
"""
get default physical interface
"""
default1 = self.__get_default_iface_from_gateway()
default2 = self.__get_default_iface_from_interfaces()
return default2 if default1 == "lo" else default1
def __get_default_iface_from_gateway(self):
"""
get default physical interface
"""
import netifaces
gateways = netifaces.gateways()
if gateways.get(netifaces.AF_INET) != None:
gateway = gateways[netifaces.AF_INET]
if len(gateway) > 0 and len(gateway[0]) > 1:
return gateway[0][1]
return "lo"
def __get_default_iface_from_interfaces(self):
"""
get default physical interface
"""
import netifaces
for intf_name in netifaces.interfaces():
addresses = netifaces.ifaddresses(intf_name)
if netifaces.AF_INET in addresses:
ipv4_addresses = addresses[netifaces.AF_INET]
for ipv4_address in ipv4_addresses:
if 'broadcast' in ipv4_address:
return intf_name
return "lo"
class UserDefinedRoleMaker(RoleMakerBase):
"""
UserDefinedRoleMaker is designed for worker and server assignment
    under manual assignment. Typically, a worker and a server node will be appointed
    on each physical node. It can be assigned by the user.
"""
def __init__(self,
current_id=0,
role=Role.WORKER,
worker_num=0,
server_endpoints=None):
super(UserDefinedRoleMaker, self).__init__()
if not isinstance(server_endpoints, list):
raise TypeError("server_endpoints must be as string list")
elif len(server_endpoints) <= 0:
raise ValueError(
"the length of server_endpoints list must be greater than 0")
elif len(server_endpoints) != len(set(server_endpoints)):
raise ValueError("server_endpoints can't have duplicate elements")
else:
for server_endpoint in server_endpoints:
if not isinstance(server_endpoint, str):
raise TypeError(
"every element in server_endpoints list must be as string"
)
self._server_endpoints = server_endpoints
if role != Role.WORKER and role != Role.SERVER:
raise TypeError("role must be as Role")
else:
self._role = role
if not isinstance(current_id, int):
raise TypeError("current_id must be as int")
else:
if current_id < 0:
raise ValueError(
"current_id must be greater than or equal to 0")
elif self._role == Role.SERVER and current_id >= len(
server_endpoints):
raise ValueError(
"if role is Role.SERVER, current_id must be less than or equal to len(server_endpoints) - 1"
)
self._current_id = current_id
if not isinstance(worker_num, int):
raise TypeError("worker_num must be as int")
else:
if worker_num <= 0:
raise ValueError("worker_num must be greater than 0")
self._worker_num = worker_num
def generate_role(self):
self._role_is_generated = True
def is_worker(self):
return self._role == Role.WORKER
def is_server(self):
return self._role == Role.SERVER
def is_first_worker(self):
return self._role == Role.WORKER and self._current_id == 0
def worker_index(self):
return self._current_id
def server_index(self):
return self._current_id
def worker_num(self):
return self._worker_num
class UserDefinedCollectiveRoleMaker(RoleMakerBase):
"""
UserDefinedCollectiveRoleMaker is designed for worker assignment
    under manual assignment for collective mode.
"""
def __init__(self, current_id=0, worker_endpoints=None):
super(UserDefinedCollectiveRoleMaker, self).__init__()
if not isinstance(worker_endpoints, list):
raise TypeError("worker_endpoints must be as string list")
elif len(worker_endpoints) <= 0:
raise ValueError(
"the length of worker_endpoints list must be greater than 0")
elif len(worker_endpoints) != len(set(worker_endpoints)):
raise ValueError("worker_endpoints can't have duplicate elements")
else:
for worker_endpoint in worker_endpoints:
if not isinstance(worker_endpoint, str):
raise TypeError(
"every element in worker_endpoints list must be as string"
)
self._worker_endpoints = worker_endpoints
if not isinstance(current_id, int):
raise TypeError("current_id must be as int")
else:
if current_id < 0:
raise ValueError(
"current_id must be greater than or equal to 0")
elif current_id >= len(worker_endpoints):
raise ValueError(
"current_id must be less than or equal to len(worker_endpoints) - 1"
)
self._current_id = current_id
self._worker_num = len(self._worker_endpoints)
def generate_role(self):
self._role_is_generated = True
def is_worker(self):
return True
def is_first_worker(self):
return self._current_id == 0
def worker_index(self):
return self._current_id
def worker_num(self):
return self._worker_num
|
[
"netifaces.interfaces",
"paddle.fluid.core.Gloo",
"os.path.join",
"random.randint",
"netifaces.gateways",
"random.shuffle",
"os.path.isdir",
"os.environ.get",
"socket.gethostname",
"netifaces.ifaddresses",
"os.getenv",
"os.listdir"
] |
[((1245, 1266), 'random.shuffle', 'random.shuffle', (['names'], {}), '(names)\n', (1259, 1266), False, 'import random\n'), ((2897, 2918), 'random.shuffle', 'random.shuffle', (['names'], {}), '(names)\n', (2911, 2918), False, 'import random\n'), ((4486, 4510), 'random.shuffle', 'random.shuffle', (['all_data'], {}), '(all_data)\n', (4500, 4510), False, 'import random\n'), ((22154, 22181), 'os.getenv', 'os.getenv', (['"""SYS_JOB_ID"""', '""""""'], {}), "('SYS_JOB_ID', '')\n", (22163, 22181), False, 'import os\n'), ((31283, 31303), 'netifaces.gateways', 'netifaces.gateways', ([], {}), '()\n', (31301, 31303), False, 'import netifaces\n'), ((31685, 31707), 'netifaces.interfaces', 'netifaces.interfaces', ([], {}), '()\n', (31705, 31707), False, 'import netifaces\n'), ((3800, 3820), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (3810, 3820), False, 'import os\n'), ((3850, 3887), 'os.path.join', 'os.path.join', (['data_dir', 'alphabet_name'], {}), '(data_dir, alphabet_name)\n', (3862, 3887), False, 'import os\n'), ((10079, 10099), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (10097, 10099), False, 'import socket\n'), ((13269, 13297), 'random.randint', 'random.randint', (['(60001)', '(64000)'], {}), '(60001, 64000)\n', (13283, 13297), False, 'import random\n'), ((24352, 24369), 'paddle.fluid.core.Gloo', 'fluid.core.Gloo', ([], {}), '()\n', (24367, 24369), True, 'import paddle.fluid as fluid\n'), ((31733, 31765), 'netifaces.ifaddresses', 'netifaces.ifaddresses', (['intf_name'], {}), '(intf_name)\n', (31754, 31765), False, 'import netifaces\n'), ((1188, 1213), 'os.listdir', 'os.listdir', (['self.dir_path'], {}), '(self.dir_path)\n', (1198, 1213), False, 'import os\n'), ((1545, 1578), 'os.path.join', 'os.path.join', (['self.dir_path', 'name'], {}), '(self.dir_path, name)\n', (1557, 1578), False, 'import os\n'), ((2286, 2311), 'os.path.join', 'os.path.join', (['dir_path', 'f'], {}), '(dir_path, f)\n', (2298, 2311), False, 'import os\n'), ((2322, 2342), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (2332, 2342), False, 'import os\n'), ((2840, 2865), 'os.listdir', 'os.listdir', (['self.dir_path'], {}), '(self.dir_path)\n', (2850, 2865), False, 'import os\n'), ((3907, 3934), 'os.path.isdir', 'os.path.isdir', (['alphabet_dir'], {}), '(alphabet_dir)\n', (3920, 3934), False, 'import os\n'), ((3997, 4021), 'os.listdir', 'os.listdir', (['alphabet_dir'], {}), '(alphabet_dir)\n', (4007, 4021), False, 'import os\n'), ((19498, 19542), 'os.getenv', 'os.getenv', (['"""PADDLE_TRAINING_ROLE"""', '"""TRAINER"""'], {}), "('PADDLE_TRAINING_ROLE', 'TRAINER')\n", (19507, 19542), False, 'import os\n'), ((19690, 19727), 'os.getenv', 'os.getenv', (['"""PADDLE_TRAINER_ENDPOINTS"""'], {}), "('PADDLE_TRAINER_ENDPOINTS')\n", (19699, 19727), False, 'import os\n'), ((19769, 19805), 'os.getenv', 'os.getenv', (['"""PADDLE_CURRENT_ENDPOINT"""'], {}), "('PADDLE_CURRENT_ENDPOINT')\n", (19778, 19805), False, 'import os\n'), ((22988, 23005), 'paddle.fluid.core.Gloo', 'fluid.core.Gloo', ([], {}), '()\n', (23003, 23005), True, 'import paddle.fluid as fluid\n'), ((2086, 2111), 'os.path.join', 'os.path.join', (['data_dir', 'x'], {}), '(data_dir, x)\n', (2098, 2111), False, 'import os\n'), ((3022, 3055), 'os.path.join', 'os.path.join', (['self.dir_path', 'name'], {}), '(self.dir_path, name)\n', (3034, 3055), False, 'import os\n'), ((19423, 19458), 'os.getenv', 'os.getenv', (['"""PADDLE_TRAINER_ID"""', '"""0"""'], {}), "('PADDLE_TRAINER_ID', '0')\n", (19432, 19458), False, 'import os\n'), 
((24013, 24030), 'paddle.fluid.core.Gloo', 'fluid.core.Gloo', ([], {}), '()\n', (24028, 24030), True, 'import paddle.fluid as fluid\n'), ((4143, 4180), 'os.path.join', 'os.path.join', (['alphabet_dir', 'char_name'], {}), '(alphabet_dir, char_name)\n', (4155, 4180), False, 'import os\n'), ((23416, 23451), 'os.environ.get', 'os.environ.get', (['"""PADDLE_PSERVER_ID"""'], {}), "('PADDLE_PSERVER_ID')\n", (23430, 23451), False, 'import os\n')]
|
import main
import unittest
class OnelineTest(unittest.TestCase):
    def test_example(self):
a = [1, 2, 3, 4, 5]
b = [10, 0, 10, 0, 10]
main.fmax(a,b)
self.assertEqual(a,[10, 2, 10, 4, 10])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"main.fmax"
] |
[((260, 275), 'unittest.main', 'unittest.main', ([], {}), '()\n', (273, 275), False, 'import unittest\n'), ((167, 182), 'main.fmax', 'main.fmax', (['a', 'b'], {}), '(a, b)\n', (176, 182), False, 'import main\n')]
|
from utilities import load_stf
import numpy as np
from scipy.spatial.distance import cosine
import time
#vsm = load_stf('glove.840B.300d.sample.txt',300)
#csm = np.load('centroids').item()
#distrib = np.zeros((100000,10))
#oFile = open('f_distrib','w+')
def dot_product(v1,v2):
total = 0
if len(v1) != len(v2):
        raise ValueError("v1 and v2 must have the same length")
for i in range(len(v1)):
total += float(v1[i])*float(v2[i])
return total
def centroid(vsm,w,k):
total = np.zeros(len(vsm.word_vectors[vsm.dictionary[w]]))
for v in vsm.most_similar(w,k+1):
total += vsm.word_vectors[vsm.dictionary[v[0]]]
total /= k
return total
def lcent_similarity(w1,w2,vsm,gamma,k,c):
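    # A reading of the code below (not a reference definition): the score is
    # sim(w1, w2) = v1 . v2 - (v1 . c)**gamma, where v1 and v2 are the word vectors
    # and c is a precomputed centroid of w1's nearest neighbours (see centroid()).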
v1 = vsm.word_vectors[vsm.dictionary[w1]]
v2 = vsm.word_vectors[vsm.dictionary[w2]]
v1v2 = dot_product(v1,v2)
v1c = dot_product(v1,c)
v1cg = np.power(v1c,gamma)
return v1v2 - v1cg
def insert(v,sims,vec,val):
nv = np.zeros(len(v))
nsims = np.zeros((len(sims),300))
swap = 0
for i in range(len(v)):
if v[i]<val:
swap = 1
break
if swap == 0:
return (v,sims)
nv[:i] = v[:i]
nsims[:i] = sims[:i]
nv[i] = val
nsims[i] = vec
nv[i+1:] = v[i:len(v)-1]
nsims[i+1:] = sims[i:len(sims)-1]
return (nv,nsims)
def most_similar_lcent(vsm,csm,word,k,gamma):
sims = np.zeros(10)
vecs = np.zeros(10)
c = csm[word]
for i,d_word in enumerate(vsm.dictionary):
sim = lcent_similarity(word,d_word,vsm,gamma,k,c)
(sims,vecs) = insert(vecs,sims,vsm.dictionary[d_word],sim)
ret = []
for i in range(10):
ret.append((sims[i],vecs[i]))
return ret
'''
centroids = {}
for i,j in enumerate(vsm.dictionary):
if i%100 == 0:
print i
centroids[j] = centroid(vsm,j,11)
'''
#c = time.time()
#for j,w in enumerate(vsm.dictionary):
# print j
# print time.time() - c
# c = time.time()
# ms = most_similar_lcent(vsm,csm,w,11,2)
# for k,s in enumerate(ms):
# print s
# i = vsm.dictionary[s]
# distrib[i,k] += 1
#for c in centroids:
# oFile.write(str(c) + u' ')
# for i in centroids[c]:
# oFile.write(str(i) + u' ')
# oFile.write(u'\n')
#np.save(oFile,distrib)
#oFile.close()
|
[
"numpy.power",
"numpy.zeros"
] |
[((788, 808), 'numpy.power', 'np.power', (['v1c', 'gamma'], {}), '(v1c, gamma)\n', (796, 808), True, 'import numpy as np\n'), ((1221, 1233), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1229, 1233), True, 'import numpy as np\n'), ((1242, 1254), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1250, 1254), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import json
import logging
import pkg_resources
import pytz
import sys
import tzlocal
import yaml
from datetime import datetime, timedelta
from os.path import expanduser, isfile
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from .db import create_sessionmaker, Measurement, Sensor, SensorType
colors = [
"#1f77b4",
"#7f7f7f",
"#17becf",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#bcbd22"
]
app = dash.Dash(__name__)
app.layout = html.Div([
html.H1('TerraPi dashboard'),
dcc.Interval(
id = 'interval-component',
interval = 5 * 60 * 1000,
n_intervals = 0
),
html.Div(id='intermediate-value', style={'display': 'none'})
])
def generate_update_func(sensor_type):
def update_graph_live(measurements_json, relayout_data):
global sensors
m = json.loads(measurements_json)
sensor_ids = [s.id for s in sensors if s.type==sensor_type]
data = []
i = 0
for sensor_id in sensor_ids:
data.append(go.Scatter(
x = m[str(sensor_id)]['timestamp'],
y = m[str(sensor_id)]['value'],
name = [s.name for s in sensors if s.id==sensor_id][0],
mode = 'lines',
line = dict(color=colors[i%len(colors)])
))
i = i + 1
layout = go.Layout(
title = sensor_type.name.capitalize(),
margin = dict(l=60, r=60, b=30, t=30),
legend = dict(x=0, y=1, xanchor='left'),
xaxis = dict(
type = 'date',
range = [
relayout_data['xaxis.range[0]'],
relayout_data['xaxis.range[1]']
] if 'xaxis.range[0]' in relayout_data else None,
rangeselector = dict(
buttons = list([
dict(count=1, label='1 day', step='day', stepmode='backward'),
dict(count=7, label='1 week', step='day', stepmode='backward'),
dict(count=1, label='1 month', step='month', stepmode='backward'),
dict(step='all')
])
),
),
yaxis = dict(fixedrange = True)
)
return go.Figure(layout=layout, data=data)
return update_graph_live
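# Note: main() below registers one Dash callback per distinct sensor type, wiring
# each graph's 'figure' output to the cached measurements via generate_update_func.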
@app.callback(
Output('intermediate-value', 'children'),
[Input('interval-component', 'n_intervals')])
def update_measurements(n):
global sensors
global sessionmaker
measurements = dict()
session = sessionmaker()
one_day = timedelta(hours=30*24)
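    # Note: despite the name, this window spans 30 days (30 * 24 hours) of history.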
local_tz = tzlocal.get_localzone()
for sensor in sensors:
measurements[sensor.id] = dict()
_data = session.query(Measurement).filter(
Measurement.sensor==sensor).filter(
Measurement.timestamp>datetime.now()-one_day).order_by(
Measurement.timestamp).all()
measurements[sensor.id]['timestamp'] = [
m.timestamp.replace(tzinfo=pytz.utc).astimezone(local_tz) for m in _data]
measurements[sensor.id]['value'] = [m.value for m in _data]
session.close()
return json.dumps(measurements, default=str)
def get_connection_string():
config_paths = []
if len(sys.argv) > 1:
config_paths.append(sys.argv[1])
config_paths.append(expanduser('~') + '/.terrapi.yaml')
config_paths.append(expanduser('~') + '/.config/terrapi/config.yaml')
config_paths.append(pkg_resources.resource_filename('TerraPi',
'conf/config-sample.yaml'))
    configfile = None
    for path in config_paths:
        if isfile(path):
            configfile = path
            break
if not configfile:
logging.error("No config file found! Exiting..")
sys.exit(1)
with open(configfile, 'r') as stream:
config = yaml.load(stream)
if not config:
logging.error("Empty configuration! Exiting...")
sys.exit(1)
connection_string = config.get('connection_string')
if not connection_string:
logging.info("Database configuration not found, using SQLite.")
database = pkg_resources.resource_filename('TerraPi','data/terrapi.db')
connection_string = 'sqlite:///{}'.format(database)
return connection_string
def main():
global sensors
global sessionmaker
connection_string = get_connection_string()
sessionmaker = create_sessionmaker(connection_string)
session = sessionmaker()
sensors = session.query(Sensor).all()
for s in sensors:
app.layout.children.append(
html.Div(
children = dcc.Graph(id = s.type.name),
style = dict(
marginBottom = 80,
marginTop = 80)
))
session.close()
for st in set([s.type for s in sensors]):
app.callback(
Output(st.name, 'figure'),
[Input('intermediate-value', 'children')],
[State(st.name, 'relayoutData')]
)(generate_update_func(st))
app.run_server(debug=True)
if __name__ == '__main__':
main()
|
[
"yaml.load",
"json.dumps",
"pkg_resources.resource_filename",
"os.path.isfile",
"logging.error",
"dash.Dash",
"json.loads",
"dash_html_components.Div",
"dash.dependencies.State",
"datetime.timedelta",
"plotly.graph_objs.Figure",
"datetime.datetime.now",
"dash_core_components.Interval",
"dash.dependencies.Input",
"dash_core_components.Graph",
"sys.exit",
"tzlocal.get_localzone",
"logging.info",
"dash_html_components.H1",
"dash.dependencies.Output",
"os.path.expanduser"
] |
[((624, 643), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (633, 643), False, 'import dash\n'), ((2795, 2819), 'datetime.timedelta', 'timedelta', ([], {'hours': '(30 * 24)'}), '(hours=30 * 24)\n', (2804, 2819), False, 'from datetime import datetime, timedelta\n'), ((2833, 2856), 'tzlocal.get_localzone', 'tzlocal.get_localzone', ([], {}), '()\n', (2854, 2856), False, 'import tzlocal\n'), ((3390, 3427), 'json.dumps', 'json.dumps', (['measurements'], {'default': 'str'}), '(measurements, default=str)\n', (3400, 3427), False, 'import json\n'), ((2562, 2602), 'dash.dependencies.Output', 'Output', (['"""intermediate-value"""', '"""children"""'], {}), "('intermediate-value', 'children')\n", (2568, 2602), False, 'from dash.dependencies import Input, Output, State\n'), ((672, 700), 'dash_html_components.H1', 'html.H1', (['"""TerraPi dashboard"""'], {}), "('TerraPi dashboard')\n", (679, 700), True, 'import dash_html_components as html\n'), ((706, 782), 'dash_core_components.Interval', 'dcc.Interval', ([], {'id': '"""interval-component"""', 'interval': '(5 * 60 * 1000)', 'n_intervals': '(0)'}), "(id='interval-component', interval=5 * 60 * 1000, n_intervals=0)\n", (718, 782), True, 'import dash_core_components as dcc\n'), ((824, 884), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""intermediate-value"""', 'style': "{'display': 'none'}"}), "(id='intermediate-value', style={'display': 'none'})\n", (832, 884), True, 'import dash_html_components as html\n'), ((1026, 1055), 'json.loads', 'json.loads', (['measurements_json'], {}), '(measurements_json)\n', (1036, 1055), False, 'import json\n'), ((2475, 2510), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout', 'data': 'data'}), '(layout=layout, data=data)\n', (2484, 2510), True, 'import plotly.graph_objs as go\n'), ((2609, 2651), 'dash.dependencies.Input', 'Input', (['"""interval-component"""', '"""n_intervals"""'], {}), "('interval-component', 'n_intervals')\n", (2614, 2651), False, 'from dash.dependencies import Input, Output, State\n'), ((3706, 3775), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""TerraPi"""', '"""conf/config-sample.yaml"""'], {}), "('TerraPi', 'conf/config-sample.yaml')\n", (3737, 3775), False, 'import pkg_resources\n'), ((3826, 3838), 'os.path.isfile', 'isfile', (['path'], {}), '(path)\n', (3832, 3838), False, 'from os.path import expanduser, isfile\n'), ((3919, 3967), 'logging.error', 'logging.error', (['"""No config file found! Exiting.."""'], {}), "('No config file found! Exiting..')\n", (3932, 3967), False, 'import logging\n'), ((3976, 3987), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3984, 3987), False, 'import sys\n'), ((4047, 4064), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (4056, 4064), False, 'import yaml\n'), ((4092, 4140), 'logging.error', 'logging.error', (['"""Empty configuration! Exiting..."""'], {}), "('Empty configuration! 
Exiting...')\n", (4105, 4140), False, 'import logging\n'), ((4149, 4160), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4157, 4160), False, 'import sys\n'), ((4256, 4319), 'logging.info', 'logging.info', (['"""Database configuration not found, using SQLite."""'], {}), "('Database configuration not found, using SQLite.')\n", (4268, 4319), False, 'import logging\n'), ((4339, 4400), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""TerraPi"""', '"""data/terrapi.db"""'], {}), "('TerraPi', 'data/terrapi.db')\n", (4370, 4400), False, 'import pkg_resources\n'), ((3572, 3587), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (3582, 3587), False, 'from os.path import expanduser, isfile\n'), ((3632, 3647), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (3642, 3647), False, 'from os.path import expanduser, isfile\n'), ((5107, 5132), 'dash.dependencies.Output', 'Output', (['st.name', '"""figure"""'], {}), "(st.name, 'figure')\n", (5113, 5132), False, 'from dash.dependencies import Input, Output, State\n'), ((4841, 4866), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': 's.type.name'}), '(id=s.type.name)\n', (4850, 4866), True, 'import dash_core_components as dcc\n'), ((5147, 5186), 'dash.dependencies.Input', 'Input', (['"""intermediate-value"""', '"""children"""'], {}), "('intermediate-value', 'children')\n", (5152, 5186), False, 'from dash.dependencies import Input, Output, State\n'), ((5202, 5232), 'dash.dependencies.State', 'State', (['st.name', '"""relayoutData"""'], {}), "(st.name, 'relayoutData')\n", (5207, 5232), False, 'from dash.dependencies import Input, Output, State\n'), ((3067, 3081), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3079, 3081), False, 'from datetime import datetime, timedelta\n')]
|
from ast import literal_eval
from flask import Flask, jsonify, render_template, request, Response
from scripts.downloader import download_hashes
from scripts.checks import check_info
app = Flask(__name__)
@app.route("/check/")
def check():
"""
Does the following checks:
- The album hash (album_hash) is less than 8 characters long
- The album exists (a get request gives a 200)
- The album exists and has images (some have 0 images)
- The .ini file exists (relative to cwd aka directory this file is in)
- If the new directory option was checked:
- Will attempt to make the new directory
- If it isn't checked:
- The chosen directory exists
- If the empty directory option was checked:
- The chosen directory is also empty
If it passes these checks, the list of the image URLs are passed back.
Otherwise, a response is passed back that triggers an alert and doesn't
follow through with the download.
"""
new_dir = literal_eval(request.args.get("new_dir"))
empty_dir = literal_eval(request.args.get("empty_dir"))
img_dir = request.args.get("img_dir")
album_hash = request.args.get("album_hash")
response, img_list = check_info(new_dir, empty_dir, img_dir, album_hash)
return jsonify(response=response, img_list=img_list)
@app.route("/download_album/<hash_id>")
def download_album(hash_id):
"""
Downloads the album and returns info to the front such as current pic
number downloaded, total to be downloaded and so on.
"""
album_hash = request.args.get("album_hash")
img_dir = request.args.get("img_dir")
new_dir = literal_eval(request.args.get("new_dir"))
empty_dir = literal_eval(request.args.get("empty_dir"))
img_list = literal_eval(request.args.get("img_list"))
return Response(
download_hashes(album_hash, img_dir, hash_id, img_list),
mimetype="text/event-stream",
)
@app.route("/")
def index():
return render_template("main.html")
if __name__ == "__main__":
app.debug = True
app.run(threaded=True)
|
[
"scripts.checks.check_info",
"flask.request.args.get",
"scripts.downloader.download_hashes",
"flask.Flask",
"flask.jsonify",
"flask.render_template"
] |
[((192, 207), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (197, 207), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1164, 1191), 'flask.request.args.get', 'request.args.get', (['"""img_dir"""'], {}), "('img_dir')\n", (1180, 1191), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1209, 1239), 'flask.request.args.get', 'request.args.get', (['"""album_hash"""'], {}), "('album_hash')\n", (1225, 1239), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1266, 1317), 'scripts.checks.check_info', 'check_info', (['new_dir', 'empty_dir', 'img_dir', 'album_hash'], {}), '(new_dir, empty_dir, img_dir, album_hash)\n', (1276, 1317), False, 'from scripts.checks import check_info\n'), ((1330, 1375), 'flask.jsonify', 'jsonify', ([], {'response': 'response', 'img_list': 'img_list'}), '(response=response, img_list=img_list)\n', (1337, 1375), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1611, 1641), 'flask.request.args.get', 'request.args.get', (['"""album_hash"""'], {}), "('album_hash')\n", (1627, 1641), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1656, 1683), 'flask.request.args.get', 'request.args.get', (['"""img_dir"""'], {}), "('img_dir')\n", (1672, 1683), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((2031, 2059), 'flask.render_template', 'render_template', (['"""main.html"""'], {}), "('main.html')\n", (2046, 2059), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1061, 1088), 'flask.request.args.get', 'request.args.get', (['"""new_dir"""'], {}), "('new_dir')\n", (1077, 1088), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1119, 1148), 'flask.request.args.get', 'request.args.get', (['"""empty_dir"""'], {}), "('empty_dir')\n", (1135, 1148), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1711, 1738), 'flask.request.args.get', 'request.args.get', (['"""new_dir"""'], {}), "('new_dir')\n", (1727, 1738), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1769, 1798), 'flask.request.args.get', 'request.args.get', (['"""empty_dir"""'], {}), "('empty_dir')\n", (1785, 1798), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1828, 1856), 'flask.request.args.get', 'request.args.get', (['"""img_list"""'], {}), "('img_list')\n", (1844, 1856), False, 'from flask import Flask, jsonify, render_template, request, Response\n'), ((1888, 1943), 'scripts.downloader.download_hashes', 'download_hashes', (['album_hash', 'img_dir', 'hash_id', 'img_list'], {}), '(album_hash, img_dir, hash_id, img_list)\n', (1903, 1943), False, 'from scripts.downloader import download_hashes\n')]
|
#!/usr/bin/env python
'''
KDH at an individual k-point
'''
from functools import reduce
import numpy
from pyscf.pbc import gto
from pyscf import pbcdh, lib
#lib.num_threads(28)
cell = gto.Cell()
cell.atom='''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391
'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
#
# DF-KDH with 2x2x2 k-points
#
kpts = cell.make_kpts([2,2,2])
#kpts = cell.make_kpts([4,4,4])
#kmf = scf.KRHF(cell)#.rs_density_fit()
#kmf.kpts = kpts
#ehf = kmf.kernel()
mypt = pbcdh.KDH(cell, xc="XYG3", kpts=kpts)
mypt.max_memory = 10000
mypt.kernel()
print("PBC-XYG3 energy (per unit cell) =", mypt.e_tot)
|
[
"pyscf.pbcdh.KDH",
"pyscf.pbc.gto.Cell"
] |
[((188, 198), 'pyscf.pbc.gto.Cell', 'gto.Cell', ([], {}), '()\n', (196, 198), False, 'from pyscf.pbc import gto\n'), ((725, 762), 'pyscf.pbcdh.KDH', 'pbcdh.KDH', (['cell'], {'xc': '"""XYG3"""', 'kpts': 'kpts'}), "(cell, xc='XYG3', kpts=kpts)\n", (734, 762), False, 'from pyscf import pbcdh, lib\n')]
|
from data_collection.read_sentinel import pair_imagenames
from utils.set_user_input import set_arguments_pipeline
from utils.raster_helper import read_url_image, read_input_geometry, array2raster
import numpy as np
import rasterio
def compute_ndvi(band_inf, bands=["red", "nir"]):
"""
    This function computes the NDVI (normalized difference vegetation index)
    from the images resulting from the data catalog search.
"""
input_geometry = read_input_geometry(set_arguments_pipeline()["input_geometry"])
post_fix = "_band_info"
red_band = band_inf[bands[0] + post_fix]
nir_band = band_inf[bands[1] + post_fix]
imagepairs_url_list = pair_imagenames(red_band, nir_band)
ndvi_results = {}
progress_counter = 0
for image_pair in imagepairs_url_list:
band_red_url = [
red_url for red_url in imagepairs_url_list[image_pair] if "B04" in red_url
][0]
band_nir_url = [
nir_url for nir_url in imagepairs_url_list[image_pair] if "B08" in nir_url
][0]
band_red_image = read_url_image(band_red_url, input_geometry).astype(float)
band_nir_image = read_url_image(band_nir_url, input_geometry).astype(float)
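        # NDVI = (NIR - RED) / (NIR + RED); pixels where both bands are zero get the no-data value -999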
ndvi_result = np.empty(band_red_image.shape, dtype=rasterio.float32)
check = np.logical_or(band_red_image > 0, band_nir_image > 0)
ndvi_result = np.where(
check,
(band_nir_image - band_red_image) / (band_nir_image + band_red_image),
-999,
)
array2raster(ndvi_result, input_geometry, band_red_url)
ndvi_results[image_pair] = [ndvi_result]
progress_counter += 1
print(
"{0} of {1} images processed".format(
progress_counter, len(imagepairs_url_list)
)
)
return ndvi_results
|
[
"data_collection.read_sentinel.pair_imagenames",
"utils.raster_helper.array2raster",
"numpy.empty",
"utils.set_user_input.set_arguments_pipeline",
"numpy.where",
"utils.raster_helper.read_url_image",
"numpy.logical_or"
] |
[((663, 698), 'data_collection.read_sentinel.pair_imagenames', 'pair_imagenames', (['red_band', 'nir_band'], {}), '(red_band, nir_band)\n', (678, 698), False, 'from data_collection.read_sentinel import pair_imagenames\n'), ((1229, 1283), 'numpy.empty', 'np.empty', (['band_red_image.shape'], {'dtype': 'rasterio.float32'}), '(band_red_image.shape, dtype=rasterio.float32)\n', (1237, 1283), True, 'import numpy as np\n'), ((1300, 1353), 'numpy.logical_or', 'np.logical_or', (['(band_red_image > 0)', '(band_nir_image > 0)'], {}), '(band_red_image > 0, band_nir_image > 0)\n', (1313, 1353), True, 'import numpy as np\n'), ((1376, 1472), 'numpy.where', 'np.where', (['check', '((band_nir_image - band_red_image) / (band_nir_image + band_red_image))', '(-999)'], {}), '(check, (band_nir_image - band_red_image) / (band_nir_image +\n band_red_image), -999)\n', (1384, 1472), True, 'import numpy as np\n'), ((1524, 1579), 'utils.raster_helper.array2raster', 'array2raster', (['ndvi_result', 'input_geometry', 'band_red_url'], {}), '(ndvi_result, input_geometry, band_red_url)\n', (1536, 1579), False, 'from utils.raster_helper import read_url_image, read_input_geometry, array2raster\n'), ((475, 499), 'utils.set_user_input.set_arguments_pipeline', 'set_arguments_pipeline', ([], {}), '()\n', (497, 499), False, 'from utils.set_user_input import set_arguments_pipeline\n'), ((1064, 1108), 'utils.raster_helper.read_url_image', 'read_url_image', (['band_red_url', 'input_geometry'], {}), '(band_red_url, input_geometry)\n', (1078, 1108), False, 'from utils.raster_helper import read_url_image, read_input_geometry, array2raster\n'), ((1148, 1192), 'utils.raster_helper.read_url_image', 'read_url_image', (['band_nir_url', 'input_geometry'], {}), '(band_nir_url, input_geometry)\n', (1162, 1192), False, 'from utils.raster_helper import read_url_image, read_input_geometry, array2raster\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import numpy as np
import plotly.express as px
# pd.options.display.float_format = '${:,.2f}'.format
# Load the data
data = pd.read_excel("./data/Online_Retail.xlsx")
# remove duplicate rows
filtered_data = data.drop_duplicates()
filtered_data.columns
# Plot the bar chart of countries
filtered_data.Country.value_counts()[:10].plot(kind='bar')
# Keep only rows where Quantity is greater than zero
filtered_data = filtered_data[(filtered_data['Quantity']>0)]
# list(filtered_data.Country.unique())
filtered_data = filtered_data [['CustomerID','Description','InvoiceDate','InvoiceNo','Quantity','UnitPrice', 'Country']]
# Calculate total purchase
filtered_data['TotalPurchase'] = filtered_data['Quantity'] * filtered_data['UnitPrice']
filtered_data_group = filtered_data.groupby(['CustomerID','Country']).agg({'InvoiceDate': lambda date: (date.max() - date.min()).days,
'InvoiceNo': lambda num: len(num),
'Quantity': lambda quant: quant.sum(),
'TotalPurchase': lambda price: price.sum()})
# Change the name of columns
filtered_data_group.columns=['num_days','num_transactions','num_units','spent_money']
# Average Order Value
filtered_data_group['avg_order_value'] = filtered_data_group['spent_money']/filtered_data_group['num_transactions']
# Calculate purchase frequency
purchase_frequency = sum(filtered_data_group['num_transactions'])/filtered_data_group.shape[0]
# Repeat rate
repeat_rate = round(filtered_data_group[filtered_data_group.num_transactions > 1].shape[0]/filtered_data_group.shape[0],2)
# Churn Percentage
churn_rate = round(1-repeat_rate,2)
filtered_data_group.reset_index()
filtered_data_group['profit_margin'] = filtered_data_group['spent_money']*0.05
# Customer lifetime value (CLV) = average order value * purchase frequency / churn rate
filtered_data_group['CLV'] = (filtered_data_group['avg_order_value']*purchase_frequency)/churn_rate
# Resetting the index
filtered_data_group.reset_index(inplace = True)
# Formatting the currency fields
# filtered_data_group['spent_money', 'avg_order_value','profit_margin'] = filtered_data_group.spent_money.apply(lambda x : "{:,}".format(x))
df_plot = filtered_data.groupby(['Country','Description','UnitPrice','Quantity']).agg({'TotalPurchase': 'sum'}).reset_index()
# df2 = df1.loc[df1['Country'] == 'USA']
# px.scatter(df_plot[:25000], x="UnitPrice", y="TotalPurchase", color = 'Quantity', size='Quantity', title="Product Sales", size_max=20, log_y= True, log_x= True)
fig_UnitPriceVsQuantity = px.scatter(df_plot[:25000], x="UnitPrice", y="Quantity", color = 'Country',
size='TotalPurchase', size_max=20, log_y= True, log_x= True, title= "PURCHASE TREND ACROSS COUNTRIES")
# Formatting the float fields
var_float_filtered_group = [i for i in filtered_data_group.columns if filtered_data_group.dtypes[i]=='float64']
for i in var_float_filtered_group:
filtered_data_group[i] = filtered_data_group[i].round(2)
filtered_data_group[i].apply(lambda x : "{:,}".format(x))
var_float_filtered = [i for i in filtered_data.columns if filtered_data.dtypes[i]=='float64']
for i in var_float_filtered:
filtered_data[i] = filtered_data[i].round(2)
filtered_data[i].apply(lambda x : "{:,}".format(x))
|
[
"pandas.read_excel",
"plotly.express.scatter"
] |
[((224, 266), 'pandas.read_excel', 'pd.read_excel', (['"""./data/Online_Retail.xlsx"""'], {}), "('./data/Online_Retail.xlsx')\n", (237, 266), True, 'import pandas as pd\n'), ((2647, 2829), 'plotly.express.scatter', 'px.scatter', (['df_plot[:25000]'], {'x': '"""UnitPrice"""', 'y': '"""Quantity"""', 'color': '"""Country"""', 'size': '"""TotalPurchase"""', 'size_max': '(20)', 'log_y': '(True)', 'log_x': '(True)', 'title': '"""PURCHASE TREND ACROSS COUNTRIES"""'}), "(df_plot[:25000], x='UnitPrice', y='Quantity', color='Country',\n size='TotalPurchase', size_max=20, log_y=True, log_x=True, title=\n 'PURCHASE TREND ACROSS COUNTRIES')\n", (2657, 2829), True, 'import plotly.express as px\n')]
|
from saboteur.agent import SaboteurWebApp
import json
import unittest
from test_utils import MockShell
from saboteur.apicommands import FAULT_TYPES, alphabetical_keys
def post_request(params):
return request('POST', params)
def delete_request():
return {'path': '/',
'method': 'DELETE'}
def request(method, params):
return {'path': '/',
'method': method,
'body': json.dumps(params)}
def http_request(method, params_json):
return {'path': '/',
'method': method,
'body': params_json}
class TestAgent(unittest.TestCase):
def setUp(self):
self.shell = MockShell()
self.app = SaboteurWebApp(self.shell)
def test_successful_iptables_based_fault_returns_200_and_executes_correct_command(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'NETWORK_FAILURE',
'direction': 'IN',
'to_port': 80,
'protocol': 'TCP'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(response['status'], 200)
self.assertEqual(self.shell.last_command, 'sudo /sbin/iptables -A INPUT -p TCP -j DROP --dport 80')
def test_invalid_json_returns_400(self):
params = '{ "name": }'
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps('Not valid JSON'), response['body'])
def test_invalid_fault_type(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'WORMS'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"type": "must be present and one of " + str(alphabetical_keys(FAULT_TYPES))
}
}),
response['body'])
def test_fault_with_single_invalid_field_returns_400(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'NETWORK_FAILURE',
'to_port': 7871
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"direction": "required key not provided"
}
}),
response['body'])
def test_fault_with_multiple_invalid_fields_returns_400(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'DELAY',
'direction': 'IN',
'to_port': 7871,
'delay': 'bad',
'probability': 'worse'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"delay": "expected int",
"probability": "expected float"
}
}),
response['body'])
def test_reset(self):
self.shell.next_result = 'eth1'
response = self.app.handle(delete_request())
self.assertEqual(response['status'], 200)
self.assertEqual(self.shell.commands, [
'sudo /sbin/iptables -F',
"netstat -i | tail -n+3 | cut -f1 -d ' '",
'sudo /sbin/tc qdisc del dev eth1 root'])
def test_returns_500_when_shell_command_exits_with_non_zero(self):
params = json.dumps({
'name': 'whatever',
'type': 'NETWORK_FAILURE',
'direction': 'IN',
'to_port': 80,
'protocol': 'TCP'
})
self.shell.next_exit_code = 1
response = self.app.handle(http_request('POST', params))
self.assertEqual(500, response['status'])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"saboteur.agent.SaboteurWebApp",
"saboteur.apicommands.alphabetical_keys",
"json.dumps",
"test_utils.MockShell"
] |
[((3966, 3981), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3979, 3981), False, 'import unittest\n'), ((417, 435), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (427, 435), False, 'import json\n'), ((646, 657), 'test_utils.MockShell', 'MockShell', ([], {}), '()\n', (655, 657), False, 'from test_utils import MockShell\n'), ((677, 703), 'saboteur.agent.SaboteurWebApp', 'SaboteurWebApp', (['self.shell'], {}), '(self.shell)\n', (691, 703), False, 'from saboteur.agent import SaboteurWebApp\n'), ((815, 941), 'json.dumps', 'json.dumps', (["{'name': 'isolate-web-server', 'type': 'NETWORK_FAILURE', 'direction': 'IN',\n 'to_port': 80, 'protocol': 'TCP'}"], {}), "({'name': 'isolate-web-server', 'type': 'NETWORK_FAILURE',\n 'direction': 'IN', 'to_port': 80, 'protocol': 'TCP'})\n", (825, 941), False, 'import json\n'), ((1553, 1612), 'json.dumps', 'json.dumps', (["{'name': 'isolate-web-server', 'type': 'WORMS'}"], {}), "({'name': 'isolate-web-server', 'type': 'WORMS'})\n", (1563, 1612), False, 'import json\n'), ((2067, 2157), 'json.dumps', 'json.dumps', (["{'name': 'isolate-web-server', 'type': 'NETWORK_FAILURE', 'to_port': 7871}"], {}), "({'name': 'isolate-web-server', 'type': 'NETWORK_FAILURE',\n 'to_port': 7871})\n", (2077, 2157), False, 'import json\n'), ((2588, 2727), 'json.dumps', 'json.dumps', (["{'name': 'isolate-web-server', 'type': 'DELAY', 'direction': 'IN',\n 'to_port': 7871, 'delay': 'bad', 'probability': 'worse'}"], {}), "({'name': 'isolate-web-server', 'type': 'DELAY', 'direction':\n 'IN', 'to_port': 7871, 'delay': 'bad', 'probability': 'worse'})\n", (2598, 2727), False, 'import json\n'), ((3596, 3712), 'json.dumps', 'json.dumps', (["{'name': 'whatever', 'type': 'NETWORK_FAILURE', 'direction': 'IN',\n 'to_port': 80, 'protocol': 'TCP'}"], {}), "({'name': 'whatever', 'type': 'NETWORK_FAILURE', 'direction':\n 'IN', 'to_port': 80, 'protocol': 'TCP'})\n", (3606, 3712), False, 'import json\n'), ((1448, 1476), 'json.dumps', 'json.dumps', (['"""Not valid JSON"""'], {}), "('Not valid JSON')\n", (1458, 1476), False, 'import json\n'), ((2340, 2406), 'json.dumps', 'json.dumps', (["{'errors': {'direction': 'required key not provided'}}"], {}), "({'errors': {'direction': 'required key not provided'}})\n", (2350, 2406), False, 'import json\n'), ((2946, 3032), 'json.dumps', 'json.dumps', (["{'errors': {'delay': 'expected int', 'probability': 'expected float'}}"], {}), "({'errors': {'delay': 'expected int', 'probability':\n 'expected float'}})\n", (2956, 3032), False, 'import json\n'), ((1884, 1914), 'saboteur.apicommands.alphabetical_keys', 'alphabetical_keys', (['FAULT_TYPES'], {}), '(FAULT_TYPES)\n', (1901, 1914), False, 'from saboteur.apicommands import FAULT_TYPES, alphabetical_keys\n')]
|
import os
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Change the info here
messenger_link = "https://www.messenger.com/t/abc"
email = ""
password = ""
message = ""
os.environ["SELENIUM_SERVER_JAR"] = "selenium-server-standalone-2.41.0.jar"
browser = webdriver.Chrome("/Users/nvravicharan/Desktop/chromedriver")
# makes the browser wait if it can't find an element
browser.implicitly_wait(10)
browser.get(messenger_link)
time.sleep(2)
e_input = browser.find_element_by_xpath("//input[@id='email']")
p_input = browser.find_element_by_xpath("//input[@id='pass']")
e_input.send_keys(email)
p_input.send_keys(password)
submit = browser.find_element_by_xpath("//button[@id='loginbutton']")
submit.submit()
text_field = browser.find_element_by_xpath("//div[@aria-label='Type a message...']")
text_field.send_keys(message)
browser.find_element_by_xpath("//a[@aria-label='Send']").click()
time.sleep(5)
browser.quit()
|
[
"selenium.webdriver.Chrome",
"time.sleep"
] |
[((312, 372), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""/Users/nvravicharan/Desktop/chromedriver"""'], {}), "('/Users/nvravicharan/Desktop/chromedriver')\n", (328, 372), False, 'from selenium import webdriver\n'), ((484, 497), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (494, 497), False, 'import time\n'), ((946, 959), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (956, 959), False, 'import time\n')]
|
import ast
import numba
from numba import *
from numba import error
from numba import typesystem
from numba import visitors
from numba import nodes
from numba import function_util
from numba.exttypes import virtual
from numba.traits import traits, Delegate
class ExtensionTypeLowerer(visitors.NumbaTransformer):
"""
Lower extension type attribute accesses and method calls.
"""
def get_handler(self, ext_type):
if ext_type.is_extension and not ext_type.is_autojit_exttype:
return StaticExtensionHandler()
else:
assert ext_type.is_autojit_exttype, ext_type
return DynamicExtensionHandler()
# ______________________________________________________________________
# Attributes
def visit_ExtTypeAttribute(self, node):
"""
Resolve an extension attribute.
"""
handler = self.get_handler(node.ext_type)
self.visitchildren(node)
return handler.handle_attribute_lookup(self.env, node)
# ______________________________________________________________________
# Methods
def visit_NativeFunctionCallNode(self, node):
if node.signature.is_bound_method:
assert isinstance(node.function, nodes.ExtensionMethod)
self.visitlist(node.args)
node = self.visit_ExtensionMethod(node.function, node)
else:
self.visitchildren(node)
return node
def visit_ExtensionMethod(self, node, call_node=None):
"""
Resolve an extension method. We currently only support immediate
calls of extension methods.
"""
if call_node is None:
raise error.NumbaError(node, "Referenced extension method '%s' "
"must be called" % node.attr)
handler = self.get_handler(node.ext_type)
return handler.handle_method_call(self.env, node, call_node)
#------------------------------------------------------------------------
# Handle Static VTable Attributes and Methods
#------------------------------------------------------------------------
class StaticExtensionHandler(object):
"""
Handle attribute lookup and method calls for static extensions
with C++/Cython-like virtual method tables and static object layouts.
"""
def handle_attribute_lookup(self, env, node):
"""
Resolve an extension attribute for a static object layout.
((attributes_struct *)
(((char *) obj) + attributes_offset))->attribute
:node: ExtTypeAttribute AST node
"""
ext_type = node.value.type
offset = ext_type.attr_offset
type = ext_type.attribute_table.to_struct()
if isinstance(node.ctx, ast.Load):
value_type = type.ref() # Load result
else:
value_type = type.pointer() # Use pointer for storage
struct_pointer = nodes.value_at_offset(node.value, offset,
value_type)
result = nodes.StructAttribute(struct_pointer, node.attr,
node.ctx, type.ref())
return result
def handle_method_call(self, env, node, call_node):
"""
Resolve an extension method of a static (C++/Cython-like) vtable:
typedef {
double (*method1)(double);
...
} vtab_struct;
vtab_struct *vtab = *(vtab_struct **) (((char *) obj) + vtab_offset)
void *method = vtab[index]
"""
# Make the object we call the method on clone-able
node.value = nodes.CloneableNode(node.value)
ext_type = node.value.type
offset = ext_type.vtab_offset
vtable_struct = ext_type.vtab_type.to_struct()
vtable_struct_type = vtable_struct.ref()
vtab_struct_pointer_pointer = nodes.value_at_offset(
node.value, offset,vtable_struct_type.pointer())
vtab_struct_pointer = nodes.DereferenceNode(vtab_struct_pointer_pointer)
vmethod = nodes.StructAttribute(vtab_struct_pointer, node.attr,
ast.Load(), vtable_struct_type)
# Insert first argument 'self' in args list
args = call_node.args
args.insert(0, nodes.CloneNode(node.value))
result = nodes.NativeFunctionCallNode(node.type, vmethod, args)
return result
#------------------------------------------------------------------------
# Handle Dynamic VTable Attributes and Methods
#------------------------------------------------------------------------
@traits
class DynamicExtensionHandler(object):
"""
Handle attribute lookup and method calls for autojit extensions
with dynamic perfect-hash-based virtual method tables and dynamic
object layouts.
"""
static_handler = StaticExtensionHandler()
# TODO: Implement hash-based attribute lookup
handle_attribute_lookup = Delegate('static_handler')
def handle_method_call(self, env, node, call_node):
"""
Resolve an extension method of a dynamic hash-based vtable:
PyCustomSlots_Table ***vtab_slot = (((char *) obj) + vtab_offset)
lookup_virtual_method(*vtab_slot)
We may cache (*vtab_slot), but we may not cache (**vtab_slot), since
compilations may regenerate the table.
However, we could *preload* (**vtab_slot), where function calls
invalidate the preload, if we were so inclined.
"""
# Make the object we call the method on clone-able
node.value = nodes.CloneableNode(node.value)
ext_type = node.ext_type
func_signature = node.type #typesystem.extmethod_to_function(node.type)
offset = ext_type.vtab_offset
# __________________________________________________________________
# Retrieve vtab
vtab_ppp = nodes.value_at_offset(node.value, offset,
void.pointer().pointer())
vtab_struct_pp = nodes.DereferenceNode(vtab_ppp)
# __________________________________________________________________
# Calculate pre-hash
prehash = virtual.hash_signature(func_signature, func_signature.name)
prehash_node = nodes.ConstNode(prehash, uint64)
# __________________________________________________________________
# Retrieve method pointer
# A method is always present when it was given a static signature,
# e.g. @double(double)
always_present = node.attr in ext_type.vtab_type.methodnames
args = [vtab_struct_pp, prehash_node]
# lookup_impl = NumbaVirtualLookup()
lookup_impl = DebugVirtualLookup()
ptr = lookup_impl.lookup(env, always_present, node, args)
vmethod = ptr.coerce(func_signature.pointer())
vmethod = vmethod.cloneable
# __________________________________________________________________
# Call method pointer
# Insert first argument 'self' in args list
args = call_node.args
args.insert(0, nodes.CloneNode(node.value))
method_call = nodes.NativeFunctionCallNode(func_signature, vmethod, args)
# __________________________________________________________________
# Generate fallback
# TODO: Subclassing!
# if not always_present:
# # TODO: Enable this path and generate a phi for the result
# # Generate object call
# obj_args = [nodes.CoercionNode(arg, object_) for arg in args]
# obj_args.append(nodes.NULL)
# object_call = function_util.external_call(
# env.context, env.crnt.llvm_module,
# 'PyObject_CallMethodObjArgs', obj_args)
#
# # if vmethod != NULL: vmethod(obj, ...)
# # else: obj.method(...)
# method_call = nodes.if_else(
# ast.NotEq(),
# vmethod.clone, nodes.NULL,
# lhs=method_call, rhs=object_call)
return method_call
#------------------------------------------------------------------------
# Method lookup
#------------------------------------------------------------------------
def call_jit(jit_func, args):
return nodes.NativeCallNode(jit_func.signature, args, jit_func.lfunc)
class NumbaVirtualLookup(object):
"""
Use a numba function from numba.utility.virtuallookup to look up virtual
methods in a hash table.
"""
def lookup(self, env, always_present, node, args):
"""
:param node: ExtensionMethodNode
:param args: [vtable_node, prehash_node]
:return: The virtual method as a Node
"""
from numba.utility import virtuallookup
if always_present and False:
lookup = virtuallookup.lookup_method
else:
lookup = virtuallookup.lookup_and_assert_method
args.append(nodes.const(node.attr, c_string_type))
vmethod = call_jit(lookup, args)
return vmethod
class DebugVirtualLookup(object):
"""
Use a C utility function from numba/utility/utilities/virtuallookup.c
to look up virtual methods in a hash table.
Use for debugging.
"""
def lookup(self, env, always_present, node, args):
args.append(nodes.const(node.attr, c_string_type))
vmethod = function_util.utility_call(
env.context, env.crnt.llvm_module,
"lookup_method", args)
return vmethod
|
[
"numba.nodes.DereferenceNode",
"numba.nodes.CloneableNode",
"numba.nodes.NativeCallNode",
"ast.Load",
"numba.error.NumbaError",
"numba.exttypes.virtual.hash_signature",
"numba.nodes.const",
"numba.traits.Delegate",
"numba.nodes.NativeFunctionCallNode",
"numba.function_util.utility_call",
"numba.nodes.value_at_offset",
"numba.nodes.ConstNode",
"numba.nodes.CloneNode"
] |
[((5003, 5029), 'numba.traits.Delegate', 'Delegate', (['"""static_handler"""'], {}), "('static_handler')\n", (5011, 5029), False, 'from numba.traits import traits, Delegate\n'), ((8330, 8392), 'numba.nodes.NativeCallNode', 'nodes.NativeCallNode', (['jit_func.signature', 'args', 'jit_func.lfunc'], {}), '(jit_func.signature, args, jit_func.lfunc)\n', (8350, 8392), False, 'from numba import nodes\n'), ((2946, 2999), 'numba.nodes.value_at_offset', 'nodes.value_at_offset', (['node.value', 'offset', 'value_type'], {}), '(node.value, offset, value_type)\n', (2967, 2999), False, 'from numba import nodes\n'), ((3666, 3697), 'numba.nodes.CloneableNode', 'nodes.CloneableNode', (['node.value'], {}), '(node.value)\n', (3685, 3697), False, 'from numba import nodes\n'), ((4030, 4080), 'numba.nodes.DereferenceNode', 'nodes.DereferenceNode', (['vtab_struct_pointer_pointer'], {}), '(vtab_struct_pointer_pointer)\n', (4051, 4080), False, 'from numba import nodes\n'), ((4378, 4432), 'numba.nodes.NativeFunctionCallNode', 'nodes.NativeFunctionCallNode', (['node.type', 'vmethod', 'args'], {}), '(node.type, vmethod, args)\n', (4406, 4432), False, 'from numba import nodes\n'), ((5638, 5669), 'numba.nodes.CloneableNode', 'nodes.CloneableNode', (['node.value'], {}), '(node.value)\n', (5657, 5669), False, 'from numba import nodes\n'), ((6078, 6109), 'numba.nodes.DereferenceNode', 'nodes.DereferenceNode', (['vtab_ppp'], {}), '(vtab_ppp)\n', (6099, 6109), False, 'from numba import nodes\n'), ((6236, 6295), 'numba.exttypes.virtual.hash_signature', 'virtual.hash_signature', (['func_signature', 'func_signature.name'], {}), '(func_signature, func_signature.name)\n', (6258, 6295), False, 'from numba.exttypes import virtual\n'), ((6319, 6351), 'numba.nodes.ConstNode', 'nodes.ConstNode', (['prehash', 'uint64'], {}), '(prehash, uint64)\n', (6334, 6351), False, 'from numba import nodes\n'), ((7197, 7256), 'numba.nodes.NativeFunctionCallNode', 'nodes.NativeFunctionCallNode', (['func_signature', 'vmethod', 'args'], {}), '(func_signature, vmethod, args)\n', (7225, 7256), False, 'from numba import nodes\n'), ((9433, 9521), 'numba.function_util.utility_call', 'function_util.utility_call', (['env.context', 'env.crnt.llvm_module', '"""lookup_method"""', 'args'], {}), "(env.context, env.crnt.llvm_module,\n 'lookup_method', args)\n", (9459, 9521), False, 'from numba import function_util\n'), ((1682, 1771), 'numba.error.NumbaError', 'error.NumbaError', (['node', '("Referenced extension method \'%s\' must be called" % node.attr)'], {}), '(node, "Referenced extension method \'%s\' must be called" %\n node.attr)\n', (1698, 1771), False, 'from numba import error\n'), ((4194, 4204), 'ast.Load', 'ast.Load', ([], {}), '()\n', (4202, 4204), False, 'import ast\n'), ((4332, 4359), 'numba.nodes.CloneNode', 'nodes.CloneNode', (['node.value'], {}), '(node.value)\n', (4347, 4359), False, 'from numba import nodes\n'), ((7146, 7173), 'numba.nodes.CloneNode', 'nodes.CloneNode', (['node.value'], {}), '(node.value)\n', (7161, 7173), False, 'from numba import nodes\n'), ((9376, 9413), 'numba.nodes.const', 'nodes.const', (['node.attr', 'c_string_type'], {}), '(node.attr, c_string_type)\n', (9387, 9413), False, 'from numba import nodes\n'), ((8999, 9036), 'numba.nodes.const', 'nodes.const', (['node.attr', 'c_string_type'], {}), '(node.attr, c_string_type)\n', (9010, 9036), False, 'from numba import nodes\n')]
|
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import pickle
import datetime
# What the program can access within Calendar
# See more at https://developers.google.com/calendar/auth
scopes = ["https://www.googleapis.com/auth/calendar"]
flow = InstalledAppFlow.from_client_secrets_file("client_secret.json", scopes=scopes)
# Use this to pull the users credentials into a pickle file
#credentials = flow.run_console()
#pickle.dump(credentials, open("token.pkl", "wb"))
# Read the credentials from a saved pickle file
credentials = pickle.load(open("token.pkl", "rb"))
# Build the calendar resource
service = build("calendar", "v3", credentials=credentials)
# Store a list of Calendars on the account
result = service.calendarList().list().execute()
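# Use the second calendar (index 1) of the account's calendar list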
calendar_id = result["items"][1]["id"]
result = service.events().list(calendarId=calendar_id).execute()
def create_event(shift_information):
"""
    Create a Google Calendar event for a work shift.
    Args:
        shift_information: sequence of (start_time, end_time, date); the times are
            strings whose first two characters are the hour and last two the minutes.
"""
print("Created Event for " + str(shift_information[2]))
year = shift_information[2].year
month = shift_information[2].month
day = shift_information[2].day
start_hour = int(shift_information[0][0:2])
start_min = int(shift_information[0][-2:])
end_hour = int(shift_information[1][0:2])
end_min = int(shift_information[1][-2:])
start_date_time = datetime.datetime(year, month, day, start_hour, start_min)
end_date_time = datetime.datetime(year, month, day, end_hour, end_min)
    # If the shift carries over into the next day (an end hour of 00-09 is assumed to be past midnight)
if shift_information[1][0] == '0':
end_date_time += datetime.timedelta(days=1)
event = {
"summary": 'Work',
"location": 'Carlow D/T MSA',
"description": 'Shift',
"start": {
"dateTime": start_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"end": {
"dateTime": end_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"reminders": {
"useDefault": False,
},
}
return service.events().insert(calendarId=calendar_id, body=event, sendNotifications=True).execute()
def is_events_this_week(start_date):
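    """
    Return True if the calendar has any events within 7 days of start_date.
    """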
end_date = start_date + datetime.timedelta(days=7)
start_date = start_date.isoformat() + 'Z'
end_date = end_date.isoformat() + 'Z'
tests = service.events().list(calendarId=calendar_id, timeMin=start_date, timeMax=end_date).execute()
if not tests['items']:
return False
else:
return True
#check_events()
|
[
"google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file",
"googleapiclient.discovery.build",
"datetime.timedelta",
"datetime.datetime"
] |
[((296, 374), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['"""client_secret.json"""'], {'scopes': 'scopes'}), "('client_secret.json', scopes=scopes)\n", (337, 374), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((662, 710), 'googleapiclient.discovery.build', 'build', (['"""calendar"""', '"""v3"""'], {'credentials': 'credentials'}), "('calendar', 'v3', credentials=credentials)\n", (667, 710), False, 'from googleapiclient.discovery import build\n'), ((1419, 1477), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'start_hour', 'start_min'], {}), '(year, month, day, start_hour, start_min)\n', (1436, 1477), False, 'import datetime\n'), ((1498, 1552), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'end_hour', 'end_min'], {}), '(year, month, day, end_hour, end_min)\n', (1515, 1552), False, 'import datetime\n'), ((1667, 1693), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1685, 1693), False, 'import datetime\n'), ((2331, 2357), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (2349, 2357), False, 'import datetime\n')]
|
"""
:Created: 26 July 2015
:Author: <NAME>
"""
from django.conf.urls import url
from basecategory.views import ItemPageView
from software.models import Software
from software.views import SoftwareListView
app_name = "software"
urlpatterns = [
url(
r"^(?P<slug>[\w_-]+)/?$",
ItemPageView.as_view(),
{"items": Software},
name="item_details",
),
url(r"^$", SoftwareListView.as_view(), {"items": Software}, name="software_list"),
]
|
[
"basecategory.views.ItemPageView.as_view",
"software.views.SoftwareListView.as_view"
] |
[((298, 320), 'basecategory.views.ItemPageView.as_view', 'ItemPageView.as_view', ([], {}), '()\n', (318, 320), False, 'from basecategory.views import ItemPageView\n'), ((402, 428), 'software.views.SoftwareListView.as_view', 'SoftwareListView.as_view', ([], {}), '()\n', (426, 428), False, 'from software.views import SoftwareListView\n')]
|
from youtube_transcript_api import YouTubeTranscriptApi, _errors as YouTubeTranscriptApiErrors
from datetime import datetime
from database import con, cur # import the database connection and cursor
from youtubeAPI import youtubeAPI # import the youtubeAPI function, which simply formats requests to the YouTube API
from config import config
nextPageToken = '' # token returned by the API to access the next page of the playlist
run = True
if __name__ == '__main__':
while run:
        # fetch all of the channel's videos from the API (via the 'uploads' playlist)
        playlist = youtubeAPI('playlistItems', {
            'part': 'snippet',
            'pageToken': nextPageToken,
            'maxResults': 50, # we want 50 videos (the max. per request)
            'playlistId': config['playlistId'] # ID of the "uploads" playlist
        })
if 'nextPageToken' in playlist:
nextPageToken = playlist['nextPageToken']
else:
run = False
        videos = playlist['items'] # 'items' is the list of videos
for video in videos:
video = video['snippet']
print(f'video #{video["position"]}')
videoId = video['resourceId']['videoId']
            timestamp = datetime.strptime(video['publishedAt'], '%Y-%m-%dT%H:%M:%SZ').timestamp() # convert the ISO8601 date into a UNIX timestamp
            # fetch the top comments for the video
            topComments = [] # list holding these comments
            comments = youtubeAPI('commentThreads', {
                'part': 'snippet',
                'textFormat': 'plainText', # we want the comments as plain text
                'maxResults': 5, # we want 5 comments
                'order': 'relevance', # sort by 'relevance' (the default order)
                'videoId': videoId # ID of the video
            })['items'] # 'items' is the list of comments
            for comment in comments:
                topComments.append(comment['snippet']['topLevelComment']['snippet']['textDisplay']) # append the comment (text only) to the list
try:
                # fetch the video's French subtitles (automatic and manual), see https://pypi.org/project/youtube-transcript-api
                transcripts = YouTubeTranscriptApi.list_transcripts(videoId) # get the list of transcripts for the video
            except YouTubeTranscriptApiErrors.TranscriptsDisabled: # subtitles are disabled for this video
                pass # ignore the error, an empty string "" will be stored in the db
            autoCaptionsList = [] # list holding the automatic subtitles
            try:
                autoCaptions = transcripts.find_generated_transcript(['fr']).fetch() # get the automatic French subtitles
                for caption in autoCaptions:
                    autoCaptionsList.append(caption['text']) # append the caption text to the list
            except YouTubeTranscriptApiErrors.NoTranscriptFound: # subtitles are disabled / unavailable
                pass # ignore the error, an empty string "" will be stored in the db
            manualCaptionsList = [] # list holding the manual subtitles
            try:
                manualCaptions = transcripts.find_manually_created_transcript(['fr']).fetch() # get the manual French subtitles
                for caption in manualCaptions:
                    manualCaptionsList.append(caption['text']) # append the caption text to the list
            except YouTubeTranscriptApiErrors.NoTranscriptFound: # subtitles are unavailable
                pass # ignore the error, an empty string "" will be stored in the db
            # create a new row in the db
cur.execute('INSERT INTO videos(id, title, description, timestamp, topComments, autoCaptions, manualCaptions) VALUES(?, ?, ?, ?, ?, ?, ?)',
(videoId, video['title'], video['description'], timestamp, '\n'.join(topComments), '\n'.join(autoCaptionsList), '\n'.join(manualCaptionsList)))
con.commit()
con.close()
|
[
"youtube_transcript_api.YouTubeTranscriptApi.list_transcripts",
"datetime.datetime.strptime",
"youtubeAPI.youtubeAPI",
"database.con.close",
"database.con.commit"
] |
[((4244, 4255), 'database.con.close', 'con.close', ([], {}), '()\n', (4253, 4255), False, 'from database import con, cur\n'), ((629, 763), 'youtubeAPI.youtubeAPI', 'youtubeAPI', (['"""playlistItems"""', "{'part': 'snippet', 'pageToken': nextPageToken, 'maxResults': 50,\n 'playlistId': config['playlistId']}"], {}), "('playlistItems', {'part': 'snippet', 'pageToken': nextPageToken,\n 'maxResults': 50, 'playlistId': config['playlistId']})\n", (639, 763), False, 'from youtubeAPI import youtubeAPI\n'), ((4231, 4243), 'database.con.commit', 'con.commit', ([], {}), '()\n', (4241, 4243), False, 'from database import con, cur\n'), ((1562, 1701), 'youtubeAPI.youtubeAPI', 'youtubeAPI', (['"""commentThreads"""', "{'part': 'snippet', 'textFormat': 'plainText', 'maxResults': 5, 'order':\n 'relevance', 'videoId': videoId}"], {}), "('commentThreads', {'part': 'snippet', 'textFormat': 'plainText',\n 'maxResults': 5, 'order': 'relevance', 'videoId': videoId})\n", (1572, 1701), False, 'from youtubeAPI import youtubeAPI\n'), ((2372, 2418), 'youtube_transcript_api.YouTubeTranscriptApi.list_transcripts', 'YouTubeTranscriptApi.list_transcripts', (['videoId'], {}), '(videoId)\n', (2409, 2418), False, 'from youtube_transcript_api import YouTubeTranscriptApi, _errors as YouTubeTranscriptApiErrors\n'), ((1296, 1357), 'datetime.datetime.strptime', 'datetime.strptime', (["video['publishedAt']", '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(video['publishedAt'], '%Y-%m-%dT%H:%M:%SZ')\n", (1313, 1357), False, 'from datetime import datetime\n')]
|
from tests.utils import W3CTestCase
class TestVerticalAlignSub(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'vertical-align-sub-'))
|
[
"tests.utils.W3CTestCase.find_tests"
] |
[((96, 151), 'tests.utils.W3CTestCase.find_tests', 'W3CTestCase.find_tests', (['__file__', '"""vertical-align-sub-"""'], {}), "(__file__, 'vertical-align-sub-')\n", (118, 151), False, 'from tests.utils import W3CTestCase\n')]
|
import torch
import torch.nn as nn
class PositionalEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
max_len: int = 512
) -> None:
super(PositionalEncoder, self).__init__()
self._embed_dim = embed_dim
self._max_len = max_len
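        # Sinusoidal positional encoding: angle(pos, i) = pos / 10000**(2 * (i // 2) / embed_dim),
        # with sin applied to even embedding dimensions and cos to odd ones below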
self._embed_matrix = torch.tensor(
[[pos / pow(1.0e4, 2.0 * (i // 2) / self._embed_dim) for i in range(self._embed_dim)] for pos in range(self._max_len)]
)
self._embed_matrix[:, 0::2] = torch.sin(self._embed_matrix[:, 0::2])
self._embed_matrix[:, 1::2] = torch.cos(self._embed_matrix[:, 1::2])
self._embedder = nn.Embedding(self._max_len, self._embed_dim)
self._embedder.weight = nn.Parameter(self._embed_matrix, requires_grad=False)
def forward(self, embed: torch.Tensor) -> torch.Tensor:
token_len = embed.size()[1]
if embed.is_cuda:
ids = torch.cuda.LongTensor([l for l in range(token_len)])
else:
ids = torch.LongTensor([l for l in range(token_len)])
embed += self._embedder(ids)
return embed
|
[
"torch.nn.Parameter",
"torch.cos",
"torch.nn.Embedding",
"torch.sin"
] |
[((511, 549), 'torch.sin', 'torch.sin', (['self._embed_matrix[:, 0::2]'], {}), '(self._embed_matrix[:, 0::2])\n', (520, 549), False, 'import torch\n'), ((588, 626), 'torch.cos', 'torch.cos', (['self._embed_matrix[:, 1::2]'], {}), '(self._embed_matrix[:, 1::2])\n', (597, 626), False, 'import torch\n'), ((652, 696), 'torch.nn.Embedding', 'nn.Embedding', (['self._max_len', 'self._embed_dim'], {}), '(self._max_len, self._embed_dim)\n', (664, 696), True, 'import torch.nn as nn\n'), ((729, 782), 'torch.nn.Parameter', 'nn.Parameter', (['self._embed_matrix'], {'requires_grad': '(False)'}), '(self._embed_matrix, requires_grad=False)\n', (741, 782), True, 'import torch.nn as nn\n')]
|
import pandas as pd
from sklearn.preprocessing import LabelEncoder
# read dataset file
df = pd.read_csv("../data/Android_Permission.csv", header=0, delimiter=',')
# Drop uninformative columns: those where the value 0 appears more than 28999 times or fewer than 1000 times.
dropper = []
for col in df.columns[10:]:
if (df[col].value_counts()[0] > 28999 or df[col].value_counts()[0] < 1000):
dropper.append(col)
df = df.drop(df[dropper], axis = 1)
# Drop Related apps column
df = df.drop(['Related apps'], axis = 1)
# Drop rows with missing values
df = df.dropna()
# encoding Category column
le = LabelEncoder()
df['Category'] = le.fit_transform(df['Category'])
# Encode the remaining text columns using patterns typical of malicious apps
## number of uppercase letters in each of the three text columns
df['App_Upper'] = df['App'].apply(lambda message: sum(1 for c in str(message) if c.isupper()))
df['Pack_Upper'] = df['Package'].apply(lambda message: sum(1 for c in str(message) if c.isupper()))
df['Description_Upper'] = df['Description'].apply(lambda message: sum(1 for c in str(message) if c.isupper()))
## number of periods in 'Package'
df['Pack_Periods'] = df['Package'].apply(lambda message: sum(1 for c in str(message) if '.' in c))
## words such as "free" or "better" in the name
df['App_Free_Better'] = df['App'].str.contains('free|better').astype(int)
df = df.drop(['App'], axis = 1)
df = df.drop(['Package'], axis = 1)
df = df.drop(['Description'], axis = 1)
|
[
"pandas.read_csv",
"sklearn.preprocessing.LabelEncoder"
] |
[((93, 163), 'pandas.read_csv', 'pd.read_csv', (['"""../data/Android_Permission.csv"""'], {'header': '(0)', 'delimiter': '""","""'}), "('../data/Android_Permission.csv', header=0, delimiter=',')\n", (104, 163), True, 'import pandas as pd\n'), ((567, 581), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (579, 581), False, 'from sklearn.preprocessing import LabelEncoder\n')]
|
from storage.models.base import *
from sqlalchemy.orm import validates
class Fighter(Base):
__tablename__ = 'fighters'
id = Column(Integer, primary_key=True)
ref = Column(String(STR_SIZE), unique=True, nullable=False)
name = Column(String(STR_SIZE), nullable=False)
country = Column(String(STR_SIZE))
city = Column(String(STR_SIZE))
birthday = Column(Date, nullable=False)
height = Column(Integer) # centimeters
weight = Column(Float) # kg
reach = Column(Integer) # centimeters
specialization = Column(String(STR_SIZE))
fights = relationship(
"Fight",
primaryjoin="or_(Fighter.id == Fight.fighter1_id, Fighter.id == Fight.fighter2_id)")
@validates('height')
def validate_height(self, key, height):
assert height > 0
return height
@validates('weight')
def validate_weight(self, key, weight):
assert weight > 0
return weight
@validates('reach')
def validate_reach(self, key, reach):
assert reach > 0
return reach
|
[
"sqlalchemy.orm.validates"
] |
[((722, 741), 'sqlalchemy.orm.validates', 'validates', (['"""height"""'], {}), "('height')\n", (731, 741), False, 'from sqlalchemy.orm import validates\n'), ((840, 859), 'sqlalchemy.orm.validates', 'validates', (['"""weight"""'], {}), "('weight')\n", (849, 859), False, 'from sqlalchemy.orm import validates\n'), ((958, 976), 'sqlalchemy.orm.validates', 'validates', (['"""reach"""'], {}), "('reach')\n", (967, 976), False, 'from sqlalchemy.orm import validates\n')]
|
# -*- coding: utf-8 -*-
"""
Created on January 24, 2018
@author: neerbek
"""
# -*- coding: utf-8 -*-
import os
os.chdir("../../taboo-core")
from numpy.random import RandomState # type: ignore
from sklearn.cluster import KMeans # type: ignore
import ai_util
import confusion_matrix
import kmeans_cluster_util as kutil
import similarity.load_trees as load_trees
# import pylab # type: ignore
import matplotlib.pyplot as plt
import importlib
# importlib.reload(kutil)
importlib.reload(confusion_matrix)
# for information type 203
#
#
# run rnn on data (very low emb size)
# OMP_NUM_THREADS=3 ipython3 functionality/train_model.py -- -traintrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$test.txt -validtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$dev.txt -testtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$test.txt -nx 50 -nh 20 -lr 0.5 -L1_reg 0 -L2_reg 0 -n_epochs -1 -retain_probability 1 -batch_size 90 -valid_batch_size 300 -glove_path ../code/glove/ -train_report_frequency 445/5 -validation_frequency 445 -file_prefix save_exp164
# Epoch 114. On validation set: Best (110, 1.065507, 77.4675%)
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$train.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings -max_tree_count 100 -max_count 100 -max_embedding_count 10000
#
# for realz
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$train.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings > train.txt
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$dev.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings > dev.txt
#
# zip output_embeddings_exp164_e480.zip train.txt dev.txt
# rm train.txt dev.txt
# mv output_embeddings_exp164_e480.zip output
# don't add to git (for now), we should make a backup
totaltimer = ai_util.Timer("Total time: ")
traintimer = ai_util.Timer("Train time: ")
totaltimer.begin()
inputfileTrain = "output/output_embeddings_exp164_e120.zip$train.txt"
inputfileTrain = "output/output_embeddings_exp164_e480.zip$train.txt"
linesTrainFull = confusion_matrix.read_embeddings(inputfileTrain, max_line_count=-1)
linesTrain = [linesTrainFull[i] for i in range(60000)]
inputfileDev = "output/output_embeddings_exp164_e120.zip$dev.txt"
inputfileDev = "output/output_embeddings_exp164_e480.zip$dev.txt"
linesDev = confusion_matrix.read_embeddings(inputfileDev, max_line_count=-1)
numberOfClusters = 35
randomSeed = 7485
doShow = True
low = 3 # 03
high = 22 # 16 not good
rng = RandomState(randomSeed)
aTrain = confusion_matrix.get_embedding_matrix(linesTrain, normalize=True)
aTrainFull = confusion_matrix.get_embedding_matrix(linesTrainFull, normalize=True)
aDev = confusion_matrix.get_embedding_matrix(linesDev, normalize=True)
kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)
# kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrainFull)
# kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aDev)
# sort_order = kutil.get_cluster_sen_ratios_sort_order(aTrainFull, linesTrainFull, kmeans)
sort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)
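# sort_order ranks the cluster ids by their sensitive-sentence ratio on the training split;
# below, the first `high` clusters in this order form split C1 and the remainder form split C2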
show = kutil.SHOW_ALL
if doShow:
# plot
y1 = kutil.get_cluster_sen_ratios(aTrain, linesTrain, kmeans, sort_order)
y2 = kutil.getScaledSizes(aTrain, kmeans, sort_order)
# y1 = kutil.get_cluster_sen_ratios(aTrainFull, linesTrainFull, kmeans, sort_order)
# y2 = getScaledSizes(aTrainFull, kmeans, sort_order)
# y3 = kutil.get_cluster_sen_ratios(aDev, linesDev, kmeans, sort_order)
# y4 = getScaledSizes(aDev, kmeans, sort_order)
x = kutil.getXRange(show, low, high, numberOfClusters)
y1 = [y1[i] for i in x]
y2 = [y2[i] for i in x]
# y3 = [y3[i] for i in x]
# y4 = [y4[i] for i in x]
confusion_matrix.new_graph('Clusters', 'Ratio')
plt.plot(x, y1, 'k-', label='Sensitivity')
plt.plot(x, y2, 'k+', label='Accumulate size')
# plt.plot(x, y3, 'b-', label='Sensitivity Dev')
# plt.plot(x, y4, 'b+', label='Accumulate size Dev')
if show == kutil.SHOW_ALL:
# plt.plot((low, low), (0, 1), 'k-')
plt.plot((high, high), (0, 1), 'k:')
plt.legend()
plt.savefig('ijcai18_plot_sensitive_sorted_203.eps')
# plt.savefig('tmp.eps')
# plt.show() don't call show from an interactive prompt :(
# https://github.com/matplotlib/matplotlib/issues/8505/
clusterIds = sort_order  # clusterIds == sort_order, it's just syntactic sugar
(linesC1, aC1) = kutil.get_sentences_from_clusters(clusterIds[:high], linesTrainFull, aTrainFull, kmeans)
(linesC2, aC2) = kutil.get_sentences_from_clusters(clusterIds[high:], linesTrainFull, aTrainFull, kmeans)
(lines2C1, a2C1) = kutil.get_sentences_from_clusters(clusterIds[:high], linesDev, aDev, kmeans)
(lines2C2, a2C2) = kutil.get_sentences_from_clusters(clusterIds[high:], linesDev, aDev, kmeans)
print(len(linesC1), len(linesC2))
print(len(lines2C1), len(lines2C2))
# after some iterations (unknown random seed)
# 78442 45824
# 17034 9966 (27000)
kutil.get_base_accuracy(linesC1, "train C1").report()
kutil.get_base_accuracy(linesC2, "train C2").report()
# if we want to validation score
kutil.get_base_accuracy(lines2C1, "dev C1").report()
kutil.get_base_accuracy(lines2C2, "dev C2").report()
# don't know if these values are updated!
# Accuracy (train C1): 0.9432 (0.6436), f1=0.9179 (24901 1398 49089 3054)
# Accuracy (train C2): 0.9871 (0.0128), f1=0.9935 (45224 579 8 13)
# Accuracy (dev C1): 0.9304 (0.6318), f1=0.9023 (5470 383 10379 802)
# Accuracy (dev C2): 0.9832 (0.0167), f1=0.9915 (9796 163 3 4)
|
[
"confusion_matrix.read_embeddings",
"ai_util.Timer",
"matplotlib.pyplot.plot",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.legend",
"kmeans_cluster_util.get_base_accuracy",
"numpy.random.RandomState",
"kmeans_cluster_util.get_cluster_sen_ratios",
"importlib.reload",
"confusion_matrix.new_graph",
"kmeans_cluster_util.get_cluster_sen_ratios_sort_order",
"kmeans_cluster_util.getScaledSizes",
"kmeans_cluster_util.get_sentences_from_clusters",
"kmeans_cluster_util.getXRange",
"confusion_matrix.get_embedding_matrix",
"os.chdir",
"matplotlib.pyplot.savefig"
] |
[((114, 142), 'os.chdir', 'os.chdir', (['"""../../taboo-core"""'], {}), "('../../taboo-core')\n", (122, 142), False, 'import os\n'), ((473, 507), 'importlib.reload', 'importlib.reload', (['confusion_matrix'], {}), '(confusion_matrix)\n', (489, 507), False, 'import importlib\n'), ((2524, 2553), 'ai_util.Timer', 'ai_util.Timer', (['"""Total time: """'], {}), "('Total time: ')\n", (2537, 2553), False, 'import ai_util\n'), ((2567, 2596), 'ai_util.Timer', 'ai_util.Timer', (['"""Train time: """'], {}), "('Train time: ')\n", (2580, 2596), False, 'import ai_util\n'), ((2774, 2841), 'confusion_matrix.read_embeddings', 'confusion_matrix.read_embeddings', (['inputfileTrain'], {'max_line_count': '(-1)'}), '(inputfileTrain, max_line_count=-1)\n', (2806, 2841), False, 'import confusion_matrix\n'), ((3040, 3105), 'confusion_matrix.read_embeddings', 'confusion_matrix.read_embeddings', (['inputfileDev'], {'max_line_count': '(-1)'}), '(inputfileDev, max_line_count=-1)\n', (3072, 3105), False, 'import confusion_matrix\n'), ((3210, 3233), 'numpy.random.RandomState', 'RandomState', (['randomSeed'], {}), '(randomSeed)\n', (3221, 3233), False, 'from numpy.random import RandomState\n'), ((3243, 3308), 'confusion_matrix.get_embedding_matrix', 'confusion_matrix.get_embedding_matrix', (['linesTrain'], {'normalize': '(True)'}), '(linesTrain, normalize=True)\n', (3280, 3308), False, 'import confusion_matrix\n'), ((3322, 3391), 'confusion_matrix.get_embedding_matrix', 'confusion_matrix.get_embedding_matrix', (['linesTrainFull'], {'normalize': '(True)'}), '(linesTrainFull, normalize=True)\n', (3359, 3391), False, 'import confusion_matrix\n'), ((3399, 3462), 'confusion_matrix.get_embedding_matrix', 'confusion_matrix.get_embedding_matrix', (['linesDev'], {'normalize': '(True)'}), '(linesDev, normalize=True)\n', (3436, 3462), False, 'import confusion_matrix\n'), ((3799, 3866), 'kmeans_cluster_util.get_cluster_sen_ratios_sort_order', 'kutil.get_cluster_sen_ratios_sort_order', (['aTrain', 'linesTrain', 'kmeans'], {}), '(aTrain, linesTrain, kmeans)\n', (3838, 3866), True, 'import kmeans_cluster_util as kutil\n'), ((5202, 5294), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[:high]', 'linesTrainFull', 'aTrainFull', 'kmeans'], {}), '(clusterIds[:high], linesTrainFull,\n aTrainFull, kmeans)\n', (5235, 5294), True, 'import kmeans_cluster_util as kutil\n'), ((5308, 5400), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[high:]', 'linesTrainFull', 'aTrainFull', 'kmeans'], {}), '(clusterIds[high:], linesTrainFull,\n aTrainFull, kmeans)\n', (5341, 5400), True, 'import kmeans_cluster_util as kutil\n'), ((5416, 5492), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[:high]', 'linesDev', 'aDev', 'kmeans'], {}), '(clusterIds[:high], linesDev, aDev, kmeans)\n', (5449, 5492), True, 'import kmeans_cluster_util as kutil\n'), ((5512, 5588), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[high:]', 'linesDev', 'aDev', 'kmeans'], {}), '(clusterIds[high:], linesDev, aDev, kmeans)\n', (5545, 5588), True, 'import kmeans_cluster_util as kutil\n'), ((3922, 3990), 'kmeans_cluster_util.get_cluster_sen_ratios', 'kutil.get_cluster_sen_ratios', (['aTrain', 'linesTrain', 'kmeans', 'sort_order'], {}), '(aTrain, linesTrain, kmeans, sort_order)\n', (3950, 3990), True, 'import kmeans_cluster_util as kutil\n'), ((4000, 4048), 
'kmeans_cluster_util.getScaledSizes', 'kutil.getScaledSizes', (['aTrain', 'kmeans', 'sort_order'], {}), '(aTrain, kmeans, sort_order)\n', (4020, 4048), True, 'import kmeans_cluster_util as kutil\n'), ((4331, 4381), 'kmeans_cluster_util.getXRange', 'kutil.getXRange', (['show', 'low', 'high', 'numberOfClusters'], {}), '(show, low, high, numberOfClusters)\n', (4346, 4381), True, 'import kmeans_cluster_util as kutil\n'), ((4502, 4549), 'confusion_matrix.new_graph', 'confusion_matrix.new_graph', (['"""Clusters"""', '"""Ratio"""'], {}), "('Clusters', 'Ratio')\n", (4528, 4549), False, 'import confusion_matrix\n'), ((4554, 4596), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1', '"""k-"""'], {'label': '"""Sensitivity"""'}), "(x, y1, 'k-', label='Sensitivity')\n", (4562, 4596), True, 'import matplotlib.pyplot as plt\n'), ((4601, 4647), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2', '"""k+"""'], {'label': '"""Accumulate size"""'}), "(x, y2, 'k+', label='Accumulate size')\n", (4609, 4647), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4895), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4893, 4895), True, 'import matplotlib.pyplot as plt\n'), ((4900, 4952), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ijcai18_plot_sensitive_sorted_203.eps"""'], {}), "('ijcai18_plot_sensitive_sorted_203.eps')\n", (4911, 4952), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3526), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numberOfClusters', 'random_state': 'rng'}), '(n_clusters=numberOfClusters, random_state=rng)\n', (3479, 3526), False, 'from sklearn.cluster import KMeans\n'), ((4842, 4878), 'matplotlib.pyplot.plot', 'plt.plot', (['(high, high)', '(0, 1)', '"""k:"""'], {}), "((high, high), (0, 1), 'k:')\n", (4850, 4878), True, 'import matplotlib.pyplot as plt\n'), ((5742, 5786), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['linesC1', '"""train C1"""'], {}), "(linesC1, 'train C1')\n", (5765, 5786), True, 'import kmeans_cluster_util as kutil\n'), ((5796, 5840), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['linesC2', '"""train C2"""'], {}), "(linesC2, 'train C2')\n", (5819, 5840), True, 'import kmeans_cluster_util as kutil\n'), ((5883, 5926), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['lines2C1', '"""dev C1"""'], {}), "(lines2C1, 'dev C1')\n", (5906, 5926), True, 'import kmeans_cluster_util as kutil\n'), ((5936, 5979), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['lines2C2', '"""dev C2"""'], {}), "(lines2C2, 'dev C2')\n", (5959, 5979), True, 'import kmeans_cluster_util as kutil\n')]
|
"""
* Copyright (c) 2022, <NAME> <<EMAIL>>
*
* SPDX-License-Identifier: BSD-2-Clause
Compiles a database of proxy servers with their respective metadata.
Links:
https://geonode.com/free-proxy-list
https://proxylist.geonode.com/api/proxy-list?limit=1000&page=1
Custom proxy key value pair:
key = ip (192.168.127.12)
value = (this dict)
{
"port":
"anonymityLevel":
"protocols": [list of supported protocols]
"google": (true or false if google approved proxy)
"org": [list of all orgs and asn]
"latency":
"responseTime":
"upTime":
"upTimeTryCount":
"created_at":
"updated_at":
"hostname":
"city":
"region":
"postal":
"country":
"timezone":
"loc":
"corruptionindex":
"entry_time":
}
"""
from typing import Iterable, Any
from math import ceil
from datetime import timedelta
from json import loads
from aiohttp import ClientSession
from ipinfo import create_ip_info_parser
from asynchttprequest import AsyncRequest, run_async_requests, ParseRequest
from database import Database
from curlget import curl_get_json
from requestlogging import log_request, get_default_logger, log_db_entry_status
from utility import try_get_key, extract_keys, str_join
from config import IP_DB_NAME, PROXY_DB_NAME
PROXYLIST_RESPONSE_KEYS = (
"anonymityLevel",
"protocols",
"google",
"org",
"speed",
"latency",
"responseTime",
"upTime",
"upTimeTryCount",
"created_at",
"updated_at",
)
def forge_proxy_entry(ip_info: dict[str, str], proxylist: dict[str, str]) -> dict[str, Any]:
"""
    Creates the custom database entry for a proxy's data.
"""
db_entry = {**extract_keys(proxylist, PROXYLIST_RESPONSE_KEYS), **ip_info}
# Creates string of all possible ip origin names.
db_entry["org"] = ";".join(
origin
for origin in (
try_get_key("org", ip_info),
try_get_key("asn", proxylist),
try_get_key("org", proxylist),
try_get_key("isp", proxylist),
)
if origin is not None
)
    # str.replace returns a new string, so assign the cleaned timestamps back
    db_entry["created_at"] = db_entry["created_at"].replace("T", " ").replace("Z", "")
    db_entry["updated_at"] = db_entry["updated_at"].replace("T", " ").replace("Z", "")
# db_entry["corruptionindex"] = get_corruption_index(ip_info["country"])
return db_entry
def create_proxy_data_parser(
proxy_db: Database, ip_db: Database, proxy_expire_time: timedelta, ip_expire_time: timedelta
) -> ParseRequest:
parse_ip_info = create_ip_info_parser(ip_db, ip_expire_time)
async def parse_proxy_data(session: ClientSession, proxy_data: dict[str, str]) -> None:
"""
        Retrieves and stores a proxy's data, including its ip address data separately.
"""
ip_address = proxy_data["ip"]
await parse_ip_info(session, ip_address)
ip_and_port = f"{ip_address}:{proxy_data['port']}"
if proxy_db.key_expired(ip_and_port, proxy_expire_time):
ip_info = ip_db.get(ip_address)
db_entry = forge_proxy_entry(ip_info, proxy_data)
proxy_db.store_entry(ip_and_port, db_entry)
return parse_proxy_data
def fetch_proxylist(page_limit: int, request_limit: int) -> Iterable[dict[str, str]]:
"""
    Asynchronously requests a list of proxies from proxylist.geonode.com.
"""
base_url = "https://proxylist.geonode.com"
api_ref_template = "/api/proxy-list?limit={}&page={{}}"
proxylist_api_template = api_ref_template.format(page_limit)
    # format twice: the first call fills in the limit, the second the page number
    single_proxy_query_url = str_join(base_url, api_ref_template.format(1).format(1))
log = get_default_logger()
request = AsyncRequest("GET", "", headers={"Accept": "application/json"})
responses = []
def fetch_page_range():
# Get the range of page numbers to use for requesting all proxies
# currently available from the api.
response = curl_get_json(single_proxy_query_url)
if response is None:
log.error("Could not fetch proxy count from %s", single_proxy_query_url)
return range(0)
proxy_count = response["total"]
request_count = ceil(proxy_count / page_limit)
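        # e.g. a reported total of 2345 proxies with page_limit=100 gives
        # ceil(2345 / 100) = 24 pages, so the pages to request are range(1, 25)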
return range(1, request_count + 1)
async def proxylist_request(session: ClientSession, page_number: int):
request.url = proxylist_api_template.format(page_number)
resp = await log_request(request, session)
        # If the response is None, an error occurred and the fetch could not be made.
if resp is None:
log.warning("Could not fetch proxylist from %s", request.url)
return
        # The response JSON contains a 'data' key holding the list of proxies.
proxylist_data = loads(resp)["data"]
log.info("Fetched %d proxies from page %d", len(proxylist_data), page_number)
responses.append(proxylist_data)
run_async_requests(
fetch_page_range(), proxylist_request, base_url=base_url, limit=request_limit
)
    # Flatten the per-page proxy lists into a single iterable of proxy dicts.
return (proxy_data for proxylist in responses for proxy_data in proxylist)
def proxy_scraper(
proxy_db: Database,
ip_db: Database,
proxy_expire_time: timedelta,
ip_expire_time: timedelta,
limit: int,
):
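    """
    Fetch the current proxy list, store each proxy (and its ip info) in the
    databases, then log how many new proxy and ip entries were added.
    """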
proxylist = fetch_proxylist(100, limit)
prev_proxy_db_count = proxy_db.get_count()
prev_ip_db_count = ip_db.get_count()
run_async_requests(
proxylist,
create_proxy_data_parser(proxy_db, ip_db, proxy_expire_time, ip_expire_time),
limit=limit,
)
# Log new ip and proxies entries.
new_proxies_count = proxy_db.get_count() - prev_proxy_db_count
new_ips_count = ip_db.get_count() - prev_ip_db_count
log_db_entry_status(new_proxies_count, PROXY_DB_NAME)
log_db_entry_status(new_ips_count, IP_DB_NAME)
|
[
"requestlogging.log_db_entry_status",
"json.loads",
"utility.extract_keys",
"requestlogging.get_default_logger",
"math.ceil",
"curlget.curl_get_json",
"asynchttprequest.AsyncRequest",
"requestlogging.log_request",
"utility.try_get_key",
"ipinfo.create_ip_info_parser"
] |
[((2571, 2615), 'ipinfo.create_ip_info_parser', 'create_ip_info_parser', (['ip_db', 'ip_expire_time'], {}), '(ip_db, ip_expire_time)\n', (2592, 2615), False, 'from ipinfo import create_ip_info_parser\n'), ((3662, 3682), 'requestlogging.get_default_logger', 'get_default_logger', ([], {}), '()\n', (3680, 3682), False, 'from requestlogging import log_request, get_default_logger, log_db_entry_status\n'), ((3697, 3760), 'asynchttprequest.AsyncRequest', 'AsyncRequest', (['"""GET"""', '""""""'], {'headers': "{'Accept': 'application/json'}"}), "('GET', '', headers={'Accept': 'application/json'})\n", (3709, 3760), False, 'from asynchttprequest import AsyncRequest, run_async_requests, ParseRequest\n'), ((5770, 5823), 'requestlogging.log_db_entry_status', 'log_db_entry_status', (['new_proxies_count', 'PROXY_DB_NAME'], {}), '(new_proxies_count, PROXY_DB_NAME)\n', (5789, 5823), False, 'from requestlogging import log_request, get_default_logger, log_db_entry_status\n'), ((5828, 5874), 'requestlogging.log_db_entry_status', 'log_db_entry_status', (['new_ips_count', 'IP_DB_NAME'], {}), '(new_ips_count, IP_DB_NAME)\n', (5847, 5874), False, 'from requestlogging import log_request, get_default_logger, log_db_entry_status\n'), ((1778, 1826), 'utility.extract_keys', 'extract_keys', (['proxylist', 'PROXYLIST_RESPONSE_KEYS'], {}), '(proxylist, PROXYLIST_RESPONSE_KEYS)\n', (1790, 1826), False, 'from utility import try_get_key, extract_keys, str_join\n'), ((3946, 3983), 'curlget.curl_get_json', 'curl_get_json', (['single_proxy_query_url'], {}), '(single_proxy_query_url)\n', (3959, 3983), False, 'from curlget import curl_get_json\n'), ((4192, 4222), 'math.ceil', 'ceil', (['(proxy_count / page_limit)'], {}), '(proxy_count / page_limit)\n', (4196, 4222), False, 'from math import ceil\n'), ((4428, 4457), 'requestlogging.log_request', 'log_request', (['request', 'session'], {}), '(request, session)\n', (4439, 4457), False, 'from requestlogging import log_request, get_default_logger, log_db_entry_status\n'), ((4751, 4762), 'json.loads', 'loads', (['resp'], {}), '(resp)\n', (4756, 4762), False, 'from json import loads\n'), ((1976, 2003), 'utility.try_get_key', 'try_get_key', (['"""org"""', 'ip_info'], {}), "('org', ip_info)\n", (1987, 2003), False, 'from utility import try_get_key, extract_keys, str_join\n'), ((2017, 2046), 'utility.try_get_key', 'try_get_key', (['"""asn"""', 'proxylist'], {}), "('asn', proxylist)\n", (2028, 2046), False, 'from utility import try_get_key, extract_keys, str_join\n'), ((2060, 2089), 'utility.try_get_key', 'try_get_key', (['"""org"""', 'proxylist'], {}), "('org', proxylist)\n", (2071, 2089), False, 'from utility import try_get_key, extract_keys, str_join\n'), ((2103, 2132), 'utility.try_get_key', 'try_get_key', (['"""isp"""', 'proxylist'], {}), "('isp', proxylist)\n", (2114, 2132), False, 'from utility import try_get_key, extract_keys, str_join\n')]
|
import gdbremote_testcase
import lldbgdbserverutils
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase):
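    """Checks that qProcessInfo reports a usable pid, a valid endianness, and
    the key set expected from the stub under test (debugserver vs llgs)."""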
mydir = TestBase.compute_mydir(__file__)
def test_qProcessInfo_returns_running_process(self):
self.build()
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the process id looks reasonable.
pid_text = process_info.get("pid")
self.assertIsNotNone(pid_text)
pid = int(pid_text, base=16)
self.assertNotEqual(0, pid)
# If possible, verify that the process is running.
self.assertTrue(lldbgdbserverutils.process_is_running(pid, True))
def test_attach_commandline_qProcessInfo_reports_correct_pid(self):
self.build()
self.set_inferior_startup_attach()
procs = self.prep_debug_monitor_and_inferior()
self.assertIsNotNone(procs)
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the process id matches what we expected.
pid_text = process_info.get('pid', None)
self.assertIsNotNone(pid_text)
reported_pid = int(pid_text, base=16)
self.assertEqual(reported_pid, procs["inferior"].pid)
def test_qProcessInfo_reports_valid_endian(self):
self.build()
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
        # Ensure the reported endianness looks reasonable.
endian = process_info.get("endian")
self.assertIsNotNone(endian)
self.assertIn(endian, ["little", "big", "pdp"])
def qProcessInfo_contains_keys(self, expected_key_set):
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the expected keys are present and non-None within the process
# info.
missing_key_set = set()
for expected_key in expected_key_set:
if expected_key not in process_info:
missing_key_set.add(expected_key)
self.assertEqual(
missing_key_set,
set(),
"the listed keys are missing in the qProcessInfo result")
def qProcessInfo_does_not_contain_keys(self, absent_key_set):
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the unexpected keys are not present
unexpected_key_set = set()
for unexpected_key in absent_key_set:
if unexpected_key in process_info:
unexpected_key_set.add(unexpected_key)
self.assertEqual(
unexpected_key_set,
set(),
"the listed keys were present but unexpected in qProcessInfo result")
@add_test_categories(["debugserver"])
def test_qProcessInfo_contains_cputype_cpusubtype(self):
self.build()
self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype']))
@add_test_categories(["llgs"])
def test_qProcessInfo_contains_triple_ppid(self):
self.build()
self.qProcessInfo_contains_keys(set(['triple', 'parent-pid']))
@add_test_categories(["debugserver"])
def test_qProcessInfo_does_not_contain_triple(self):
self.build()
# We don't expect to see triple on darwin. If we do, we'll prefer triple
# to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup
# for the remote Host and Process.
self.qProcessInfo_does_not_contain_keys(set(['triple']))
@add_test_categories(["llgs"])
def test_qProcessInfo_does_not_contain_cputype_cpusubtype(self):
self.build()
self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype']))
|
[
"lldbgdbserverutils.process_is_running"
] |
[((1026, 1074), 'lldbgdbserverutils.process_is_running', 'lldbgdbserverutils.process_is_running', (['pid', '(True)'], {}), '(pid, True)\n', (1063, 1074), False, 'import lldbgdbserverutils\n')]
|
import tkinter as tk
from Gifhandler import *
#Main window
top=tk.Tk()
#Icon
top.iconbitmap('gifs/zergyicon.ico')
#Setting color
top.configure(background='gold')
#Title
top.title('Zergy')
#Fixing picture canvas (will load later)
topcanvas=tk.Canvas(top,width=250,height=250,background='gold')
topcanvas.pack()
#Open gif
mainanimation=Gifhandler(top,topcanvas,'gifs/zergysmall.gif',40)
def runwaitgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/zergysmall.gif',40)
mainanimation.animate()
def runbuggif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/bug.gif',30)
mainanimation.animate_noloop()
def runtraingif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/train.gif',100)
mainanimation.animate()
def rungotitgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/gotit.gif',30,200,130)
mainanimation.animate_noloop()
def runboomgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/boom.gif',40,200,130)
mainanimation.animate()
def runburrowgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/burrow.gif',30,100,130)
mainanimation.animate()
def runmorechasegif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/morechase.gif',30,100,130)
mainanimation.animate()
def runjumpinggif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/jumping.gif',40,100,130)
mainanimation.animate()
def runannoyinggif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/annoying.gif',30,-45,130)
mainanimation.animate()
def runcutegif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/cute.gif',30,140,150)
mainanimation.animate()
def runchasegif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/chase.gif',30,50,130)
mainanimation.animate()
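# Ask which gif to play and dispatch dynamically: the input name is used to look
# up the matching run<name>gif function in the module namespace, so entering
# e.g. "boom" calls runboomgif().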
gifname=input('gif: ')
vars()['run'+gifname+'gif']()
top.mainloop()
|
[
"tkinter.Canvas",
"tkinter.Tk"
] |
[((64, 71), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (69, 71), True, 'import tkinter as tk\n'), ((244, 300), 'tkinter.Canvas', 'tk.Canvas', (['top'], {'width': '(250)', 'height': '(250)', 'background': '"""gold"""'}), "(top, width=250, height=250, background='gold')\n", (253, 300), True, 'import tkinter as tk\n')]
|
import json
import pickle
import requests
from elastic.using_requests import get_gov
demo_tax_codes = pickle.load(open('error.p', 'rb'))
host = 'http://0.0.0.0:9201'
host = 'http://10.0.6.21:30152'
e_index = 'index'
e_index = 'sme_autocomplete_index_2'
e_type = 'sme_autocomplete_type'
default_link = host + '/' + e_index + '/' + e_type + '/'
def get(tax_code):
link = 'http://10.0.6.21:30152/sme_autocomplete_index/sme_autocomplete_type/_search?q=taxCode:' + tax_code + '&size=1'
response = requests.get(link)
response = json.loads(response.content.decode('utf-8'))
return response['hits']['hits'][0] if not len(response['hits']['hits']) == 0 else {}
def add_doc(data):
json_sent = data['_source']
link = 'http://0.0.0.0:9201/' + data['_index'] + '/' + data['_type'] + '/' + json_sent['taxCode']
response = requests.put(link, json=json_sent)
print()
def get_tax_codes(start_uid=0, size=10):
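    # Page through the index `size` documents at a time, sorted by _uid and
    # resuming after `start_uid` via search_after.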
query_data = {"size": size, "_source": ["taxCode", "tax_code"], "query": {"match_all": {}},
"search_after": [start_uid],
"sort": [
{"_uid": "asc"}
]}
url = default_link + '_search'
response = requests.post(url=url, json=query_data)
response_data = json.loads(response.content.decode('utf-8'))
return response_data['hits']['hits']
def update(id, d):
link = default_link + id + '/_update'
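    # Build an inline update script that removes the old snake_case fields and
    # rewrites them as camelCase fields populated from the freshly scraped
    # record d, finally marking the entry as verified.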
script = {
"script": "ctx._source.remove('eng_name'); "
"ctx._source.remove('short_name');"
"ctx._source.remove('tax_code');"
"ctx._source.remove('name');"
"ctx._source.remove('active_status');"
"ctx._source.remove('enterprise_type');"
"ctx._source.remove('founding_date');"
"ctx._source.remove('legal_representative');"
"ctx._source.owner='" + d['legal_representative'] + "';"
"ctx._source.address='" + d['address'] + "';"
"ctx._source.engName='" + d[
'eng_name'] + "';"
"ctx._source.shortName='" + d['short_name'] + "';"
"ctx._source.taxCode='" + d['tax_code'] + "';"
"ctx._source.companyName='" +
d['name'] + "';"
"ctx._source.activeStatus='" + d['active_status'] + "';"
"ctx._source.enterpriceType='" + d['enterprise_type'] + "';"
"ctx._source.foundedDate='" +
d['founding_date'] + "';"
"ctx._source.verify=1;"
}
response = requests.post(link, json=script)
if not response.status_code == 200:
print()
def main():
tax_codes = get_tax_codes()
while not len(tax_codes) == 0:
for code in tax_codes:
if len(code['_source']) == 0:
tax_code = code['_id']
else:
tax_code = code['_source']['taxCode'] if 'taxCode' in code['_source'] else code['_source']['tax_code']
print('Id: ' + code['_id'] + ', taxCode: ' + tax_code)
data = get_gov(tax_code, False)
if len(data) == 0:
print('Error')
continue
update(code['_id'], data)
last_code = tax_codes[-1]
tax_codes = get_tax_codes(start_uid=last_code['_type'] + '#' + last_code['_id'])
if __name__ == '__main__':
# for code in demo_tax_codes:
# data = get(code)
# if data == {}:
# continue
# add_doc(data)
main()
|
[
"requests.put",
"requests.post",
"elastic.using_requests.get_gov",
"requests.get"
] |
[((505, 523), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (517, 523), False, 'import requests\n'), ((845, 879), 'requests.put', 'requests.put', (['link'], {'json': 'json_sent'}), '(link, json=json_sent)\n', (857, 879), False, 'import requests\n'), ((1215, 1254), 'requests.post', 'requests.post', ([], {'url': 'url', 'json': 'query_data'}), '(url=url, json=query_data)\n', (1228, 1254), False, 'import requests\n'), ((3186, 3218), 'requests.post', 'requests.post', (['link'], {'json': 'script'}), '(link, json=script)\n', (3199, 3218), False, 'import requests\n'), ((3691, 3715), 'elastic.using_requests.get_gov', 'get_gov', (['tax_code', '(False)'], {}), '(tax_code, False)\n', (3698, 3715), False, 'from elastic.using_requests import get_gov\n')]
|
"""
Tests for package pytools.viz.dendrogram
"""
# noinspection PyPackageRequirements
import hashlib
import logging
from io import StringIO
import numpy as np
# noinspection PyPackageRequirements
import pytest
# noinspection PyPackageRequirements
import scipy.cluster.hierarchy as hc
from pytools.viz.dendrogram import DendrogramDrawer, DendrogramReportStyle, LinkageTree
log = logging.getLogger(__name__)
@pytest.fixture
def linkage_matrix() -> np.ndarray:
"""Create a linkage matrix."""
x = np.array([[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]])
return hc.linkage(x)
@pytest.fixture
def linkage_tree(linkage_matrix: np.ndarray) -> LinkageTree:
"""Create a linkage tree for drawing tests."""
return LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=list("ABCDEFGH"),
leaf_weights=[(w + 1) / 36 for w in range(8)],
)
def test_dendrogram_drawer_text(linkage_matrix: np.ndarray) -> None:
checksum_dendrogram_report = "32427095857f0589f68210ad4b2e8210"
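    # md5 checksum of the expected text report; the output rendered below is
    # hashed and compared against it at the end of the test.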
leaf_names = list("ABCDEFGH")
leaf_weights = [(w + 1) / 36 for w in range(8)]
with pytest.raises(ValueError) as value_error:
LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=leaf_names,
leaf_weights=leaf_weights,
max_distance=1,
)
assert value_error.value.args == (
"arg max_distance=1 must be equal to or greater than the maximum distance "
"(= 4.0) in the linkage tree",
)
linkage_tree = LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=leaf_names,
leaf_weights=[(w + 1) / 36 for w in range(8)],
distance_label="distance",
leaf_label="label",
weight_label="weight",
)
with StringIO() as out:
dd = DendrogramDrawer(style=DendrogramReportStyle(out=out))
dd.draw(data=linkage_tree, title="Test")
report_str = str(out.getvalue())
log.debug(f"\n{report_str}")
assert (
hashlib.md5(str(report_str).encode("utf-8")).hexdigest()
) == checksum_dendrogram_report
|
[
"io.StringIO",
"pytools.viz.dendrogram.DendrogramReportStyle",
"scipy.cluster.hierarchy.linkage",
"pytest.raises",
"numpy.array",
"pytools.viz.dendrogram.LinkageTree",
"logging.getLogger"
] |
[((384, 411), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (401, 411), False, 'import logging\n'), ((509, 558), 'numpy.array', 'np.array', (['[[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]'], {}), '([[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]])\n', (517, 558), True, 'import numpy as np\n'), ((570, 583), 'scipy.cluster.hierarchy.linkage', 'hc.linkage', (['x'], {}), '(x)\n', (580, 583), True, 'import scipy.cluster.hierarchy as hc\n'), ((1116, 1141), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1129, 1141), False, 'import pytest\n'), ((1166, 1284), 'pytools.viz.dendrogram.LinkageTree', 'LinkageTree', ([], {'scipy_linkage_matrix': 'linkage_matrix', 'leaf_names': 'leaf_names', 'leaf_weights': 'leaf_weights', 'max_distance': '(1)'}), '(scipy_linkage_matrix=linkage_matrix, leaf_names=leaf_names,\n leaf_weights=leaf_weights, max_distance=1)\n', (1177, 1284), False, 'from pytools.viz.dendrogram import DendrogramDrawer, DendrogramReportStyle, LinkageTree\n'), ((1782, 1792), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1790, 1792), False, 'from io import StringIO\n'), ((1837, 1867), 'pytools.viz.dendrogram.DendrogramReportStyle', 'DendrogramReportStyle', ([], {'out': 'out'}), '(out=out)\n', (1858, 1867), False, 'from pytools.viz.dendrogram import DendrogramDrawer, DendrogramReportStyle, LinkageTree\n')]
|
from node import *
from nodeitem import *
from math import sqrt, pow
import time
class Graph:
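    # A* search helpers for a graph whose nodes are spread across RDD partitions:
    # A_Star_Mapper runs the search inside a single partition, from its first
    # node to its last, and returns the intermediate path found there.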
    def __init__(self, node=None):
        # avoid the shared mutable default argument pitfall
        self.node = node if node is not None else []
def createNodeProperty(self, line):
return [int(line.split()[0]), int(line.split()[1])]
def createEdgeProperty(self, line):
return [int(line.split()[0]), [int(line.split()[1]), int(line.split()[2])]]
def getSucessorProperty(self,rdd):
rdd2 = list()
for values in rdd.__iter__():
rdd2.append([values[0],values[1]])
rdd2.sort()
return rdd2
#transform line into key-value pair
def lineToNode(self, line):
key=line[0]
value=line[1]
return Node(key, value)
    # Return the transition function value of a state x. QxW->Q
def getG(self, succesor):
return succesor[1]
# Euclidean distance between the state s1 and s2
def getEuclideanDistance(self, target1, target2):
return sqrt(pow(target2 - target1, 2)) #+ random.random() * 0.1
    # Add the successors (neighbors) of the current state to the open list
def searchNeighboardOfNode(self, current_node, goal_node, open_list):
for next_sucessor in current_node.get_succesors():
G = self.getG(next_sucessor)
H = self.getEuclideanDistance(goal_node.get_targetID(), current_node.get_targetID())
node_item = Nodeitem(next_sucessor[0], G, H, current_node.get_targetID())
open_list.append(node_item)
#Return the most promising state
def getMinOpenListNode(self, open_list, goal_node):
min_f = open_list[0].get_f()
selected_item = open_list[0]
for i in range(1, len(open_list)):
if open_list[i].get_f() <= min_f:
min_f = open_list[i].get_f()
selected_item = open_list[i]
if open_list[i].get_targetID() == goal_node.get_targetID():
selected_item = open_list[i]
break
return selected_item
# Return the new current node
def searchNewCurrentNode(self, node_list, targetID):
current_node = Node()
for node in node_list:
if node.get_targetID() == targetID:
current_node = node
break
return current_node
# Return sequence from the close list
def extractSequenceFromCloseList(self, close_list):
inter_close_list = []
path = []
size = len(close_list)
for i in range(1, size):
if close_list[i].get_idpreviousnode() == close_list[i - 1].get_idpreviousnode():
inter_close_list.append(close_list[i - 1])
for item in inter_close_list:
close_list.remove(item)
for item in close_list:
sequence = item.get_idpreviousnode(), item.get_targetID()
path.append(sequence)
return path
# return the id of boundary nodes
def getBoundaryNodesId(self, rdd, numberPartition):
nodes_id = []
all_rdd_parts=rdd.glom().collect()
for i in range(1, numberPartition):
nodes_id.append(all_rdd_parts[i][0].get_targetID())
return nodes_id
# compute intermediate path
def A_Star_Mapper(self, rdd_part):
intermediate_open_list=[]
intermediate_close_list=[]
node_list_rdd_part= list(rdd_part.__iter__())
intermediate_init_node=node_list_rdd_part.__getitem__(0)
intermediate_goal_node=node_list_rdd_part.__getitem__(len(node_list_rdd_part)-1)
current_node = intermediate_init_node
while current_node.get_targetID() != intermediate_goal_node.get_targetID():
self.searchNeighboardOfNode(current_node, intermediate_goal_node, intermediate_open_list)
selected_item=self.getMinOpenListNode(intermediate_open_list, intermediate_goal_node)
intermediate_open_list.remove(selected_item)
intermediate_close_list.append(selected_item)
current_node = self.searchNewCurrentNode(node_list_rdd_part, selected_item.get_targetID())
intermediate_path=self.extractSequenceFromCloseList(intermediate_close_list)
return intermediate_path
|
[
"math.pow"
] |
[((996, 1021), 'math.pow', 'pow', (['(target2 - target1)', '(2)'], {}), '(target2 - target1, 2)\n', (999, 1021), False, 'from math import sqrt, pow\n')]
|
#!/usr/bin/python3
import sys
#sys.path.insert(0, "/usr/local/opencv3/lib/python2.7/site-packages/")
import argparse
#import commands
import cv2
import fnmatch
import numpy as np
import os.path
import random
import navpy
sys.path.append('../lib')
import AC3D
import Pose
import ProjectMgr
import SRTM
import transformations
# for all the images in the project image_dir, compute the camera
# poses from the aircraft pose (and camera mounting transform).
# Project the image plane onto an SRTM (DEM) surface for our best
# layout guess (at this point before we do any matching/bundle
# adjustment work.)
parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--texture-resolution', type=int, default=512, help='texture resolution (should be 2**n, so numbers like 256, 512, 1024, etc.')
parser.add_argument('--ground', type=float, help='ground elevation in meters')
parser.add_argument('--sba', action='store_true', help='use sba pose')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
ref = proj.ned_reference_lla
# setup SRTM ground interpolator
sss = SRTM.NEDGround( ref, 6000, 6000, 30 )
ac3d_steps = 8
# compute the uv grid for each image and project each point out into
# ned space, then intersect each vector with the srtm ground.
# build our local image list for placing
print(args.sba)
if not args.sba:
image_list = proj.image_list
else:
image_list = []
for image in proj.image_list:
        if image.camera_pose_sba is not None:
#print image.camera_pose_sba
image_list.append(image)
depth = 0.0
camw, camh = proj.cam.get_image_params()
for image in image_list:
print(image.name)
# scale the K matrix if we have scaled the images
scale = float(image.width) / float(camw)
K = proj.cam.get_K(scale)
IK = np.linalg.inv(K)
grid_list = []
u_list = np.linspace(0, image.width, ac3d_steps + 1)
v_list = np.linspace(0, image.height, ac3d_steps + 1)
#print "u_list:", u_list
#print "v_list:", v_list
for v in v_list:
for u in u_list:
grid_list.append( [u, v] )
print('grid_list:', grid_list)
if not args.sba:
proj_list = proj.projectVectors( IK, image.get_body2ned(),
image.get_cam2body(), grid_list )
else:
print(image.get_body2ned_sba())
proj_list = proj.projectVectors( IK, image.get_body2ned_sba(),
image.get_cam2body(), grid_list )
print('proj_list:', proj_list)
if not args.sba:
ned = image.camera_pose['ned']
else:
ned = image.camera_pose_sba['ned']
print('ned', image.camera_pose['ned'], ned)
if args.ground:
pts_ned = proj.intersectVectorsWithGroundPlane(ned,
args.ground, proj_list)
else:
pts_ned = sss.interpolate_vectors(ned, proj_list)
print("pts_3d (ned):\n", pts_ned)
# convert ned to xyz and stash the result for each image
image.grid_list = []
ground_sum = 0
for p in pts_ned:
image.grid_list.append( [p[1], p[0], -(p[2]+depth)] )
ground_sum += -p[2]
depth -= 0.01 # favor last pictures above earlier ones
# call the ac3d generator
AC3D.generate(image_list, src_dir=proj.source_dir,
project_dir=args.project, base_name='direct',
version=1.0, trans=0.1, resolution=args.texture_resolution)
if not args.ground:
print('Avg ground elevation (SRTM):', ground_sum / len(pts_ned))
|
[
"sys.path.append",
"argparse.ArgumentParser",
"numpy.linalg.inv",
"numpy.linspace",
"SRTM.NEDGround",
"AC3D.generate",
"ProjectMgr.ProjectMgr"
] |
[((224, 249), 'sys.path.append', 'sys.path.append', (['"""../lib"""'], {}), "('../lib')\n", (239, 249), False, 'import sys\n'), ((617, 685), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Set the initial camera poses."""'}), "(description='Set the initial camera poses.')\n", (640, 685), False, 'import argparse\n'), ((1094, 1129), 'ProjectMgr.ProjectMgr', 'ProjectMgr.ProjectMgr', (['args.project'], {}), '(args.project)\n', (1115, 1129), False, 'import ProjectMgr\n'), ((1223, 1258), 'SRTM.NEDGround', 'SRTM.NEDGround', (['ref', '(6000)', '(6000)', '(30)'], {}), '(ref, 6000, 6000, 30)\n', (1237, 1258), False, 'import SRTM\n'), ((3435, 3600), 'AC3D.generate', 'AC3D.generate', (['image_list'], {'src_dir': 'proj.source_dir', 'project_dir': 'args.project', 'base_name': '"""direct"""', 'version': '(1.0)', 'trans': '(0.1)', 'resolution': 'args.texture_resolution'}), "(image_list, src_dir=proj.source_dir, project_dir=args.project,\n base_name='direct', version=1.0, trans=0.1, resolution=args.\n texture_resolution)\n", (3448, 3600), False, 'import AC3D\n'), ((1948, 1964), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (1961, 1964), True, 'import numpy as np\n'), ((1998, 2041), 'numpy.linspace', 'np.linspace', (['(0)', 'image.width', '(ac3d_steps + 1)'], {}), '(0, image.width, ac3d_steps + 1)\n', (2009, 2041), True, 'import numpy as np\n'), ((2055, 2099), 'numpy.linspace', 'np.linspace', (['(0)', 'image.height', '(ac3d_steps + 1)'], {}), '(0, image.height, ac3d_steps + 1)\n', (2066, 2099), True, 'import numpy as np\n')]
|
import gc
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import torch
import torch.nn as nn
import torchvision
import sys
# To view tensorboard metrics
# tensorboard --logdir=logs --port=6006 --bind_all
from torch.utils.tensorboard import SummaryWriter
from functools import partial
from evolver import CrossoverType, MutationType, InitType, MatrixEvolver, VectorEvolver
from unet import UNet
from dataset_utils import PartitionType
from cuda_utils import maybe_get_cuda_device, clear_cuda
from landcover_dataloader import get_landcover_dataloaders, get_landcover_dataloader
from ignite.contrib.handlers.tensorboard_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU
from ignite.handlers import ModelCheckpoint
from ignite.utils import setup_logger
from ignite.engine import Engine
# Define directories for data, logging and model saving.
base_dir = os.getcwd()
dataset_name = "landcover_large"
dataset_dir = os.path.join(base_dir, "data/" + dataset_name)
experiment_name = "dropout_single_point_finetuning_100_children"
model_name = "best_model_9_validation_accuracy=0.8940.pt"
model_path = os.path.join(base_dir, "logs/" + dataset_name + "/" + model_name)
log_dir = os.path.join(base_dir, "logs/" + dataset_name + "_" + experiment_name)
# Create DataLoaders for each partition of Landcover data.
dataloader_params = {
'batch_size': 8,
'shuffle': True,
'num_workers': 6,
'pin_memory': True}
partition_types = [PartitionType.TRAIN, PartitionType.VALIDATION,
PartitionType.FINETUNING, PartitionType.TEST]
data_loaders = get_landcover_dataloaders(dataset_dir,
partition_types,
dataloader_params,
force_create_dataset=False)
train_loader = data_loaders[0]
finetuning_loader = data_loaders[2]
dataloader_params['shuffle'] = False
test_loader = get_landcover_dataloader(dataset_dir, PartitionType.TEST, dataloader_params)
# Get GPU device if available.
device = maybe_get_cuda_device()
# Determine model and training params.
params = {
'max_epochs': 10,
'n_classes': 4,
'in_channels': 4,
'depth': 5,
'learning_rate': 0.001,
'log_steps': 1,
'save_top_n_models': 4,
'num_children': 100
}
clear_cuda()
model = UNet(in_channels = params['in_channels'],
n_classes = params['n_classes'],
depth = params['depth'])
model.load_state_dict(torch.load(model_path))
# Create Trainer or Evaluators
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(),
lr=params['learning_rate'])
# Determine metrics for evaluation.
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
"mean_iou": mIoU(ConfusionMatrix(num_classes = params['n_classes'])),
}
for batch in train_loader:
batch_x = batch[0]
_ = model(batch_x)
break
drop_out_layers = model.get_dropout_layers()
del model, batch_x
clear_cuda()
for layer in drop_out_layers:
layer_name = layer.name
size = layer.x_size[1:]
sizes = [size]
clear_cuda()
model = UNet(in_channels = params['in_channels'],
n_classes = params['n_classes'],
depth = params['depth'])
model.load_state_dict(torch.load(model_path))
model.to(device)
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(),
lr=params['learning_rate'])
num_channels = size[0]
evolver = VectorEvolver(num_channels,
CrossoverType.UNIFORM,
MutationType.FLIP_BIT,
InitType.RANDOM,
flip_bit_prob=0.25,
flip_bit_decay=0.5)
log_dir_test = log_dir + "_" + layer_name
def mask_from_vec(vec, matrix_size):
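        # Expand a per-channel 0/1 vector into a full dropout mask of shape
        # matrix_size (channels, H, W); a 0 entry zeroes out that entire channel.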
mask = np.ones(matrix_size)
for i in range(len(vec)):
if vec[i] == 0:
mask[i, :, :] = 0
elif vec[i] == 1:
mask[i, :, :] = 1
return mask
def dropout_finetune_step(engine, batch):
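        # Evolutionary mask search for one batch: spawn num_children candidate
        # per-channel dropout masks, score each by its NLL loss on the batch's
        # center pixel, feed the reciprocal loss back to the evolver as fitness,
        # and leave the best child's mask applied to the layer. Returns the
        # lowest loss observed.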
model.eval()
with torch.no_grad():
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
loss = sys.float_info.max
for i in range(params['num_children']):
model.zero_grad()
child_vec = evolver.spawn_child()
child_mask = mask_from_vec(child_vec, size)
model.set_dropout_masks({layer_name: torch.tensor(child_mask, dtype=torch.float32).to(device)})
outputs = model(batch_x)
current_loss = criterion(outputs[:, :, 127:128,127:128], batch_y[:,127:128,127:128]).item()
loss = min(loss, current_loss)
if current_loss == 0.0:
current_loss = sys.float_info.max
else:
current_loss = 1.0 / current_loss
evolver.add_child(child_vec, current_loss)
priority, best_child = evolver.get_best_child()
best_mask = mask_from_vec(best_child, size)
model.set_dropout_masks({layer_name: torch.tensor(best_mask, dtype=torch.float32).to(device)})
return loss
# Create Trainer or Evaluators
trainer = Engine(dropout_finetune_step)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
trainer.logger = setup_logger("Trainer")
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator.logger = setup_logger("Validation Evaluator")
@trainer.on(Events.ITERATION_COMPLETED(every=1))
def report_evolver_stats(engine):
priorities = np.array(evolver.get_generation_priorities())
# Take reciprocal since we needed to store priorities in min heap.
priorities = 1.0 / priorities
tb_logger.writer.add_scalar("training/evolver_count",
priorities.shape[0], engine.state.iteration)
tb_logger.writer.add_scalar("training/evolver_mean",
np.mean(priorities), engine.state.iteration)
tb_logger.writer.add_scalar("training/evolver_std",
np.std(priorities), engine.state.iteration)
evolver.update_parents()
@trainer.on(Events.EPOCH_COMPLETED)
def visualize_validation_predictions(engine):
for i, batch in enumerate(test_loader):
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
outputs = model(batch_x)
num_images = batch_x.shape[0]
batch_y_detach = batch_y.detach().cpu().numpy()
batch_x_detach = batch_x.detach().cpu().numpy()
outputs_detach = outputs.detach().cpu().numpy()
for j in range(num_images):
f, ax = plt.subplots(1, 3, figsize=(10, 4))
ax[0].imshow(np.moveaxis(batch_x_detach[j, :, :, :], [0], [2]) / 255.0)
ax[1].imshow((np.array(batch_y_detach[j, :, :])))
ax[2].imshow(np.argmax(np.moveaxis(np.array(outputs_detach[j, :, :, :]), [0],[ 2]), axis=2))
ax[0].set_title("X")
ax[1].set_title("Y")
ax[2].set_title("Predict")
                f.suptitle("Layer: " + layer_name + " Iteration: " + str(engine.state.iteration) + " Image: " + str(j))
plt.show()
if i > 5:
break
break
# Tensorboard Logger setup below based on pytorch ignite example
# https://github.com/pytorch/ignite/blob/master/examples/contrib/mnist/mnist_with_tensorboard_logger.py
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
"""Callback to compute metrics on the train and validation data."""
train_evaluator.run(finetuning_loader)
validation_evaluator.run(test_loader)
def score_function(engine):
"""Function to determine the metric upon which to compare model."""
return engine.state.metrics["accuracy"]
# Setup Tensor Board Logging
tb_logger = TensorboardLogger(log_dir=log_dir_test)
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=params['log_steps']),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
metric_names="all",
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names="all",
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach_opt_params_handler(trainer,
event_name=Events.ITERATION_COMPLETED(every=params['log_steps']),
optimizer=optimizer)
model_checkpoint = ModelCheckpoint(
log_dir_test,
n_saved=params['save_top_n_models'],
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
trainer.run(finetuning_loader, max_epochs=params['max_epochs'])
tb_logger.close()
|
[
"numpy.moveaxis",
"numpy.ones",
"ignite.engine.create_supervised_evaluator",
"torch.nn.NLLLoss",
"numpy.mean",
"landcover_dataloader.get_landcover_dataloaders",
"cuda_utils.maybe_get_cuda_device",
"torch.no_grad",
"cuda_utils.clear_cuda",
"os.path.join",
"numpy.std",
"torch.load",
"evolver.VectorEvolver",
"matplotlib.pyplot.subplots",
"ignite.metrics.ConfusionMatrix",
"matplotlib.pyplot.show",
"ignite.metrics.Accuracy",
"ignite.engine.Events.ITERATION_COMPLETED",
"unet.UNet",
"os.getcwd",
"ignite.engine.Engine",
"ignite.utils.setup_logger",
"numpy.array",
"landcover_dataloader.get_landcover_dataloader",
"ignite.metrics.Loss",
"torch.tensor"
] |
[((1006, 1017), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1015, 1017), False, 'import os\n'), ((1065, 1111), 'os.path.join', 'os.path.join', (['base_dir', "('data/' + dataset_name)"], {}), "(base_dir, 'data/' + dataset_name)\n", (1077, 1111), False, 'import os\n'), ((1249, 1314), 'os.path.join', 'os.path.join', (['base_dir', "('logs/' + dataset_name + '/' + model_name)"], {}), "(base_dir, 'logs/' + dataset_name + '/' + model_name)\n", (1261, 1314), False, 'import os\n'), ((1325, 1395), 'os.path.join', 'os.path.join', (['base_dir', "('logs/' + dataset_name + '_' + experiment_name)"], {}), "(base_dir, 'logs/' + dataset_name + '_' + experiment_name)\n", (1337, 1395), False, 'import os\n'), ((1714, 1820), 'landcover_dataloader.get_landcover_dataloaders', 'get_landcover_dataloaders', (['dataset_dir', 'partition_types', 'dataloader_params'], {'force_create_dataset': '(False)'}), '(dataset_dir, partition_types, dataloader_params,\n force_create_dataset=False)\n', (1739, 1820), False, 'from landcover_dataloader import get_landcover_dataloaders, get_landcover_dataloader\n'), ((2062, 2138), 'landcover_dataloader.get_landcover_dataloader', 'get_landcover_dataloader', (['dataset_dir', 'PartitionType.TEST', 'dataloader_params'], {}), '(dataset_dir, PartitionType.TEST, dataloader_params)\n', (2086, 2138), False, 'from landcover_dataloader import get_landcover_dataloaders, get_landcover_dataloader\n'), ((2181, 2204), 'cuda_utils.maybe_get_cuda_device', 'maybe_get_cuda_device', ([], {}), '()\n', (2202, 2204), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((2439, 2451), 'cuda_utils.clear_cuda', 'clear_cuda', ([], {}), '()\n', (2449, 2451), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((2464, 2561), 'unet.UNet', 'UNet', ([], {'in_channels': "params['in_channels']", 'n_classes': "params['n_classes']", 'depth': "params['depth']"}), "(in_channels=params['in_channels'], n_classes=params['n_classes'],\n depth=params['depth'])\n", (2468, 2561), False, 'from unet import UNet\n'), ((2679, 2691), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (2689, 2691), True, 'import torch.nn as nn\n'), ((3147, 3159), 'cuda_utils.clear_cuda', 'clear_cuda', ([], {}), '()\n', (3157, 3159), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((2612, 2634), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2622, 2634), False, 'import torch\n'), ((2868, 2878), 'ignite.metrics.Accuracy', 'Accuracy', ([], {}), '()\n', (2876, 2878), False, 'from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU\n'), ((2897, 2912), 'ignite.metrics.Loss', 'Loss', (['criterion'], {}), '(criterion)\n', (2901, 2912), False, 'from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU\n'), ((3270, 3282), 'cuda_utils.clear_cuda', 'clear_cuda', ([], {}), '()\n', (3280, 3282), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((3299, 3396), 'unet.UNet', 'UNet', ([], {'in_channels': "params['in_channels']", 'n_classes': "params['n_classes']", 'depth': "params['depth']"}), "(in_channels=params['in_channels'], n_classes=params['n_classes'],\n depth=params['depth'])\n", (3303, 3396), False, 'from unet import UNet\n'), ((3521, 3533), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (3531, 3533), True, 'import torch.nn as nn\n'), ((3695, 3829), 'evolver.VectorEvolver', 'VectorEvolver', (['num_channels', 'CrossoverType.UNIFORM', 'MutationType.FLIP_BIT', 'InitType.RANDOM'], {'flip_bit_prob': '(0.25)', 'flip_bit_decay': '(0.5)'}), 
'(num_channels, CrossoverType.UNIFORM, MutationType.FLIP_BIT,\n InitType.RANDOM, flip_bit_prob=0.25, flip_bit_decay=0.5)\n', (3708, 3829), False, 'from evolver import CrossoverType, MutationType, InitType, MatrixEvolver, VectorEvolver\n'), ((5618, 5647), 'ignite.engine.Engine', 'Engine', (['dropout_finetune_step'], {}), '(dropout_finetune_step)\n', (5624, 5647), False, 'from ignite.engine import Engine\n'), ((5670, 5736), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', (['model'], {'metrics': 'metrics', 'device': 'device'}), '(model, metrics=metrics, device=device)\n', (5697, 5736), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((5764, 5830), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', (['model'], {'metrics': 'metrics', 'device': 'device'}), '(model, metrics=metrics, device=device)\n', (5791, 5830), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((5852, 5875), 'ignite.utils.setup_logger', 'setup_logger', (['"""Trainer"""'], {}), "('Trainer')\n", (5864, 5875), False, 'from ignite.utils import setup_logger\n'), ((5905, 5936), 'ignite.utils.setup_logger', 'setup_logger', (['"""Train Evaluator"""'], {}), "('Train Evaluator')\n", (5917, 5936), False, 'from ignite.utils import setup_logger\n'), ((5971, 6007), 'ignite.utils.setup_logger', 'setup_logger', (['"""Validation Evaluator"""'], {}), "('Validation Evaluator')\n", (5983, 6007), False, 'from ignite.utils import setup_logger\n'), ((2939, 2987), 'ignite.metrics.ConfusionMatrix', 'ConfusionMatrix', ([], {'num_classes': "params['n_classes']"}), "(num_classes=params['n_classes'])\n", (2954, 2987), False, 'from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU\n'), ((3459, 3481), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (3469, 3481), False, 'import torch\n'), ((4078, 4098), 'numpy.ones', 'np.ones', (['matrix_size'], {}), '(matrix_size)\n', (4085, 4098), True, 'import numpy as np\n'), ((6025, 6060), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'every': '(1)'}), '(every=1)\n', (6051, 6060), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((4366, 4381), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4379, 4381), False, 'import torch\n'), ((6520, 6539), 'numpy.mean', 'np.mean', (['priorities'], {}), '(priorities)\n', (6527, 6539), True, 'import numpy as np\n'), ((6661, 6679), 'numpy.std', 'np.std', (['priorities'], {}), '(priorities)\n', (6667, 6679), True, 'import numpy as np\n'), ((8732, 8785), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'every': "params['log_steps']"}), "(every=params['log_steps'])\n", (8758, 8785), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((9346, 9399), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'every': "params['log_steps']"}), "(every=params['log_steps'])\n", (9372, 9399), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((7349, 7384), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 4)'}), '(1, 3, figsize=(10, 4))\n', (7361, 7384), True, 'import matplotlib.pyplot as plt\n'), ((7906, 7916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7914, 7916), True, 'import matplotlib.pyplot as plt\n'), ((7503, 
7536), 'numpy.array', 'np.array', (['batch_y_detach[j, :, :]'], {}), '(batch_y_detach[j, :, :])\n', (7511, 7536), True, 'import numpy as np\n'), ((7414, 7463), 'numpy.moveaxis', 'np.moveaxis', (['batch_x_detach[j, :, :, :]', '[0]', '[2]'], {}), '(batch_x_detach[j, :, :, :], [0], [2])\n', (7425, 7463), True, 'import numpy as np\n'), ((5486, 5530), 'torch.tensor', 'torch.tensor', (['best_mask'], {'dtype': 'torch.float32'}), '(best_mask, dtype=torch.float32)\n', (5498, 5530), False, 'import torch\n'), ((7590, 7626), 'numpy.array', 'np.array', (['outputs_detach[j, :, :, :]'], {}), '(outputs_detach[j, :, :, :])\n', (7598, 7626), True, 'import numpy as np\n'), ((4789, 4834), 'torch.tensor', 'torch.tensor', (['child_mask'], {'dtype': 'torch.float32'}), '(child_mask, dtype=torch.float32)\n', (4801, 4834), False, 'import torch\n')]
|
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 01, 2012
@author: <NAME>
@contact: <EMAIL>
@author: <NAME>
@contact: <EMAIL>
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='pyocni',
author='<NAME>',
author_email='<EMAIL>',
version='0.3',
description='PyOCNI: A Python implementation of an extended OCCI with a JSON serialization',
#long_description=read('README'),
url='http://www.example.com/pyocni',
#packages=['pyocni'],
packages=find_packages(), #['pyocni'],
      package_data = {
          # Include any *.conf, *.py and *.msg files found in the 'pyocni' package:
          'pyocni': ['*.conf', '*.py', '*.msg'],
          },
install_requires=[
'config',
'configobj',
#'logging',
'ordereddict',
'simplejson',
'jsonpickle',
'routes',
'webob',
'pesto',
'eventlet',
'sphinx',
'ZODB3',
'httplib2',
'couchdb',
'couchdbkit',
'tornado'
#'pack>=0.97',
#'pack'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache License, Version 2.0',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7'
]
)
|
[
"os.path.dirname",
"setuptools.find_packages"
] |
[((1258, 1273), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1271, 1273), False, 'from setuptools import setup, find_packages\n'), ((895, 920), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (910, 920), False, 'import os\n')]
|
import gc
print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())
|
[
"gc.isenabled",
"gc.disable",
"gc.enable"
] |
[((33, 45), 'gc.disable', 'gc.disable', ([], {}), '()\n', (43, 45), False, 'import gc\n'), ((68, 79), 'gc.enable', 'gc.enable', ([], {}), '()\n', (77, 79), False, 'import gc\n'), ((17, 31), 'gc.isenabled', 'gc.isenabled', ([], {}), '()\n', (29, 31), False, 'import gc\n'), ((52, 66), 'gc.isenabled', 'gc.isenabled', ([], {}), '()\n', (64, 66), False, 'import gc\n'), ((86, 100), 'gc.isenabled', 'gc.isenabled', ([], {}), '()\n', (98, 100), False, 'import gc\n')]
|
#!/usr/bin/env python
from __future__ import print_function
__author__ = 'mnowotka'
# ----------------------------------------------------------------------------------------------------------------------
import sys
import argparse
from chembl_webresource_client.scripts.utils import get_serializer, chembl_id_regex, smiles_regex, convert_to_smiles
from chembl_webresource_client.scripts.utils import resolve, mols_to_targets
AVAILABLE_SOURCE_FORMATS = ('chembl_id', 'sdf', 'smi')
# ----------------------------------------------------------------------------------------------------------------------
def get_options():
description = 'Find related targets for a set of compounds'
parser = argparse.ArgumentParser(description=description, prog='chembl_m2t')
parser.add_argument('-i', '--input', action='store', dest='input',
help='input file, standard input by default')
parser.add_argument('-o', '--output', action='store', dest='output',
help='output file, standard output by default')
parser.add_argument('-s', '--source-format', action='store', dest='source_format', default='csv',
help='input file format. Can be one of 3: chembl_id (a comma separated list of chembl IDs), '
'sdf: (MDL molfile), smi (file containing smiles)')
parser.add_argument('-d', '--destination-format', action='store', dest='dest_format', default='uniprot',
help='output file format. can be chosen from 3 options: '
'[uniprot, gene_name, chembl_id]')
parser.add_argument('-H', '--Human', action='store_true', dest='human',
help='human readable output: prints header and first column with original names')
parser.add_argument('-O', '--organism', action='store', dest='organism',
help='Filter results by organism')
parser.add_argument('-p', '--parent', action='store_true', dest='parent',
help='when fetching targets include also targets from parents of given molecules')
parser.add_argument('-c', '--chunk-size', action='store', dest='chunk', default='1000',
help='Size of chunk of data retrieved from API')
return parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------------
def main():
options = get_options()
source_format = options.source_format.lower()
if source_format not in AVAILABLE_SOURCE_FORMATS:
sys.stderr.write('Unsupported source format', options.source_format)
return
inp = sys.stdin
if source_format == 'sdf':
with open(options.input) if options.input else sys.stdin as in_f:
options.input = None
inp = convert_to_smiles(in_f)
with open(options.input) if options.input else inp as in_f, \
open(options.output, 'w') if options.output else sys.stdout as out_f:
serializer_cls = get_serializer(options.dest_format)
if not serializer_cls:
sys.stderr.write('Unsupported format', options.dest_format)
return
if options.human:
serializer_cls.write_header(out_f)
for line in in_f:
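            # Each input line may hold a comma-separated mix of ChEMBL IDs and
            # SMILES; SMILES entries are resolved to ChEMBL IDs before the
            # target lookup below.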
if not line or line.lower().startswith('smiles'):
continue
chunk = line.strip().split()[0]
identifiers = chunk.strip().split(',')
valid_identifiers = list()
for identifier in identifiers:
if chembl_id_regex.match(identifier):
valid_identifiers.append(identifier)
elif smiles_regex.match(identifier):
valid_identifiers.extend([x['molecule_chembl_id'] for x in resolve(identifier)])
targets = mols_to_targets(valid_identifiers,
organism=options.organism,
only_ids=(options.dest_format == 'chembl_id'),
include_parents=options.parent,
chunk_size=int(options.chunk))
out_f.write(serializer_cls.serialize_line(targets, human=options.human, name=','.join(valid_identifiers)))
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
main()
# ----------------------------------------------------------------------------------------------------------------------
|
[
"chembl_webresource_client.scripts.utils.get_serializer",
"chembl_webresource_client.scripts.utils.smiles_regex.match",
"argparse.ArgumentParser",
"chembl_webresource_client.scripts.utils.resolve",
"chembl_webresource_client.scripts.utils.convert_to_smiles",
"sys.stderr.write",
"chembl_webresource_client.scripts.utils.chembl_id_regex.match"
] |
[((707, 774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'prog': '"""chembl_m2t"""'}), "(description=description, prog='chembl_m2t')\n", (730, 774), False, 'import argparse\n'), ((2593, 2661), 'sys.stderr.write', 'sys.stderr.write', (['"""Unsupported source format"""', 'options.source_format'], {}), "('Unsupported source format', options.source_format)\n", (2609, 2661), False, 'import sys\n'), ((3053, 3088), 'chembl_webresource_client.scripts.utils.get_serializer', 'get_serializer', (['options.dest_format'], {}), '(options.dest_format)\n', (3067, 3088), False, 'from chembl_webresource_client.scripts.utils import get_serializer, chembl_id_regex, smiles_regex, convert_to_smiles\n'), ((2854, 2877), 'chembl_webresource_client.scripts.utils.convert_to_smiles', 'convert_to_smiles', (['in_f'], {}), '(in_f)\n', (2871, 2877), False, 'from chembl_webresource_client.scripts.utils import get_serializer, chembl_id_regex, smiles_regex, convert_to_smiles\n'), ((3132, 3191), 'sys.stderr.write', 'sys.stderr.write', (['"""Unsupported format"""', 'options.dest_format'], {}), "('Unsupported format', options.dest_format)\n", (3148, 3191), False, 'import sys\n'), ((3595, 3628), 'chembl_webresource_client.scripts.utils.chembl_id_regex.match', 'chembl_id_regex.match', (['identifier'], {}), '(identifier)\n', (3616, 3628), False, 'from chembl_webresource_client.scripts.utils import get_serializer, chembl_id_regex, smiles_regex, convert_to_smiles\n'), ((3708, 3738), 'chembl_webresource_client.scripts.utils.smiles_regex.match', 'smiles_regex.match', (['identifier'], {}), '(identifier)\n', (3726, 3738), False, 'from chembl_webresource_client.scripts.utils import get_serializer, chembl_id_regex, smiles_regex, convert_to_smiles\n'), ((3819, 3838), 'chembl_webresource_client.scripts.utils.resolve', 'resolve', (['identifier'], {}), '(identifier)\n', (3826, 3838), False, 'from chembl_webresource_client.scripts.utils import resolve, mols_to_targets\n')]
|
"""Test record form i.e. marshmallow schema is configured as expected."""
from copy import deepcopy
import pytest
from cd2h_repo_project.modules.records.marshmallow.json import (
AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1
)
@pytest.fixture
def create_input_metadatav1():
"""Factory pattern for the input to the marshmallow.json.MetadataSchemaV1.
"""
def _create_input_metadatav1(data={}):
data_to_use = {
'title': 'A title',
'authors': [
{
'first_name': 'An',
'last_name': 'author'
}
],
'description': 'A description',
'resource_type': {
'general': 'other',
'specific': 'other'
},
'license': 'mit-license',
'permissions': 'all_view',
}
data_to_use.update(data)
return data_to_use
return _create_input_metadatav1
@pytest.fixture
def create_input_record(create_input_metadatav1):
"""Factory pattern for an API input Record.
The returned dict is the input to the marshmallow loader used by the API.
"""
def _create_input_record(data=None):
data = deepcopy(data) if data else {}
data_to_use = {
'metadata': create_input_metadatav1(data.pop('metadata', {}))
}
data_to_use.update(data)
return data_to_use
return _create_input_record
class TestRecordSchemaV1(object):
def test_load_for_empty_json_contains_schema(self, appctx):
unmarshalled_record = RecordSchemaV1().load({})
assert not unmarshalled_record.errors
assert unmarshalled_record.data == {
'$schema': (
'https://localhost:5000/schemas/records/record-v0.1.0.json'
)
}
def test_load_for_valid_json_removes_metadata_envelope(
self, create_input_record):
input_record = create_input_record()
unmarshalled_record = RecordSchemaV1().load(input_record)
assert not unmarshalled_record.errors
loaded_record = unmarshalled_record.data
assert 'metadata' not in loaded_record
def test_load_for_invalid_json_returns_errors(self):
input_record = {'foo': 'bar'}
unmarshalled_record = RecordSchemaV1().load(input_record)
assert 'foo' in unmarshalled_record.errors
assert not unmarshalled_record.data
class TestMetadataSchemaV1(object):
def test_extra_key_is_ignored(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({'foo': 'bar'})
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
loaded_record = unmarshalled_record.data
# marshmallow does not care about additional keys
assert 'foo' not in unmarshalled_record.errors
assert 'foo' not in loaded_record
def test_missing_keys_return_errors(self):
serialized_record = {'foo': 'bar'}
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
required_keys = [
'title', 'description', 'authors', 'resource_type', 'license',
'permissions'
]
assert set(unmarshalled_record.errors.keys()) == set(required_keys)
assert (
unmarshalled_record.errors['title'] ==
['Missing data for required field.']
)
def test_authors_loaded(self, create_input_metadatav1):
authors = [
{
'first_name': 'John',
'middle_name': 'Jacob',
'last_name': 'Smith'
},
{
'first_name': 'Jane',
'middle_name': 'Janet',
'last_name': 'Doe',
'full_name': '<NAME>.' # Should be overwritten
}
]
serialized_record = create_input_metadatav1({
'authors': authors
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'authors' in deserialized_metadata
assert deserialized_metadata['authors'][0] == {
'first_name': 'John',
'middle_name': 'Jacob',
'last_name': 'Smith',
'full_name': 'Smith, <NAME>'
}
assert deserialized_metadata['authors'][1] == {
'first_name': 'Jane',
'middle_name': 'Janet',
'last_name': 'Doe',
'full_name': '<NAME>'
}
def test_resource_type_loaded(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({
'resource_type': {
'general': 'other',
'specific': 'other'
}
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'resource_type' in deserialized_metadata
def test_empty_required_key_returns_errors(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({'title': None})
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
assert 'title' in unmarshalled_record.errors
def test_description_too_short_returns_error(
self, create_input_metadatav1):
serialized_record = create_input_metadatav1({'description': 'A '})
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
assert 'description' in unmarshalled_record.errors
# WHY: We place these tests here because we plan on having terms be a
# first-class citizen of the records schema
def test_one_term_loaded(self, create_input_metadatav1):
terms = [
{
'source': 'MeSH',
'value': 'Cognitive Neuroscience',
'id': 'D000066494'
}
]
serialized_record = create_input_metadatav1({
'terms': [{'data': term} for term in terms]
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
def test_multiple_terms_loaded(self, create_input_metadatav1):
terms = [
{
'source': 'MeSH',
'value': 'Cognitive Neuroscience',
'id': 'D000066494'
},
{
'source': 'MeSH',
'value': 'Acanthamoeba',
'id': 'D000048'
}
]
serialized_record = create_input_metadatav1({
'terms': [{'data': term} for term in terms]
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
def test_no_terms_loaded(self, create_input_metadatav1):
terms = []
serialized_record = create_input_metadatav1({
'terms': terms
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
serialized_record2 = create_input_metadatav1()
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record2)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
serialized_record3 = create_input_metadatav1({
'terms': [None, {}, {'data': None}, {'data': {}}, '']
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record3)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == []
def test_incorrect_format_terms_returns_error(
self, create_input_metadatav1):
terms = ["bar"]
serialized_record = create_input_metadatav1({
'terms': terms
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert 'terms' in unmarshalled_metadata.errors
assert deserialized_metadata['terms'] == [{}]
def test_coalesce_terms_loaded(self, create_input_metadatav1):
terms = [
{
'source': 'MeSH',
'value': 'Cognitive Neuroscience',
'id': 'D000066494'
},
{
'source': 'FAST',
'value': 'Glucagonoma',
'id': '943672'
}
]
serialized_record = create_input_metadatav1({
'mesh_terms': [{'data': terms[0]}],
'fast_terms': [{'data': terms[1]}]
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert 'mesh_terms' not in deserialized_metadata
assert 'fast_terms' not in deserialized_metadata
assert deserialized_metadata['terms'] == terms
def test_permissions_loaded(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({
'permissions': 'restricted_view'
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert deserialized_metadata['permissions'] == 'restricted_view'
def test_invalid_permissions_returns_errors(
self, create_input_metadatav1):
serialized_record = create_input_metadatav1({
'permissions': 'foo_view'
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert 'permissions' in unmarshalled_metadata.errors
class TestAuthorSchemaV1(object):
def test_first_and_last_name_required(self):
author = {
'first_name': 'Jonathan',
}
unmarshalled_author = AuthorSchemaV1().load(author)
assert 'first_name' in unmarshalled_author.data
assert 'middle_name' not in unmarshalled_author.errors
assert 'last_name' in unmarshalled_author.errors
class TestResourceTypeSchemaV1(object):
def test_general_dataset_fills_specific_dataset(self):
resource_type = {
'general': 'dataset'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
assert not unmarshalled_resource_type.errors
assert 'general' in unmarshalled_resource_type.data
assert unmarshalled_resource_type.data['specific'] == 'dataset'
def test_valid_general_specific_combination_loads(self):
resource_type = {
'general': 'text resources',
'specific': 'letter'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
loaded_resource_type = unmarshalled_resource_type.data
assert not unmarshalled_resource_type.errors
assert loaded_resource_type['general'] == 'text resources'
assert loaded_resource_type['specific'] == 'letter'
def test_invalid_general_specific_combination_errors(self):
resource_type = {
'general': 'articles',
'specific': 'other'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
assert (
unmarshalled_resource_type.errors['_schema'][0] ==
'Invalid resource type.'
)
def test_general_specific_combination_maps_to_hierarchy(self):
resource_type = {
'general': 'text resources',
'specific': 'letter'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
loaded_resource_type = unmarshalled_resource_type.data
assert loaded_resource_type['full_hierarchy'] == ['text', 'letter']
|
[
"cd2h_repo_project.modules.records.marshmallow.json.RecordSchemaV1",
"cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1",
"copy.deepcopy",
"cd2h_repo_project.modules.records.marshmallow.json.AuthorSchemaV1",
"cd2h_repo_project.modules.records.marshmallow.json.ResourceTypeSchemaV1"
] |
[((1257, 1271), 'copy.deepcopy', 'deepcopy', (['data'], {}), '(data)\n', (1265, 1271), False, 'from copy import deepcopy\n'), ((1619, 1635), 'cd2h_repo_project.modules.records.marshmallow.json.RecordSchemaV1', 'RecordSchemaV1', ([], {}), '()\n', (1633, 1635), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((2039, 2055), 'cd2h_repo_project.modules.records.marshmallow.json.RecordSchemaV1', 'RecordSchemaV1', ([], {}), '()\n', (2053, 2055), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((2345, 2361), 'cd2h_repo_project.modules.records.marshmallow.json.RecordSchemaV1', 'RecordSchemaV1', ([], {}), '()\n', (2359, 2361), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((2681, 2699), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (2697, 2699), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((3051, 3069), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (3067, 3069), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((4005, 4023), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (4021, 4023), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((4901, 4919), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (4917, 4919), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((5288, 5306), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (5304, 5306), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((5586, 5604), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (5602, 5604), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((6208, 6226), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (6224, 6226), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((6996, 7014), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (7012, 7014), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((7456, 7474), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (7472, 7474), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((7799, 7817), 
'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (7815, 7817), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((8220, 8238), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (8236, 8238), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((8717, 8735), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (8733, 8735), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((9501, 9519), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (9517, 9519), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((10077, 10095), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (10093, 10095), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((10531, 10549), 'cd2h_repo_project.modules.records.marshmallow.json.MetadataSchemaV1', 'MetadataSchemaV1', ([], {}), '()\n', (10547, 10549), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((10878, 10894), 'cd2h_repo_project.modules.records.marshmallow.json.AuthorSchemaV1', 'AuthorSchemaV1', ([], {}), '()\n', (10892, 10894), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((11293, 11315), 'cd2h_repo_project.modules.records.marshmallow.json.ResourceTypeSchemaV1', 'ResourceTypeSchemaV1', ([], {}), '()\n', (11313, 11315), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((11732, 11754), 'cd2h_repo_project.modules.records.marshmallow.json.ResourceTypeSchemaV1', 'ResourceTypeSchemaV1', ([], {}), '()\n', (11752, 11754), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((12225, 12247), 'cd2h_repo_project.modules.records.marshmallow.json.ResourceTypeSchemaV1', 'ResourceTypeSchemaV1', ([], {}), '()\n', (12245, 12247), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n'), ((12612, 12634), 'cd2h_repo_project.modules.records.marshmallow.json.ResourceTypeSchemaV1', 'ResourceTypeSchemaV1', ([], {}), '()\n', (12632, 12634), False, 'from cd2h_repo_project.modules.records.marshmallow.json import AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1\n')]
|
import os
import sys
import torch
import argparse
from collections import OrderedDict
from dataloader import Dataset
from evaluation import Evaluator
from experiment import EarlyStop, train_model
from utils import Config, Logger, ResultTable, make_log_dir, set_random_seed
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
def main_train_test(argv):
# multiprocessing.set_start_method('spawn')
# read configs
config = Config(main_conf_path='./', model_conf_path='model_config')
# apply system arguments if exist
if len(argv) > 0:
cmd_arg = OrderedDict()
argvs = ' '.join(sys.argv[1:]).split(' ')
for i in range(0, len(argvs), 2):
arg_name, arg_value = argvs[i], argvs[i + 1]
arg_name = arg_name.strip('-')
cmd_arg[arg_name] = arg_value
config.update_params(cmd_arg)
gpu = config.get_param('Experiment', 'gpu')
gpu = str(gpu)
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = config.get_param('Experiment', 'model_name')
# set seed
seed = config.get_param('Experiment', 'seed')
set_random_seed(seed)
# logger
log_dir = make_log_dir(os.path.join('saves', model_name))
logger = Logger(log_dir)
config.save(log_dir)
# dataset
dataset_name = config.get_param('Dataset', 'dataset')
dataset = Dataset(model_name, **config['Dataset'])
# early stop
early_stop = EarlyStop(**config['EarlyStop'])
# evaluator()
evaluator = Evaluator(early_stop.early_stop_measure, **config['Evaluator'])
# Save log & dataset config.
logger.info(config)
logger.info(dataset)
import model
MODEL_CLASS = getattr(model, model_name)
# build model
model = MODEL_CLASS(dataset, config['Model'], device)
model.logger = logger
################################## TRAIN & PREDICT
# train
try:
valid_score, train_time = train_model(model, dataset, evaluator, early_stop, logger, config)
except (KeyboardInterrupt, SystemExit):
valid_score, train_time = dict(), 0
logger.info("학습을 중단하셨습니다.")
m, s = divmod(train_time, 60)
h, m = divmod(m, 60)
logger.info('\nTotal training time - %d:%d:%d(=%.1f sec)' % (h, m, s, train_time))
# test
model.eval()
model.restore(logger.log_dir)
test_score = dict()
for testset in dataset.testsets:
test_score.update(evaluator.evaluate(model, dataset, testset))
# show result
evaluation_table = ResultTable(table_name='Best Result', header=list(test_score.keys()))
evaluation_table.add_row('Valid', valid_score)
evaluation_table.add_row('Test', test_score)
# evaluation_table.show()
logger.info(evaluation_table.to_string())
logger.info("Saved to %s" % (log_dir))
def main_submit(args):
# read configs
config = Config(main_conf_path=args.path, model_conf_path=args.path)
# Final test set (dataset/problemsheet.json)
config.main_config['Dataset']['dataset'] = '/home/agc2021/dataset'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = config.get_param('Experiment', 'model_name')
log_dir = args.path
logger = Logger(log_dir)
dataset = Dataset(model_name, **config['Dataset'])
# evaluator
evaluator = Evaluator(**config['Evaluator'])
import model
MODEL_CLASS = getattr(model, model_name)
# build model
model = MODEL_CLASS(dataset, config['Model'], device)
# test
model.eval()
model.restore(logger.log_dir)
model.logger = logger
evaluator.evaluate(model, dataset, 'submit')
logger.info("Saved answer")
if __name__ == '__main__':
## For submission
if os.path.exists('/home/agc2021/dataset/problemsheet_5_00.json'):
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='saves_final/', metavar='P')
args = parser.parse_args()
main_submit(args)
else:
main_train_test(argv=sys.argv[1:])
|
[
"experiment.EarlyStop",
"argparse.ArgumentParser",
"utils.Config",
"evaluation.Evaluator",
"utils.set_random_seed",
"dataloader.Dataset",
"model.restore",
"os.path.exists",
"experiment.train_model",
"torch.cuda.is_available",
"model.eval",
"collections.OrderedDict",
"utils.Logger",
"os.path.join"
] |
[((431, 490), 'utils.Config', 'Config', ([], {'main_conf_path': '"""./"""', 'model_conf_path': '"""model_config"""'}), "(main_conf_path='./', model_conf_path='model_config')\n", (437, 490), False, 'from utils import Config, Logger, ResultTable, make_log_dir, set_random_seed\n'), ((1176, 1197), 'utils.set_random_seed', 'set_random_seed', (['seed'], {}), '(seed)\n', (1191, 1197), False, 'from utils import Config, Logger, ResultTable, make_log_dir, set_random_seed\n'), ((1287, 1302), 'utils.Logger', 'Logger', (['log_dir'], {}), '(log_dir)\n', (1293, 1302), False, 'from utils import Config, Logger, ResultTable, make_log_dir, set_random_seed\n'), ((1415, 1455), 'dataloader.Dataset', 'Dataset', (['model_name'], {}), "(model_name, **config['Dataset'])\n", (1422, 1455), False, 'from dataloader import Dataset\n'), ((1491, 1523), 'experiment.EarlyStop', 'EarlyStop', ([], {}), "(**config['EarlyStop'])\n", (1500, 1523), False, 'from experiment import EarlyStop, train_model\n'), ((1559, 1622), 'evaluation.Evaluator', 'Evaluator', (['early_stop.early_stop_measure'], {}), "(early_stop.early_stop_measure, **config['Evaluator'])\n", (1568, 1622), False, 'from evaluation import Evaluator\n'), ((2342, 2354), 'model.eval', 'model.eval', ([], {}), '()\n', (2352, 2354), False, 'import model\n'), ((2359, 2388), 'model.restore', 'model.restore', (['logger.log_dir'], {}), '(logger.log_dir)\n', (2372, 2388), False, 'import model\n'), ((2918, 2977), 'utils.Config', 'Config', ([], {'main_conf_path': 'args.path', 'model_conf_path': 'args.path'}), '(main_conf_path=args.path, model_conf_path=args.path)\n', (2924, 2977), False, 'from utils import Config, Logger, ResultTable, make_log_dir, set_random_seed\n'), ((3274, 3289), 'utils.Logger', 'Logger', (['log_dir'], {}), '(log_dir)\n', (3280, 3289), False, 'from utils import Config, Logger, ResultTable, make_log_dir, set_random_seed\n'), ((3305, 3345), 'dataloader.Dataset', 'Dataset', (['model_name'], {}), "(model_name, **config['Dataset'])\n", (3312, 3345), False, 'from dataloader import Dataset\n'), ((3379, 3411), 'evaluation.Evaluator', 'Evaluator', ([], {}), "(**config['Evaluator'])\n", (3388, 3411), False, 'from evaluation import Evaluator\n'), ((3568, 3580), 'model.eval', 'model.eval', ([], {}), '()\n', (3578, 3580), False, 'import model\n'), ((3585, 3614), 'model.restore', 'model.restore', (['logger.log_dir'], {}), '(logger.log_dir)\n', (3598, 3614), False, 'import model\n'), ((3781, 3843), 'os.path.exists', 'os.path.exists', (['"""/home/agc2021/dataset/problemsheet_5_00.json"""'], {}), "('/home/agc2021/dataset/problemsheet_5_00.json')\n", (3795, 3843), False, 'import os\n'), ((570, 583), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (581, 583), False, 'from collections import OrderedDict\n'), ((1239, 1272), 'os.path.join', 'os.path.join', (['"""saves"""', 'model_name'], {}), "('saves', model_name)\n", (1251, 1272), False, 'import os\n'), ((1988, 2054), 'experiment.train_model', 'train_model', (['model', 'dataset', 'evaluator', 'early_stop', 'logger', 'config'], {}), '(model, dataset, evaluator, early_stop, logger, config)\n', (1999, 2054), False, 'from experiment import EarlyStop, train_model\n'), ((3862, 3887), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3885, 3887), False, 'import argparse\n'), ((1006, 1031), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1029, 1031), False, 'import torch\n'), ((3136, 3161), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3159, 3161), 
False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# We are going to explore viewing images through different filters. These filters are similar to the processing that happens in the brain when the images from our eyes are registered.
# <codecell>
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import scipy.signal as signal
import numpy as np
# <codecell>
barImg=mpimg.imread('bar.png')
#extract grey values
barImg = barImg[:,:,3]
# <markdowncell>
# We examine the effect of these filters on the following images. In the visual pathway, such images can be thought of as the input from our eyes when focusing on the center of our vision.
# <codecell>
imgplot = plt.imshow(barImg, cmap=cm.Greys_r)
# <codecell>
img=mpimg.imread('stinkbug.png') #change 'stinkbug.png' into your choice of animal:
# turtle.jpg, turtle2.jpg, zebra.png, doge.png, jaguar.png, leopard.png, mexicanhat.jpg
#extract grey values
bugImg = img[:,:,0]
# <codecell>
imgplot = plt.imshow(bugImg, cmap=cm.Greys_r)
# <markdowncell>
# Receptive field functions
# -------------------
#
# The following function will be used as a blurring filter.
# $$\phi(x,y) = \frac{1}{2\pi\sigma^2}\exp{\{-\frac{1}{2\sigma^2}(x^2 + y^2)\}}$$
# <codecell>
def gaussian2D(x, y, sigma):
    return (1.0/(2*math.pi*(sigma**2)))*math.exp(-(1.0/(2*(sigma**2)))*(x**2 + y**2))
"""make matrix from function"""
def receptiveFieldMatrix(func):
h = 30
g = zeros((h,h))
for xi in range(0,h):
for yi in range(0,h):
x = xi-h/2
y = yi-h/2
g[xi, yi] = func(x,y);
return g
def plotFilter(fun):
g = receptiveFieldMatrix(fun)
plt.imshow(g, cmap=cm.Greys_r)
# <markdowncell>
# The function is circularly symmetric: its value depends only on the distance from the center, not on the direction.
#
# This filter attenuates higher spatial frequencies, thus blurring the image.
# <codecell>
plotFilter(lambda x,y:gaussian2D(x,y,4))
# <markdowncell>
# Convolution is the process of applying the filter to the input image.
# $$\int \int I(x',y')\phi(x-x',y-y')dx'dy'$$
#
# When applying this filter, the result of the convolution can be visualized in an image.
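#
# Since the image and the receptive-field matrix are discrete arrays (the matrix built above is 30x30),
# `signal.convolve` approximates this integral with the discrete sum
# $$\sum_{x'}\sum_{y'} I(x',y')\,\phi(x-x',y-y')$$
# evaluated at every pixel; `mode='same'` keeps the output the same size as the input image.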
# <codecell>
Img_barGaussian = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: gaussian2D(x,y,5)), mode='same')
imgplot = plt.imshow(Img_barGaussian, cmap=cm.Greys_r)
# <codecell>
Img_bugGaussian = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: gaussian2D(x,y,3)), mode='same')
imgplot = plt.imshow(Img_bugGaussian, cmap=cm.Greys_r)
# <markdowncell>
# Difference of Gaussians
# ---------------------
#
# The Mexican hat function is the difference between two of the Gaussian functions above (with different widths), which gives a filter like the one implemented by certain cells in your eye. It can be seen as a basic edge detector.
# <codecell>
def mexicanHat(x,y,sigma1,sigma2):
return gaussian2D(x,y,sigma1) - gaussian2D(x,y,sigma2)
plotFilter(lambda x,y: mexicanHat(x,y,3,4))
# <codecell>
Img_barHat = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y:mexicanHat(x,y,3,4)), mode='same')
imgplot = plt.imshow(Img_barHat, cmap=cm.Greys_r)
# <codecell>
Img_bugHat = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: mexicanHat(x,y,2,3)), mode='same')
imgplot = plt.imshow(Img_bugHat, cmap=cm.Greys_r)
# <markdowncell>
# Gabor functions
# ---------------
#
# Gabor functions are used to detect edges with a specific orientation in images. There are parts of the visual system that effectively see an image through these Gabor functions.
#
# There are two different types of gabor function:
# $$g_s(x):=sin(\omega_x x + \omega_y y)\exp{\{-\frac{x^2+y^2}{2\sigma^2}\}}$$
# $$g_c(x):=cos(\omega_x x + \omega_y y)\exp{\{-\frac{x^2+y^2}{2\sigma^2}\}}$$
#
# <codecell>
def oddGabor2D(x,y,sigma,orientation):
return math.sin(x + orientation*y) * math.exp(-(x**2 + y**2)/(2*sigma))
def evenGabor2D(x,y, sigma, orientation):
return math.cos(x + orientation*y) * math.exp(-(x**2 + y**2)/(2*sigma))
plotFilter(lambda x,y: oddGabor2D(x,y,7,1))
# <codecell>
Img_barOddGabor = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: oddGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_barOddGabor, cmap=cm.Greys_r)
# <codecell>
Img_bugOddGabor = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: oddGabor2D(x,y,5,1)), mode='same')
# <markdowncell>
# In the following image one can see how edges with the filter's preferred orientation stand out.
# <codecell>
imgplot = plt.imshow(Img_bugOddGabor, cmap=cm.Greys_r)
# <markdowncell>
# Using the output of the previous (edge-detecting) filter as the input to the Gabor filter, we obtain different results.
# <codecell>
Img_bugOddGaborEdge = signal.convolve(Img_bugHat,receptiveFieldMatrix(lambda x,y: oddGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_bugOddGaborEdge, cmap=cm.Greys_r)
# <markdowncell>
# Here is an example of the other (even) Gabor filter.
# <codecell>
plotFilter(lambda x,y: evenGabor2D(x,y,7,1))
Img_barEvenGabor = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: evenGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_barEvenGabor, cmap=cm.Greys_r)
# <codecell>
Img_bugEvenGabor = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: evenGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_bugEvenGabor, cmap=cm.Greys_r)
# <markdowncell>
# Quadrature Pairs
# ------------------
#
# Now let's combine both gabor filters to see what will happen.
# <codecell>
def edgeEnergy(x,y,sigma, orientation):
g1= oddGabor2D(x,y,sigma,orientation)
g2= evenGabor2D(x,y,sigma,orientation)
return(g1**2+g2**2)
# <codecell>
plotFilter(lambda x,y:edgeEnergy(x,y,50,0))
# <codecell>
Img_barEdgeEnergy = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: edgeEnergy(x,y,100,1)), mode='same')
imgplot = plt.imshow(Img_barEdgeEnergy, cmap=cm.Greys_r)
# <codecell>
Img_bugEdgeEnergy = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: edgeEnergy(x,y,10,1)), mode='same')
imgplot = plt.imshow(Img_bugEdgeEnergy, cmap=cm.Greys_r)
# <codecell>
|
[
"matplotlib.pyplot.imshow",
"matplotlib.image.imread"
] |
[((427, 450), 'matplotlib.image.imread', 'mpimg.imread', (['"""bar.png"""'], {}), "('bar.png')\n", (439, 450), True, 'import matplotlib.image as mpimg\n'), ((694, 729), 'matplotlib.pyplot.imshow', 'plt.imshow', (['barImg'], {'cmap': 'cm.Greys_r'}), '(barImg, cmap=cm.Greys_r)\n', (704, 729), True, 'import matplotlib.pyplot as plt\n'), ((749, 777), 'matplotlib.image.imread', 'mpimg.imread', (['"""stinkbug.png"""'], {}), "('stinkbug.png')\n", (761, 777), True, 'import matplotlib.image as mpimg\n'), ((983, 1018), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bugImg'], {'cmap': 'cm.Greys_r'}), '(bugImg, cmap=cm.Greys_r)\n', (993, 1018), True, 'import matplotlib.pyplot as plt\n'), ((2302, 2346), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_barGaussian'], {'cmap': 'cm.Greys_r'}), '(Img_barGaussian, cmap=cm.Greys_r)\n', (2312, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2524), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_bugGaussian'], {'cmap': 'cm.Greys_r'}), '(Img_bugGaussian, cmap=cm.Greys_r)\n', (2490, 2524), True, 'import matplotlib.pyplot as plt\n'), ((3065, 3104), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_barHat'], {'cmap': 'cm.Greys_r'}), '(Img_barHat, cmap=cm.Greys_r)\n', (3075, 3104), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3274), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_bugHat'], {'cmap': 'cm.Greys_r'}), '(Img_bugHat, cmap=cm.Greys_r)\n', (3245, 3274), True, 'import matplotlib.pyplot as plt\n'), ((4178, 4222), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_barOddGabor'], {'cmap': 'cm.Greys_r'}), '(Img_barOddGabor, cmap=cm.Greys_r)\n', (4188, 4222), True, 'import matplotlib.pyplot as plt\n'), ((4481, 4525), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_bugOddGabor'], {'cmap': 'cm.Greys_r'}), '(Img_bugOddGabor, cmap=cm.Greys_r)\n', (4491, 4525), True, 'import matplotlib.pyplot as plt\n'), ((4794, 4842), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_bugOddGaborEdge'], {'cmap': 'cm.Greys_r'}), '(Img_bugOddGaborEdge, cmap=cm.Greys_r)\n', (4804, 4842), True, 'import matplotlib.pyplot as plt\n'), ((5091, 5136), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_barEvenGabor'], {'cmap': 'cm.Greys_r'}), '(Img_barEvenGabor, cmap=cm.Greys_r)\n', (5101, 5136), True, 'import matplotlib.pyplot as plt\n'), ((5274, 5319), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_bugEvenGabor'], {'cmap': 'cm.Greys_r'}), '(Img_bugEvenGabor, cmap=cm.Greys_r)\n', (5284, 5319), True, 'import matplotlib.pyplot as plt\n'), ((5806, 5852), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_barEdgeEnergy'], {'cmap': 'cm.Greys_r'}), '(Img_barEdgeEnergy, cmap=cm.Greys_r)\n', (5816, 5852), True, 'import matplotlib.pyplot as plt\n'), ((5990, 6036), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_bugEdgeEnergy'], {'cmap': 'cm.Greys_r'}), '(Img_bugEdgeEnergy, cmap=cm.Greys_r)\n', (6000, 6036), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1701), 'matplotlib.pyplot.imshow', 'plt.imshow', (['g'], {'cmap': 'cm.Greys_r'}), '(g, cmap=cm.Greys_r)\n', (1681, 1701), True, 'import matplotlib.pyplot as plt\n')]
|
import decimal
import random
import string
import textwrap
import time
import os
from memtrain.memtrain_common.mtstatistics import MtStatistics
class NoResponsesError(Exception):
pass
class Question:
'''Manages the current cue and response interface'''
def __init__(self, settings, database):
# Initialize core objects
self.settings = settings
self.conn = database.conn
self.cur = self.conn.cursor()
self.database = database
self.responses = self.database.get_all_responses()
self.cue_id = 0
self.response_id = 0
self.f_cue = ''
self.mtags = []
self.mchoices = dict()
self.iam = ' '
self.ascii_range = ['a', 'b', 'c', 'd']
self.response = ''
self.user_input = ''
self.synonyms = []
self.plural_responses = [i for i in self.responses if self.is_plural(i)]
self.nonplural_responses = [i for i in self.responses if not self.is_plural(i)]
## Interface text
self.title_text = ''
self.level_text = ''
self.response_number_text = ''
self.cue_text = ''
self.hint_text = ''
self.correctness_str = ''
self.other_answers_str = ''
def get_value(self, value, value_id):
self.cur.execute('''SELECT {} FROM {} WHERE {} = (?)'''
.format(value, value + 's', value + '_id'),
(str(value_id), ))
rows = self.cur.fetchall()
return rows[0][0]
def get_cue(self, cue_id):
return self.get_value('cue', cue_id)
def get_response(self, response_id):
return self.get_value('response', response_id)
def get_hints(self):
# Translate response_id into hint_id
self.cur.execute('''SELECT hint_id FROM responses_to_hints
WHERE response_id = (?)''', (str(self.response_id), ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
out = []
# Translate hint_id to hint
for hint_id in rows:
out.append(self.get_value('hint', hint_id))
return out
def get_synonyms(self):
# Translate response_id into synonym_id
self.cur.execute('''SELECT synonym_id FROM responses_to_synonyms
WHERE response_id = (?)''', (str(self.response_id), ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
out = []
# Translate synonym_id to synonym
for synonym_id in rows:
out.append(self.get_value('synonym', synonym_id))
return out
def get_mtags(self):
# Translate response_id into mtag_id
self.cur.execute('''SELECT mtag_id FROM responses_to_mtags
WHERE response_id = (?)''', (str(self.response_id), ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
out = []
# Translate mtag_id to mtag
for mtag_id in rows:
out.append(self.get_value('mtag', mtag_id))
return out
def get_placement(self, cue_id, response_id):
self.cur.execute('''SELECT placement FROM cues_to_responses
WHERE cue_id = (?) AND response_id = (?)''',
(str(cue_id), str(response_id)))
rows = self.cur.fetchall()
return rows[0][0]
def get_responses_by_mtag(self, mtag):
# Translate mtag_id to mtag
self.cur.execute('''SELECT mtag_id FROM mtags WHERE mtag = (?)''',
(mtag, ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
# Translate mtag_id to response_id
response_ids = []
for mtag_id in rows:
self.cur.execute('''SELECT response_id FROM responses_to_mtags
WHERE mtag_id = (?)''', (mtag_id, ))
more_rows = self.cur.fetchall()
response_ids += list(map(lambda x: x[0], more_rows))
# Translate response_id to response
out = []
for response_id in response_ids:
out.append(self.get_value('response', response_id))
return out
def is_plural(self, string):
'''Detects most plural words in English'''
return string[-1:] == 's' or string[-2:] == 'es'
# Question rendering ######################################################
def format_cue(self):
self.f_cue = self.cue.replace('{{}}', '_' * 9)
if self.placement == 1:
self.f_cue = self.f_cue.replace('{{1}}', '___(1)___')
else:
self.f_cue = self.f_cue.replace('{{1}}', '_' * 9)
if self.placement == 2:
self.f_cue = self.f_cue.replace('{{2}}', '___(2)___')
else:
self.f_cue = self.f_cue.replace('{{2}}', '_' * 9)
if self.placement == 3:
self.f_cue = self.f_cue.replace('{{3}}', '___(3)___')
else:
self.f_cue = self.f_cue.replace('{{3}}', '_' * 9)
return self.f_cue
def main_data_loop(self, cue_id, response_id, mtstatistics, final=False):
'''Main data processing for question rendering'''
# Initialize core objects
self.cue_id = cue_id
self.response_id = response_id
self.mtstatistics = mtstatistics
# Other important data
self.cue = self.get_cue(self.cue_id)
self.response = self.get_response(self.response_id)
self.placement = self.get_placement(self.cue_id, self.response_id)
self.synonyms = self.get_synonyms()
self.hints = self.get_hints()
self.mtags = self.get_mtags()
self.mtstatistics.update_percentage()
# Determine the level
if self.settings.level == '1':
self.level_text = 'Level 1'
elif self.settings.level == '2':
self.level_text = 'Level 2'
elif self.settings.level == '3':
self.level_text = 'Level 3'
# Important text
self.title_text = self.settings.settings['title']
self.response_number_text = 'Response ' + str(self.mtstatistics.response_number) + ' of ' + str(self.mtstatistics.total)
self.correct_so_far_text = str(self.mtstatistics.number_correct) + '/' + str(self.mtstatistics.response_number-1) + ' · ' + str(round(self.mtstatistics.percentage, 1)) + '%'
self.cue_text = self.format_cue()
def generate_mchoices(self):
'''Return the choices for the multiple choice questions'''
out = dict()
# Get responses for all mtags for this response
same_mtag_responses = []
for mtag in self.mtags:
same_mtag_responses += self.get_responses_by_mtag(mtag)
# Get responses of the same and the other plurality
plurality = self.is_plural(self.response)
same_plurality_responses = list(self.plural_responses) if plurality else list(self.nonplural_responses)
other_plurality_responses = list(self.nonplural_responses) if plurality else list(self.plural_responses)
# We will select first from same_mtag_responses. Then, if
# that's empty, we'll select from same_plurality_responses. If
# that's also empty, we'll resort to other_plurality_responses.
# Filter all three of these lists to make sure they don't contain
# the correct response
same_mtag_responses = [i for i in same_mtag_responses if i != self.response]
same_plurality_responses = [i for i in same_plurality_responses if i != self.response]
# The response won't be located in other_plurality_responses.
        # Filter the plurality_responses lists
same_plurality_responses = [i for i in same_plurality_responses if i not in same_mtag_responses]
other_plurality_responses = [i for i in other_plurality_responses if i not in same_mtag_responses]
# Shuffle the response lists.
random.shuffle(same_mtag_responses)
random.shuffle(same_plurality_responses)
random.shuffle(other_plurality_responses)
# Get the index of the correct answer.
correct_letter = random.choice(self.ascii_range)
response_pool_consumption_index = 0
response_pool = same_mtag_responses
# Loop through the ascii range
for i in self.ascii_range:
# If we have the correct letter, output the correct response.
if i == correct_letter:
this_response = self.response
# Otherwise...
else:
# If the response_pool is empty...
while len(response_pool) == 0:
response_pool_consumption_index = response_pool_consumption_index + 1
if response_pool_consumption_index == 1:
response_pool = same_plurality_responses
elif response_pool_consumption_index == 2:
response_pool = other_plurality_responses
elif response_pool_consumption_index > 2:
raise NoResponsesError('There are no more responses available.')
this_response = response_pool.pop()
# Capitalize only the first letter of this_response
this_response = this_response[0].upper() + this_response[1:]
# Now that we have our choice, insert it into self.mchoices
out[i] = this_response
return out
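    # A hypothetical return value, assuming four organelle-related responses in the pool
    # (the values below are illustrative, not from the original data):
    #   {'a': 'Mitochondria', 'b': 'Ribosomes', 'c': 'Chloroplasts', 'd': 'Lysosomes'}
    # Exactly one entry (under the randomly chosen correct letter) is self.response,
    # and every choice has its first letter capitalized.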
def validate_input(self):
'''Determine if input is valid'''
self.mtstatistics.is_input_valid = False
if self.settings.level == '1':
if self.user_input in self.ascii_range:
self.mtstatistics.is_input_valid = True
else:
if self.user_input:
self.mtstatistics.is_input_valid = True
def standardize_string(self, string):
'''Standardize strings so they can be compared for correctness'''
# The idea here is that a question shouldn't be marked wrong just
# because the user forgot to enter a hyphen or a space or used the
# wrong case.
#
        # Standardization involves the removal of all case, whitespace, and
# hyphens. This means the grading of questions is not case, whitespace,
# or hyphen sensitive.
# Remove case (transform to lowercase)
out = string.lower()
# Remove whitespace
out = ''.join(out.split())
# Remove hyphens
out = out.replace('-', '')
out = out.replace('–', '')
out = out.replace('—', '')
return out
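    # For example (hypothetical inputs): 'Micro-organisms', 'micro organisms' and 'MICROORGANISMS'
    # all standardize to 'microorganisms', so each of them would be graded as matching a
    # response stored as 'Microorganisms'.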
def determine_equivalence(self):
        '''See if input matches a synonym or standardized string'''
self.mtstatistics.is_input_correct = False
# If not, does the input match the response?
std_input = self.standardize_string(self.user_input)
std_response = self.standardize_string(self.response)
if std_input == std_response:
self.mtstatistics.used_response = self.response
self.mtstatistics.is_input_correct = True
if not self.mtstatistics.is_input_correct:
# If not, does the input match a synonym?
for synonym in self.synonyms:
std_synonym = self.standardize_string(synonym)
if std_input == std_synonym:
self.mtstatistics.used_synonym = synonym
self.mtstatistics.is_input_correct = True
def grade_input(self):
'''Determine whether input is correct.'''
if self.settings.level == '1':
# For level 1, check to see if the right letter was entered.
# First, translate the letter to its corresponding choice.
self.user_input = self.mchoices[self.user_input]
self.mtstatistics.is_input_correct = self.response.lower() == self.user_input.lower()
else:
# For levels 2 or 3, make sure the right input was entered.
self.determine_equivalence()
def finalize(self):
'''Notify the user of correctness, update statistics, and print'''
if self.mtstatistics.is_input_correct:
self.mtstatistics.number_correct += 1
if self.mtstatistics.has_synonym_been_used():
self.remaining_synonyms = [i for i in self.synonyms if i != self.mtstatistics.used_synonym]
self.correctness_str = 'Correct. Default answer: ' + self.response
else:
self.correctness_str = 'Correct.'
self.other_answers_str = 'Other correct responses: ' + ', '.join(self.synonyms)
else:
self.mtstatistics.number_incorrect += 1
self.mtstatistics.incorrect_responses.append(self.response)
self.correctness_str = 'Incorrect. Answer: ' + self.response
self.other_answers_str = 'Other correct responses: ' + ', '.join(self.synonyms)
self.mtstatistics.response_number += 1
# Reset
self.mtstatistics.used_synonym = ''
|
[
"string.lower",
"random.shuffle",
"random.choice"
] |
[((7974, 8009), 'random.shuffle', 'random.shuffle', (['same_mtag_responses'], {}), '(same_mtag_responses)\n', (7988, 8009), False, 'import random\n'), ((8018, 8058), 'random.shuffle', 'random.shuffle', (['same_plurality_responses'], {}), '(same_plurality_responses)\n', (8032, 8058), False, 'import random\n'), ((8067, 8108), 'random.shuffle', 'random.shuffle', (['other_plurality_responses'], {}), '(other_plurality_responses)\n', (8081, 8108), False, 'import random\n'), ((8182, 8213), 'random.choice', 'random.choice', (['self.ascii_range'], {}), '(self.ascii_range)\n', (8195, 8213), False, 'import random\n'), ((10417, 10431), 'string.lower', 'string.lower', ([], {}), '()\n', (10429, 10431), False, 'import string\n')]
|
'''vmssz.py - class of basic Azure VM scale set operations, without UDs, with zones'''
import json
import azurerm
class VMSSZ():
'''VMSSZ class - encapsulates the model and status of a zone redundant VM scale set'''
def __init__(self, vmssname, vmssmodel, subscription_id, access_token):
        '''class initialization routine - set basic VMSS properties'''
self.name = vmssname
vmssid = vmssmodel['id']
self.rgname = vmssid[vmssid.index('resourceGroups/') + 15:vmssid.index('/providers')]
self.sub_id = subscription_id
self.access_token = access_token
self.model = vmssmodel
self.adminuser = \
vmssmodel['properties']['virtualMachineProfile']['osProfile']['adminUsername']
self.capacity = vmssmodel['sku']['capacity']
self.location = vmssmodel['location']
self.nameprefix = \
vmssmodel['properties']['virtualMachineProfile']['osProfile']['computerNamePrefix']
self.overprovision = vmssmodel['properties']['overprovision']
self.vm_instance_view = None
self.vm_model_view = None
self.pg_list = []
self.zones = []
if 'zones' in vmssmodel:
self.zonal = True
else:
self.zonal = False
# see if it's a tenant spanning scale set
self.singlePlacementGroup = True
if 'singlePlacementGroup' in vmssmodel['properties']:
self.singlePlacementGroup = vmssmodel['properties']['singlePlacementGroup']
self.tier = vmssmodel['sku']['tier']
self.upgradepolicy = vmssmodel['properties']['upgradePolicy']['mode']
self.vmsize = vmssmodel['sku']['name']
# if it's a platform image, or managed disk based custom image, it has
# an imageReference
if 'imageReference' in vmssmodel['properties']['virtualMachineProfile']['storageProfile']:
# if it's a managed disk based custom image it has an id
if 'id' in vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']:
self.image_type = 'custom'
self.offer = 'custom'
self.sku = 'custom'
img_ref_id = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['id']
self.version = img_ref_id.split(".Compute/", 1)[1]
self.image_resource_id = img_ref_id.split(".Compute/", 1)[0]
else: # platform image
self.image_type = 'platform'
self.offer = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['offer']
self.sku = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku']
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['version']
# else it's an unmanaged disk custom image and has an image URI
else:
self.image_type = 'custom'
if 'osType' in vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']:
self.offer = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['osType']
else:
self.offer = 'custom'
self.sku = 'custom'
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri']
self.provisioningState = vmssmodel['properties']['provisioningState']
self.status = self.provisioningState
def refresh_model(self):
'''update the model, useful to see if provisioning is complete'''
vmssmodel = azurerm.get_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.model = vmssmodel
self.capacity = vmssmodel['sku']['capacity']
self.vmsize = vmssmodel['sku']['name']
if self.image_type == 'platform':
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['version']
else:
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri']
self.provisioningState = vmssmodel['properties']['provisioningState']
self.status = self.provisioningState
self.init_vm_details()
def update_token(self, access_token):
'''update the token property'''
self.access_token = access_token
def update_model(self, newsku, newversion, newvmsize):
'''update the VMSS model with any updated properties'''
changes = 0
if self.sku != newsku:
if self.image_type == 'platform': # sku not relevant for custom image
changes += 1
self.model['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku'] = newsku
self.sku = newsku
else:
self.status = 'You cannot change sku setting for custom image'
if self.version != newversion:
changes += 1
self.version = newversion
if self.image_type == 'platform': # for platform image modify image reference
self.model['properties']['virtualMachineProfile']['storageProfile']['imageReference']['version'] = newversion
else:
# check for managed disk
if 'imageReference' in self.model['properties']['virtualMachineProfile']['storageProfile']:
self.model['properties']['virtualMachineProfile']['storageProfile'][
'imageReference']['id'] = self.image_resource_id + '.Compute/' + newversion
else:
# unmanaged custom image - has a URI which points directly
# to image blob
self.model['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] = newversion
if self.vmsize != newvmsize:
changes += 1
# to do - add a check that the new vm size matches the tier
self.model['sku']['name'] = newvmsize
self.vmsize = newvmsize
if changes == 0:
self.status = 'VMSS model is unchanged, skipping update'
else:
# put the vmss model
updateresult = azurerm.update_vmss(self.access_token, self.sub_id, self.rgname,
self.name, json.dumps(self.model))
self.status = updateresult
def scale(self, capacity):
'''set the VMSS to a new capacity'''
self.model['sku']['capacity'] = capacity
scaleoutput = azurerm.scale_vmss(self.access_token, self.sub_id, self.rgname, self.name,
capacity)
self.status = scaleoutput
def poweron(self):
'''power on all the VMs in the scale set'''
result = azurerm.start_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def restart(self):
'''restart all the VMs in the scale set'''
result = azurerm.restart_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def poweroff(self):
'''power off all the VMs in the scale set'''
result = azurerm.poweroff_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def dealloc(self):
'''stop deallocate all the VMs in the scale set'''
result = azurerm.stopdealloc_vmss(
self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def init_vm_instance_view(self):
'''get the VMSS instance view and set the class property'''
# get an instance view list in order to build FD heatmap
self.vm_instance_view = \
azurerm.list_vmss_vm_instance_view(self.access_token, self.sub_id, self.rgname,
self.name)
def init_vm_model_view(self):
'''get the VMSS instance view and set the class property'''
# get a model view list in order to build a zones heatmap
self.vm_model_view = \
azurerm.list_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name)
def reimagevm(self, vmstring):
'''reaimge individual VMs or groups of VMs in a scale set'''
result = azurerm.reimage_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def upgradevm(self, vmstring):
'''upgrade individual VMs or groups of VMs in a scale set'''
result = azurerm.upgrade_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def deletevm(self, vmstring):
'''delete individual VMs or groups of VMs in a scale set'''
result = azurerm.delete_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def startvm(self, vmstring):
'''start individual VMs or groups of VMs in a scale set'''
result = azurerm.start_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def restartvm(self, vmstring):
'''restart individual VMs or groups of VMs in a scale set'''
result = azurerm.restart_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def deallocvm(self, vmstring):
'''dealloc individual VMs or groups of VMs in a scale set'''
result = azurerm.stopdealloc_vmss_vms(self.access_token, self.sub_id, self.rgname,
self.name, vmstring)
self.status = result
def poweroffvm(self, vmstring):
'''power off individual VMs or groups of VMs in a scale set'''
result = azurerm.poweroff_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def get_power_state(self, statuses):
        '''get power state from a list of VM instance statuses'''
for status in statuses:
if status['code'].startswith('Power'):
return status['code'][11:]
def init_zones(self):
'''create a structure to represent VMs by zone and FD
- ignore placement groups for now.
'''
self.zones = []
for zone_id in range(1, 4):
zone = {'zone': zone_id}
fds = []
for fd_num in range(5):
fault_domain = {'fd': fd_num, 'vms': []}
fds.append(fault_domain)
zone['fds'] = fds
self.zones.append(zone)
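        # The resulting structure looks like (illustrative):
        #   [{'zone': 1, 'fds': [{'fd': 0, 'vms': []}, ..., {'fd': 4, 'vms': []}]},
        #    {'zone': 2, 'fds': [...]},
        #    {'zone': 3, 'fds': [...]}]
        # init_vm_details() then appends {'vmid': ..., 'power_state': ...} entries to each 'vms' list.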
def init_vm_details(self):
'''Populate the self.zones structure
- with a physically ordered representation of the VMs in a scale set.
'''
self.init_zones()
# get the model view
self.vm_model_view = azurerm.list_vmss_vms(self.access_token, self.sub_id, self.rgname,
self.name)
# get the instance view
self.vm_instance_view = azurerm.list_vmss_vm_instance_view(self.access_token, self.sub_id,
self.rgname, self.name)
# do a loop through the number of VMs and populate VMs properties in the zones structure
# make an assumption that len(vm_model_view) == len(vm_instance_view)
# - true if not actively scaling
for idx in range(len(self.vm_model_view['value'])):
vm_id = self.vm_model_view['value'][idx]['instanceId']
zone_num = self.vm_model_view['value'][idx]['zones'][0]
power_state = self.get_power_state(
self.vm_instance_view['value'][idx]['properties']['instanceView']['statuses'])
fault_domain = self.vm_instance_view['value'][idx]['properties']['instanceView']['platformFaultDomain']
vm_data = {'vmid': vm_id, 'power_state': power_state}
self.zones[int(zone_num)-1]['fds'][fault_domain]['vms'].append(vm_data)
#print(json.dumps(self.zones))
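# A minimal usage sketch (illustrative only -- the resource group, scale set name and credentials
# below are placeholders, and acquiring the access token is assumed to happen elsewhere):
#
#   model = azurerm.get_vmss(access_token, subscription_id, 'my-rg', 'my-vmss')
#   vmss = VMSSZ('my-vmss', model, subscription_id, access_token)
#   vmss.init_vm_details()
#   print(json.dumps(vmss.zones))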
|
[
"azurerm.upgrade_vmss_vms",
"azurerm.start_vmss_vms",
"azurerm.list_vmss_vms",
"azurerm.stopdealloc_vmss_vms",
"azurerm.poweroff_vmss_vms",
"azurerm.poweroff_vmss",
"azurerm.restart_vmss",
"json.dumps",
"azurerm.scale_vmss",
"azurerm.reimage_vmss_vms",
"azurerm.get_vmss",
"azurerm.list_vmss_vm_instance_view",
"azurerm.start_vmss",
"azurerm.delete_vmss_vms",
"azurerm.restart_vmss_vms",
"azurerm.stopdealloc_vmss"
] |
[((3708, 3780), 'azurerm.get_vmss', 'azurerm.get_vmss', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.rgname, self.name)\n', (3724, 3780), False, 'import azurerm\n'), ((6687, 6775), 'azurerm.scale_vmss', 'azurerm.scale_vmss', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'capacity'], {}), '(self.access_token, self.sub_id, self.rgname, self.name,\n capacity)\n', (6705, 6775), False, 'import azurerm\n'), ((6940, 7014), 'azurerm.start_vmss', 'azurerm.start_vmss', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.rgname, self.name)\n', (6958, 7014), False, 'import azurerm\n'), ((7136, 7212), 'azurerm.restart_vmss', 'azurerm.restart_vmss', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.rgname, self.name)\n', (7156, 7212), False, 'import azurerm\n'), ((7337, 7414), 'azurerm.poweroff_vmss', 'azurerm.poweroff_vmss', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.rgname, self.name)\n', (7358, 7414), False, 'import azurerm\n'), ((7544, 7629), 'azurerm.stopdealloc_vmss', 'azurerm.stopdealloc_vmss', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.rgname, self.name\n )\n', (7568, 7629), False, 'import azurerm\n'), ((7884, 7979), 'azurerm.list_vmss_vm_instance_view', 'azurerm.list_vmss_vm_instance_view', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.\n rgname, self.name)\n', (7918, 7979), False, 'import azurerm\n'), ((8234, 8311), 'azurerm.list_vmss_vms', 'azurerm.list_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.rgname, self.name)\n', (8255, 8311), False, 'import azurerm\n'), ((8434, 8529), 'azurerm.reimage_vmss_vms', 'azurerm.reimage_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'vmstring'], {}), '(self.access_token, self.sub_id, self.rgname, self.\n name, vmstring)\n', (8458, 8529), False, 'import azurerm\n'), ((8718, 8813), 'azurerm.upgrade_vmss_vms', 'azurerm.upgrade_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'vmstring'], {}), '(self.access_token, self.sub_id, self.rgname, self.\n name, vmstring)\n', (8742, 8813), False, 'import azurerm\n'), ((9000, 9094), 'azurerm.delete_vmss_vms', 'azurerm.delete_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'vmstring'], {}), '(self.access_token, self.sub_id, self.rgname, self.\n name, vmstring)\n', (9023, 9094), False, 'import azurerm\n'), ((9278, 9371), 'azurerm.start_vmss_vms', 'azurerm.start_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'vmstring'], {}), '(self.access_token, self.sub_id, self.rgname, self.\n name, vmstring)\n', (9300, 9371), False, 'import azurerm\n'), ((9558, 9653), 'azurerm.restart_vmss_vms', 'azurerm.restart_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'vmstring'], {}), '(self.access_token, self.sub_id, self.rgname, self.\n name, vmstring)\n', (9582, 9653), False, 'import azurerm\n'), ((9842, 9940), 'azurerm.stopdealloc_vmss_vms', 'azurerm.stopdealloc_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'vmstring'], {}), '(self.access_token, self.sub_id, self.rgname,\n self.name, 
vmstring)\n', (9870, 9940), False, 'import azurerm\n'), ((10137, 10233), 'azurerm.poweroff_vmss_vms', 'azurerm.poweroff_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name', 'vmstring'], {}), '(self.access_token, self.sub_id, self.rgname, self\n .name, vmstring)\n', (10162, 10233), False, 'import azurerm\n'), ((11254, 11331), 'azurerm.list_vmss_vms', 'azurerm.list_vmss_vms', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.rgname, self.name)\n', (11275, 11331), False, 'import azurerm\n'), ((11447, 11542), 'azurerm.list_vmss_vm_instance_view', 'azurerm.list_vmss_vm_instance_view', (['self.access_token', 'self.sub_id', 'self.rgname', 'self.name'], {}), '(self.access_token, self.sub_id, self.\n rgname, self.name)\n', (11481, 11542), False, 'import azurerm\n'), ((6476, 6498), 'json.dumps', 'json.dumps', (['self.model'], {}), '(self.model)\n', (6486, 6498), False, 'import json\n')]
|
from nltk.corpus import stopwords
from nltk.stem.lancaster import LancasterStemmer
from utils import Constants
from utils import Utils
####################################################################################
####################################################################################
####################################################################################
# NAIVE BAYES SPECIFIC FUNCTIONS
####################################################################################
####################################################################################
####################################################################################
def create_classification_corpus(data_metric, regex):
"""
:param data_metric:
:param regex: regular expression for text extraction
:param df: path to the data set
:return: tools require for classification engines
df_class,df_words,df_class_count --- refer to ClassificationEngine.py for more details
business logic behind init tools is written here, it basically loops around the
entire data set and separate them into three different components
"""
speech_count, classes, words, word_set, class_words = {}, {}, {}, {}, {}
stem = LancasterStemmer()
stop = set(stopwords.words('english'))
for c in list(set(data[-1] for data in data_metric)):
classes[c] = []
for data in data_metric:
in_class = data[Constants.LABEL_INDEX]
in_speech = data[Constants.CLASSIFICATION_SPEECH_INDEX]
words.update(word_set)
if in_class not in class_words:
class_words[in_class] = [[]]
else:
class_words[in_class].append([])
# word_set = {}
for w in Utils.custom_tokenizer(regex, in_speech.lower()):
# word_set_temp = {}
if w not in stop:
stem_word = stem.stem(w)
word_set[stem_word] = 1 if stem_word not in word_set else word_set[stem_word] + 1
# word_set_temp[stem_word] = word_set[stem_word]
class_words[in_class][len(class_words[in_class])-1].append(stem_word)
# if word_set_temp[stem_word] < 2:
# classes[in_class].extend(word_set_temp)
classes[in_class].append(stem_word)
speech_count[in_class] = 1 if in_class not in speech_count else speech_count[in_class] + 1
return classes, words, speech_count, class_words
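# A minimal usage sketch, assuming Constants.LABEL_INDEX == -1 (label in the last
# column) and Constants.CLASSIFICATION_SPEECH_INDEX == 0 (text in the first column);
# the real values live in utils.Constants and the regex handling depends on
# Utils.custom_tokenizer, so treat the arguments below as placeholders.
#
#   data_metric = [["the food was great and cheap", "positive"],
#                  ["cold food and terrible service", "negative"]]
#   classes, words, speech_count, class_words = \
#       create_classification_corpus(data_metric, r"\w+")
#   # classes: stemmed tokens grouped per label, speech_count: documents per label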
def get_word_probabilities(df_class, df_word, df_class_count, frame_class_words):
"""
:param frame_class_words:
:param df_class: all Classes and words (grouped by classes) - dict
:param df_word: all words and word frequencies - dict
:param df_class_count: all classes and class frequencies - dict
:param initial_probabilities: initial probability of any class present in the df_class
:return: every word with probability of classes(word can be in that particular class) - dict
"""
probabilities = {}
for w in df_word:
for c in df_class:
if w not in probabilities:
probabilities[w] = {}
#probability = ((df_class[c].count(w) / len(df_class[c])) * (
# df_class_count[c] / sum(df_class_count.values()))) / (df_word[w] / sum(df_word.values()))
#probability = ((df_class[c].count(w) / len(df_class[c])) * (
# df_word[w] / sum(df_word.values()))) / (df_class_count[c] / sum(df_class_count.values()))
probability_class_words = len([x for x in frame_class_words[c] if w in x]) / len(frame_class_words[c])
probability_words = (df_class[c].count(w) / len(df_class[c]))
#probability_words = (df_word[w] / sum(df_word.values()))
probability_class = (df_class_count[c] / sum(df_class_count.values()))
probability = 0 if probability_words == 0 else (probability_class_words*probability_class)/probability_words
probabilities[w].update({c: probability})
return probabilities
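# A worked example of the score above (illustrative numbers only): if a word w
# appears in 2 of the 4 documents of class c (probability_class_words = 0.5),
# makes up 10% of the tokens inside class c (probability_words = 0.1) and class c
# covers half of the corpus (probability_class = 0.5), then
# probability = (0.5 * 0.5) / 0.1 = 2.5 -- an unnormalised per-class score rather
# than a true probability.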
def get_other_class_probabilities(class_probabilities):
"""
:param class_probabilities: probabilities of each class calculated using user input and Naive Bayes word_probabilities
:return: sum of all probabilities
"""
return sum(class_probabilities[c] for c in class_probabilities)
def classify_naive_bayes(speech, probabilities, frame_class, regex):
tokens = Utils.prepare_tokens(speech, regex, set(stopwords.words('english')), LancasterStemmer())
class_probabilities = {}
classify = {}
for c in frame_class:
class_probabilities[c] = 0
for meta in probabilities:
if meta in tokens:
for c in frame_class:
class_probabilities[c] += probabilities[meta][c]
#else:
# for c in frame_class:
# class_probabilities[c] += 1.0 - probabilities[meta][c]
#for c in frame_class:
# class_probabilities[c] = class_probabilities[c]
for c in frame_class:
classify[c] = class_probabilities[c] #/ get_other_class_probabilities(class_probabilities)
return classify
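# A hedged end-to-end sketch of how the three helpers above are chained by the
# calling engine (the speech text, data and regex below are placeholders):
#
#   classes, words, speech_count, class_words = create_classification_corpus(data, regex)
#   probabilities = get_word_probabilities(classes, words, speech_count, class_words)
#   scores = classify_naive_bayes("great food", probabilities, classes, regex)
#   prediction = max(scores, key=scores.get)   # class with the highest score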
####################################################################################
####################################################################################
####################################################################################
# DECISION TREE SPECIFIC FUNCTIONS
####################################################################################
####################################################################################
####################################################################################
def is_numeric(value): return isinstance(value, int) or isinstance(value, float)
def build_decision_tree(data_metric):
gain, question = find_best_split(data_metric)
if gain == 0:
return Leaf(data_metric)
true_rows, false_rows = partition(data_metric, question)
true_branch = build_decision_tree(true_rows)
false_branch = build_decision_tree(false_rows)
return DecisionNode(question, true_branch, false_branch)
def find_best_split(data_metric):
best_gain = 0
best_question = None
current_uncertainty = gini(data_metric)
n_features = len(data_metric[0]) - 1
for col in range(n_features):
values = set([row[col] for row in data_metric])
for val in values:
question = Question(col, val)
true_rows, false_rows = partition(data_metric, question)
if len(true_rows) == 0 or len(false_rows) == 0:
continue
gain = info_gain(true_rows, false_rows, current_uncertainty)
if gain >= best_gain:
best_gain, best_question = gain, question
return best_gain, best_question
def gini(data_metric):
counts = class_count(data_metric)
impurity = 1
for l in counts:
prob_of_l = counts[l] / float(len(data_metric))
impurity -= prob_of_l ** 2
return impurity
def class_count(data_metric):
counts = {}
for row in data_metric:
label = row[Constants.LABEL_INDEX]
counts[label] = 1 if label not in counts else counts[label] + 1
return counts
def info_gain(left, right, current_uncertainty):
p = float(len(left)) / (len(left) + len(right))
return current_uncertainty - p * gini(left) - (1 - p) * gini(right)
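# A worked example of the two measures above: with labels [A, A, A, B] at a node,
# gini = 1 - (0.75 ** 2 + 0.25 ** 2) = 0.375. If a question splits those rows into
# [A, A] and [A, B], then p = 0.5 and
# info_gain = 0.375 - 0.5 * 0.0 - 0.5 * 0.5 = 0.125.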
def partition(data_metric, question):
true_rows, false_rows = [], []
for row in data_metric:
true_rows.append(row) if question.match(row) else false_rows.append(row)
return true_rows, false_rows
class Leaf:
def __init__(self, data_metric):
self.predictions = class_count(data_metric)
class DecisionNode:
def __init__(self, question, true_branch, false_branch):
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
class Question:
def __init__(self, col, val):
self.col = col
self.val = val
def match(self, example):
val = example[self.col]
try:
return val >= self.val if is_numeric(val) else val == self.val
except TypeError:
return False
def classify_decision_tree(row, tree):
    if isinstance(tree, Leaf):
        return tree.predictions
    if tree.question.match(row):
        return classify_decision_tree(row, tree.true_branch)
    return classify_decision_tree(row, tree.false_branch)
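# A minimal usage sketch, assuming Constants.LABEL_INDEX == -1 so the label sits in
# the last column of each row (the feature columns are everything before it):
#
#   rows = [[3.0, "red", "apple"], [1.0, "red", "cherry"], [3.2, "green", "apple"]]
#   tree = build_decision_tree(rows)
#   print(classify_decision_tree([3.1, "red", "apple"], tree))  # e.g. {'apple': 2}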
|
[
"nltk.stem.lancaster.LancasterStemmer",
"nltk.corpus.stopwords.words"
] |
[((1265, 1283), 'nltk.stem.lancaster.LancasterStemmer', 'LancasterStemmer', ([], {}), '()\n', (1281, 1283), False, 'from nltk.stem.lancaster import LancasterStemmer\n'), ((1299, 1325), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1314, 1325), False, 'from nltk.corpus import stopwords\n'), ((4521, 4539), 'nltk.stem.lancaster.LancasterStemmer', 'LancasterStemmer', ([], {}), '()\n', (4537, 4539), False, 'from nltk.stem.lancaster import LancasterStemmer\n'), ((4492, 4518), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4507, 4518), False, 'from nltk.corpus import stopwords\n')]
|
#!/usr/bin/env python3
import os
import logging
import yaml
from discord.ext.commands import Bot, Context, CommandError, CommandOnCooldown
# create logger
log = logging.getLogger(__package__)
log.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('../senko.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
# create formatter and add it to the handlers
formatter = logging.Formatter('[%(asctime)s] %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
log.addHandler(fh)
log.addHandler(ch)
# Import config file
with open('config.yaml') as stream:
config = yaml.safe_load(stream)
# Initialise bot
bot = Bot(command_prefix=config['prefix'])
bot.remove_command('help')
bot.owner = config['owner']
bot.keys = config['keys']
bot.db = config['database']
bot.quiet = config['quiet']
bot.dt = config['dt_channels']
# Load cogs
for file in filter(lambda file: file.endswith('.py'), os.listdir('./cogs')):
bot.load_extension(f'cogs.{file[:-3]}')
# Log bot startup
@bot.event
async def on_ready() -> None:
log.warning(f'We have logged in as {bot.user} in these servers:')
for guild in bot.guilds:
log.warning(f'{guild.name} ({guild.id})')
log.warning(f'({len(bot.guilds)} servers)')
log.warning('************************')
# Handle command cooldown
@bot.event
async def on_command_error(ctx: Context, error: CommandError) -> None:
if isinstance(error, CommandOnCooldown):
await ctx.send(error)
# Start bot
bot.run(config['token'])
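# For reference, the keys read above imply that config.yaml deserialises to roughly
# this shape (key names come from the code, the values here are placeholders):
#   {'prefix': '!', 'owner': 0, 'keys': {}, 'database': '', 'quiet': False,
#    'dt_channels': [], 'token': '<bot token>'}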
|
[
"logging.FileHandler",
"logging.StreamHandler",
"logging.Formatter",
"yaml.safe_load",
"discord.ext.commands.Bot",
"os.listdir",
"logging.getLogger"
] |
[((163, 193), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (180, 193), False, 'import logging\n'), ((280, 315), 'logging.FileHandler', 'logging.FileHandler', (['"""../senko.log"""'], {}), "('../senko.log')\n", (299, 315), False, 'import logging\n'), ((397, 420), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (418, 420), False, 'import logging\n'), ((508, 554), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(message)s"""'], {}), "('[%(asctime)s] %(message)s')\n", (525, 554), False, 'import logging\n'), ((799, 835), 'discord.ext.commands.Bot', 'Bot', ([], {'command_prefix': "config['prefix']"}), "(command_prefix=config['prefix'])\n", (802, 835), False, 'from discord.ext.commands import Bot, Context, CommandError, CommandOnCooldown\n'), ((752, 774), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (766, 774), False, 'import yaml\n'), ((1071, 1091), 'os.listdir', 'os.listdir', (['"""./cogs"""'], {}), "('./cogs')\n", (1081, 1091), False, 'import os\n')]
|
#!/usr/bin/env python
from control.msg import heaveFeedback, heaveAction, heaveResult
from std_msgs.msg import Float64  # message type for the heave topics below (std_msgs assumed)
import rospy
import time
import actionlib
class Heave(object):
feedback = heaveFeedback()
result = heaveResult()
def __init__(self, name):
self.heavePub = rospy.Publisher('/heave_setpoint', Float64, queue_size=1)
rospy.Subscriber("/heave", Float64, self.heaveCallback)
self.serverName = name
self.heaveServer = actionlib.SimpleActionServer(
self.serverName,
heaveAction,
execute_cb=self.heaveActionCallback,
auto_start=False)
self.heaveServer.start()
def heaveCallback(self, data):
self.heave = data.data
def heaveActionCallback(self, goal):
success = False
while(goal.heave_setpoint != self.heave):
start = int(time.time())
while(abs(goal.heave_setpoint - self.heave) < 3):
if(int(time.time()) == start + 10):
success = True
break
            if(success):
break
self.heavePub.publish(goal.heave_setpoint)
            rospy.loginfo('heave: %f, heave Setpoint: %f, Error: %f', \
                          self.heave, goal.heave_setpoint, \
                          goal.heave_setpoint-self.heave)
if success:
self.result.heave_final = self.heave
rospy.loginfo('%s : Success' % self.serverName)
self.heaveServer.set_succeeded(self.result)
if __name__ == '__main__':
rospy.init_node('heaveServer')
server = Heave(rospy.get_name())
rospy.spin()
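# A hedged sketch of a matching action client; heaveGoal is assumed to be generated
# from the same .action definition as heaveAction/heaveResult, and 'heaveServer' is
# the name the server node registers above.
#
#   from control.msg import heaveGoal
#   client = actionlib.SimpleActionClient('heaveServer', heaveAction)
#   client.wait_for_server()
#   client.send_goal(heaveGoal(heave_setpoint=1.5))
#   client.wait_for_result()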
|
[
"rospy.Subscriber",
"rospy.Publisher",
"control.msg.heaveFeedback",
"time.time",
"rospy.loginfo",
"rospy.init_node",
"actionlib.SimpleActionServer",
"rospy.get_name",
"rospy.spin",
"control.msg.heaveResult"
] |
[((165, 180), 'control.msg.heaveFeedback', 'heaveFeedback', ([], {}), '()\n', (178, 180), False, 'from control.msg import heaveFeedback, heaveAction, heaveResult\n'), ((194, 207), 'control.msg.heaveResult', 'heaveResult', ([], {}), '()\n', (205, 207), False, 'from control.msg import heaveFeedback, heaveAction, heaveResult\n'), ((1568, 1598), 'rospy.init_node', 'rospy.init_node', (['"""heaveServer"""'], {}), "('heaveServer')\n", (1583, 1598), False, 'import rospy\n'), ((1640, 1652), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1650, 1652), False, 'import rospy\n'), ((263, 320), 'rospy.Publisher', 'rospy.Publisher', (['"""/heave_setpoint"""', 'Float64'], {'queue_size': '(1)'}), "('/heave_setpoint', Float64, queue_size=1)\n", (278, 320), False, 'import rospy\n'), ((329, 384), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/heave"""', 'Float64', 'self.heaveCallback'], {}), "('/heave', Float64, self.heaveCallback)\n", (345, 384), False, 'import rospy\n'), ((443, 561), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['self.serverName', 'heaveAction'], {'execute_cb': 'self.heaveActionCallback', 'auto_start': '(False)'}), '(self.serverName, heaveAction, execute_cb=self.\n heaveActionCallback, auto_start=False)\n', (471, 561), False, 'import actionlib\n'), ((1177, 1305), 'rospy.loginfo', 'rospy.loginfo', (['"""heave: %f, heave Setpoint: %f, Error: %f"""', 'self._heave', 'req.heave_setpoint', '(req.heave_setpoint - self.heave)'], {}), "('heave: %f, heave Setpoint: %f, Error: %f', self._heave, req.\n heave_setpoint, req.heave_setpoint - self.heave)\n", (1190, 1305), False, 'import rospy\n'), ((1618, 1634), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (1632, 1634), False, 'import rospy\n'), ((1432, 1479), 'rospy.loginfo', 'rospy.loginfo', (["('%s : Success' % self.serverName)"], {}), "('%s : Success' % self.serverName)\n", (1445, 1479), False, 'import rospy\n'), ((878, 889), 'time.time', 'time.time', ([], {}), '()\n', (887, 889), False, 'import time\n'), ((976, 987), 'time.time', 'time.time', ([], {}), '()\n', (985, 987), False, 'import time\n')]
|
import os
import sys
class LoginLibrary:
def __init__(self):
self._sut_path = os.path.join(os.path.dirname(__file__),
'..', 'sut', 'login.py')
self._status = ''
def create_user(self, username, password):
self._run_command('create', username, password)
def change_password(self, username, old_pwd, new_pwd):
self._run_command('change-password', username, old_pwd, new_pwd)
def attempt_to_login_with_credentials(self, username, password):
self._run_command('login', username, password)
def status_should_be(self, expected_status):
if expected_status != self._status:
raise AssertionError("Expected status to be '%s' but was '%s'"
% (expected_status, self._status))
def _run_command(self, command, *args):
command = '"%s" %s %s' % (self._sut_path, command, ' '.join(args))
process = os.popen(command)
self._status = process.read().strip()
process.close()
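# A hedged usage sketch outside Robot Framework; the credentials are placeholders
# and the expected status string depends on what sut/login.py actually prints:
#
#   lib = LoginLibrary()
#   lib.create_user('demo', 'P4ssw0rd')
#   lib.attempt_to_login_with_credentials('demo', 'P4ssw0rd')
#   lib.status_should_be('Logged In')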
|
[
"os.path.dirname",
"os.popen"
] |
[((960, 977), 'os.popen', 'os.popen', (['command'], {}), '(command)\n', (968, 977), False, 'import os\n'), ((106, 131), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'import os\n')]
|
import logging
from flytekit import ContainerTask, kwtypes, task, workflow
logger = logging.getLogger(__file__)
calculate_ellipse_area_shell = ContainerTask(
name="ellipse-area-metadata-shell",
input_data_dir="/var/inputs",
output_data_dir="/var/outputs",
inputs=kwtypes(a=float, b=float),
outputs=kwtypes(area=float, metadata=str),
image="pingsutw/raw:v2",
command=[
"sh",
"-c",
"./calculate-ellipse-area.sh /var/inputs /var/outputs;",
],
)
@task
def report_all_calculated_areas(
area_shell: float,
metadata_shell: str,
):
logger.info(f"shell: area={area_shell}, metadata={metadata_shell}")
@workflow
def wf(a: float = 2.0, b: float = 3.0):
area_shell, metadata_shell = calculate_ellipse_area_shell(a=a, b=b)
report_all_calculated_areas(
area_shell=area_shell,
metadata_shell=metadata_shell,
)
if __name__ == "__main__":
print(f"Running wf() {wf(a=2.0, b=3.0)}")
|
[
"flytekit.kwtypes",
"logging.getLogger"
] |
[((86, 113), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import logging\n'), ((282, 307), 'flytekit.kwtypes', 'kwtypes', ([], {'a': 'float', 'b': 'float'}), '(a=float, b=float)\n', (289, 307), False, 'from flytekit import ContainerTask, kwtypes, task, workflow\n'), ((321, 354), 'flytekit.kwtypes', 'kwtypes', ([], {'area': 'float', 'metadata': 'str'}), '(area=float, metadata=str)\n', (328, 354), False, 'from flytekit import ContainerTask, kwtypes, task, workflow\n')]
|
#--------------------------------------------------------------------------#
# This code makes use of all other functions of #
# the package to build a Dash Web App #
#--------------------------------------------------------------------------#
# imports ------------------------------------------------------------------
import plots
import utils
from data_collection import semantic_api
from data_preprocessing import data_preprocess
import requests
import pandas as pd
import dash
import dash_cytoscape as cyto
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
# Data loading and cleaning with Semantic Scholar API -----------------------
#df, all_references_df, total_results, query = semantic_api.get_all_results_from_semantic_scholar()
# Instantiate Dash App ------------------------------------------------------
app = dash.Dash(
__name__, suppress_callback_exceptions = True,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1", 'charSet':'“UTF-8”'}])
server = app.server
app.title = "Research Analytics"
# Layout --------------------------------------------------------------------
app.layout = html.Div(
[
# Banner ------------------------------------------------------------
html.Div(
[
html.A(
[
html.Img(
src="/assets/web.png",
alt="research intelligence"
),
html.H3("research analytics")
],
href="https://jhupiterz.notion.site/Welcome-to-research-intelligence-\
a36796f418b040f6ade944f9c54e87cb",
target='_blank',
className="logo-banner",
),
html.Div(
[
html.A(
"Contribute",
href="https://github.com/jhupiterz/research-analytics",
target='_blank',
className="doc-link"
),
html.A(
"Documentation",
href="https://github.com/jhupiterz/research-analytics/blob/main/README.md",
target='_blank',
className="doc-link"
),
],
className="navbar"
),
],
className="banner",
),
# Search bar ------------------------------------------------------------
html.Div(
[
html.H1(id='topic', children=[]),
html.Div(
[
html.Img(
src='/assets/loupe.png',
className="loupe-img",
),
dcc.Input(
id='search-query',
type = 'text',
placeholder = "Search for keywords (e.g. \"carbon nanotubes\")",
debounce = True,
spellCheck = True,
inputMode = 'latin',
name = 'text',
autoFocus = False,
minLength = 1, maxLength = 60,
autoComplete='off',
disabled = False,
readOnly = False,
size = '60',
n_submit = 0,
),
],
className="search-bar",
),
],
className="search-wrapper"
),
dcc.Store(id='store-initial-query-response', storage_type='memory'),
dcc.Store(id='store-references-query-response', storage_type='memory'),
# Main content ----------------------------------------------------------
html.Div(id='start-page', children=[], className = 'main-body'),
# Footer ----------------------------------------------------------------
html.Footer(
[
html.P(
[
"Built with ",
html.A("Plotly Dash", href="https://plotly.com/dash/", target="_blank")
],
),
html.P(
[
"Powered by ",
html.A("Semantic Scholar", href="https://www.semanticscholar.org/", target="_blank")
],
),
]
),
],
className="app-layout",
)
# Callbacks --------------------------------------------------------------------
# Store response of initial API query
@app.callback(
Output('store-initial-query-response', 'data'),
Input('search-query', 'n_submit'),
Input('search-query', 'value'))
def store_primary_data(n_submit, value):
if n_submit > 0:
url = f"https://api.semanticscholar.org/graph/v1/paper/search?query={value}&limit=30&fields=url,title,abstract,authors,venue,year,referenceCount,citationCount,influentialCitationCount,isOpenAccess,fieldsOfStudy"
response = requests.get(url).json()
df = pd.DataFrame(response['data'])
df = data_preprocess.extract_key_words(df)
return {
'data': df.to_dict("records")
}
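# The search endpoint used above returns JSON shaped roughly like (hedged sketch):
#   {'total': 1234, 'offset': 0,
#    'data': [{'paperId': '...', 'title': '...', 'abstract': '...', 'authors': [...],
#              'year': 2020, 'fieldsOfStudy': [...], ...}]}
# which is why response['data'] can be handed straight to pd.DataFrame before
# data_preprocess.extract_key_words post-processes it.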
# Store dictionary of references of all initial papers
@app.callback(
Output('store-references-query-response', 'data'),
Input('store-initial-query-response', 'data'))
def store_references_data(data):
if data != None:
ref_dict = []
for paper in data['data']:
paper_id = paper['paperId']
url = f"https://api.semanticscholar.org/graph/v1/paper/{paper_id}/references?limit=50&fields=intents,isInfluential,paperId,url,title,abstract,venue,year,referenceCount,citationCount,influentialCitationCount,isOpenAccess,fieldsOfStudy,authors"
response = requests.get(url).json()
ref_data = response['data']
for cited_paper in ref_data:
cited_paper['citedPaper']['citedBy'] = paper_id
ref_dict.append(cited_paper['citedPaper'])
return ref_dict
# Displays start page
@app.callback(
Output('start-page', 'children'),
Input('search-query', 'n_submit'))
def render_content(n_submit):
""" Returns the content of start page.
If there is data then returns tabs
Else, returns default content of start page (blog posts)"""
if n_submit > 0:
return (
html.Div([
dcc.Tabs(id="tabs-example-graph", value = 'tab-1-example-graph', className= "tabs",
children=[
dcc.Tab(label='📊 Search results 📊', value='tab-1-example-graph',
className= "single-tab", selected_className= "single-tab-selected"),
dcc.Tab(label='🤝 Author network 🤝', value='tab-2-example-graph',
className= "single-tab", selected_className= "single-tab-selected"),
dcc.Tab(label='🌐 Paper network 🌐', value='tab-3-example-graph',
className= "single-tab", selected_className= "single-tab-selected")
])
], className= "tabs-container"),
html.Br(),
html.Div(id='tabs-content-example-graph'))
else:
return html.Div(
[
html.Hr(),
html.P("👇 Or check out the latest blog posts about data-driven academia 👇"), html.Br(),
html.Div([
html.A(
href="https://medium.com/@juhartz/are-scholarly-papers-really-the-best-way-to-disseminate-research-f8d85d3eee62",
children=[
html.Img(
alt="Link to my twitter",
src="assets/blogpost_1.png",
className="zoom"
)
], target= '_blank', className= "blog-post-1"
),
html.A(
href="https://medium.com/@juhartz/what-makes-a-research-paper-impactful-a40f33206fd1",
children=[
html.Img(
alt="Link to my twitter",
src="assets/blogpost_2.png",
className='zoom'
)
], target= '_blank', className= "blog-post-2"
)
],className= "blog-posts")],
className= "start-page")
# Returns content of each tab when selected
@app.callback(Output('tabs-content-example-graph', 'children'),
Input('tabs-example-graph', 'value'),
Input('store-references-query-response', 'data'))
def render_tab_content(tab, data_ref = None):
if tab == 'tab-1-example-graph':
if data_ref != None:
return (
html.Div([
html.Div([
html.Div([
html.P("Filter results in time "),
dcc.RangeSlider(1940, 2030, 10, value=[1940, 2030], id='time-range-slider',
allowCross=False, className= "range-slider",
marks={
1940: {'label': '1940', 'style': {'color': 'black'}},
1950: {'label': '1950', 'style': {'color': 'black'}},
1960: {'label': '1960', 'style': {'color': 'black'}},
1970: {'label': '1970', 'style': {'color': 'black'}},
1980: {'label': '1980', 'style': {'color': 'black'}},
1990: {'label': '1990', 'style': {'color': 'black'}},
2000: {'label': '2000', 'style': {'color': 'black'}},
2010: {'label': '2010', 'style': {'color': 'black'}},
2020: {'label': '2020', 'style': {'color': 'black'}},
2030: {'label': '2030', 'style': {'color': 'black'}},
})], className = "global-time-filter"),
html.Div([html.Button(
"Download data",
title = "Downloads data as .CSV file",
id = "btn-download-data",
className="doc-link-download",
n_clicks= 0
),
dcc.Download(id="download-csv")], style = {'order': '2'})],
className= "upper-filters"),
html.Div([
html.Div([
dcc.Loading(id = "loading-icon-1",
children=[
html.Div([
html.Div(id = 'dp-keywords', children= [], className = "keywords-dropdown"),
html.Div(id = 'keywords-graph-all', children= [], className= "keywords-plot")],
className = "keywords-graph")],
type = 'default', className= "loading-keywords"),
html.Div(id = 'accessibility-pie-all', children = [
html.Div([
html.Div(id = 'dp-access', children=[], style = {'order': '2'}),
html.Div(id = 'access-pie-all', children= [], style = {'order': '1', 'margin': 'auto'})],
className= "accessibility-graph"),
html.Div(id = 'fields-pie-all', children = [], className= "fields-pie-graph")],
className= "fields-pie-and-dropdown")],
className= "tab-1-upper-graphs"),
html.Br(),
html.Br(),
html.Div([
html.Div(id = 'active-authors-graph-all', children = [], className= "active-authors-graph"),
html.Div(id = 'publication-graph-all', children = [], className= "citations-graph")],
className= "tab-1-lower-graphs"),
],
className= "tab-1")], className= "tab-1-with-download"))
else:
return html.Div([html.P("Retrieving info about 1000s of papers, please give it a few seconds",
style = {'order': '1', 'font-size': '1.5rem', 'color':'rgba(3, 3, 3, 0.2)',
'text-align': 'center', 'margin-top': '10vh'}),
html.Img(src='/assets/spinner.gif', style= {'order':'2', 'margin': 'auto'})],
style= {'display': 'flex', 'flex-direction':'column', 'justify-content': 'center',
'align-items': 'center', 'min-height': '400px', 'width':'60vw', 'margin': 'auto'})
if tab == 'tab-2-example-graph':
return html.Div([
html.Div([
html.Div([
html.Button('Reset view', id='bt-reset', className= 'reset-button'),
html.Div(id = 'dp-access-cytoscape', children = [], style={'order':'2'})],
className= "dropdown-and-button-cyto-1"),
cyto.Cytoscape(
id='cytoscape-event-callbacks-1',
layout={'name': 'random', 'height': '58vh', 'width': '44vw'},
className= "cyto-1",
stylesheet = [
{
'selector': 'label',
'style': {
'content': 'data(label)',
'color': 'rgba(60, 25, 240, 0.8)',
'font-size':'14vh',
'font-family':'Arial, sans serif',
}
},
{
'selector': 'node',
'style': {
'label': 'data(label)'
}
},
{
'selector': '[selected ^= "True"]',
'style': {
'background-color': 'green',
'line-color': 'green'
}
},
{
'selector': '.author',
'style': {
'background-color': 'rgba(60, 25, 240, 0.8)'
}
},
{
'selector': '.collaboration',
'style': {
'line-color': '#737373',
'width': 1
}
}
])],
className= "cyto-1-and-button-container"),
html.Div(className= 'vl', style = {'order': '2'}),
html.Div([
html.Div([
html.Div(id = 'author-info-1', className= "author-info")],
className= "author-info-container")],
className= "author-info-big-container")
], className= "tab-2")
if tab == 'tab-3-example-graph':
return html.Div([
html.Div([
html.Button('Reset view', id='bt-reset-papers', className= 'reset-button'),
cyto.Cytoscape(
id='cytoscape-event-callbacks-2',
layout={'name': 'random', 'height': '58vh', 'width': '50vw'},
style={'order':'2','height': '58vh', 'width': '50vw'},
#className= "cyto-2",
stylesheet = [
{
'selector': 'node',
'style': {
'background-color': 'rgba(60, 25, 240, 0.8)',
'height': '9vh',
'width': '9vh'
}
},
{
'selector': '.res',
'style': {
'background-color': 'green',
'color': 'red',
'height': '1.2vh',
'width': '1.2vh'
}
},
{
'selector': '.ref',
'style': {
'background-color': 'white',
'color': 'white',
'height': '0.8vh',
'width': '0.8vh'
}
},
{
'selector': '.citation',
'style': {
'line-color': '#737373',
'width': 1
}
}
])],
className= "cyto-2-and-button-container"),
html.Div(className= 'vl', style = {'order': '2'}),
html.Div([
html.Div([
html.Div(id = 'paper-info-1', className= "paper-info")],
className= "paper-info-container")],
className= "paper-info-big-container")],
className= "tab-3")
# Welcome title
@app.callback(
Output('topic', 'children'),
Input('search-query', 'value'))
def display_topic(value):
return "Welcome researcher!"
# Download data as CSV button
@app.callback(
Output("download-csv", "data"),
Input("btn-download-data", "n_clicks"),
Input("store-initial-query-response", "data"),
Input("store-references-query-response", "data"),
Input("time-range-slider", "value"),
prevent_initial_call=True,
)
def func(n_clicks, data_res, data_ref, filter_values):
if data_ref:
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
if n_clicks > 0:
return dcc.send_data_frame(dff_all.to_csv, "research_data.csv")
# Plots and graphs ----------------------------------------------
# keywords
@app.callback(
Output('keywords-graph-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('search-query', 'value'),
Input('dp-keywords-component', 'value'),
Input('time-range-slider', 'value'))
def create_top_key_words_all(data_res, query, filter, filter_values):
"""Returns keywords graph as dcc.Graph component
Only displays it when all data is retrieved"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_res = data_preprocess.filter_data_by_time(dff_res, filter_values)
if filter == 'All':
fig = plots.make_top_key_words(dff_res, query)
else:
index_list = []
for index, row in dff_res.iterrows():
if isinstance(row.fieldsOfStudy, list):
if filter in row.fieldsOfStudy:
index_list.append(index)
dff_filtered = dff_res.loc[index_list]
fig = plots.make_top_key_words(dff_filtered,query)
return dcc.Graph(figure=fig, className= "keywords-plotly")
@app.callback(
Output('dp-keywords', 'children'),
Input('store-initial-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_keywords_dorpdown(data_res, filter_values):
"""Returns the dropdown menu according to all fields of study in data
as a dcc.Dropdown component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_res = data_preprocess.filter_data_by_time(dff_res, filter_values)
fields_of_study = dff_res['fieldsOfStudy'].tolist()
res = [field for field in fields_of_study if isinstance(field, list)]
flat_list_fields = utils.flatten_list(res)
options = ['All'] + list(set(flat_list_fields))
return dcc.Dropdown(id = 'dp-keywords-component', value = 'All',
options = options, clearable=False,
placeholder= 'Select a field of study', className= 'dp-access-piie')
# loading states for keyword graphs
@app.callback(Output('loading-icon-1', 'children'),
Input('keywords-graph-res', 'children'))
@app.callback(Output('loading-icon-2', 'children'),
Input('keywords-graph-ref', 'children'))
# Accessibility
@app.callback(
Output('dp-access', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_accessibility_pie_dorpdown(data_res, data_ref, filter_values):
"""Returns the dropdown menu according to all fields of study in data
as a dcc.Dropdown component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fields_of_study = dff_all['fieldsOfStudy'].tolist()
res = [field for field in fields_of_study if isinstance(field, list)]
flat_list_fields = utils.flatten_list(res)
options = ['All'] + list(set(flat_list_fields))
return dcc.Dropdown(id = 'dp-access-component', value = 'All',
options = options, clearable=False,
placeholder= 'Select a field of study', className= 'dp-access-piie')
@app.callback(
Output('access-pie-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('dp-access-component', 'value'),
Input('time-range-slider', 'value'))
def create_accessibility_pie(data_res, data_ref, filter, filter_values):
"""Returns the accessibility pie graph for all data
as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
if filter == 'All':
fig = plots.make_access_pie(dff_all)
else:
index_list = []
for index, row in dff_all.iterrows():
if isinstance(row.fieldsOfStudy, list):
if filter in row.fieldsOfStudy:
index_list.append(index)
dff_filtered = dff_all.loc[index_list]
fig = plots.make_access_pie(dff_filtered)
return dcc.Graph(figure = fig, className= "access-pie-plotly")
# Publications & citations per year
@app.callback(
Output('publication-graph-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_publication_graph_all(data_res, data_ref, filter_values):
"""Returns the pubs + citations graph as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fig = plots.make_pubs_cites_per_year_line(dff_all, filter_values)
return dcc.Graph(figure=fig, className= "pub-graph-plotly")
# Fields of study
@app.callback(
Output('fields-pie-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_fields_pie_res(data_res, data_ref, filter_values):
"""Returns the fields pie as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fig = plots.make_fields_pie(dff_all)
return dcc.Graph(figure=fig, className= "fields-pie-plotly")
# Most active authors
@app.callback(
Output('active-authors-graph-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_active_authors_graph_res(data_res, data_ref, filter_values):
"""Returns the most active authors graph as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fig = plots.make_active_authors(dff_all)
return dcc.Graph(figure=fig, className = "pub-graph-plotly")
# Cytoscapes -------------------------------------------------------------------
@app.callback(
Output('dp-access-cytoscape', 'children'),
Input('store-initial-query-response', 'data'))
def create_dropdown_cytoscape(data_res):
"""Returns the dropdown menu according to all fields
of study as a dcc.Dropdown component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
fields_of_study = dff_res['fieldsOfStudy'].tolist()
res = [field for field in fields_of_study if isinstance(field, list)]
flat_list_fields = utils.flatten_list(res)
options = ['All'] + list(set(flat_list_fields))
return dcc.Dropdown(id = 'dp-access-component_cytoscape', value = 'All',
options = options, clearable=False,
placeholder= 'Select a field of study', className= 'dp-access-pie')
@app.callback(
Output('cytoscape-event-callbacks-1', 'elements'),
Output('cytoscape-event-callbacks-1', 'zoom'),
Input('store-initial-query-response', 'data'),
Input('bt-reset', 'n_clicks'),
Input('dp-access-component_cytoscape', 'value'),
Input('cytoscape-event-callbacks-1', 'zoom'))
def generate_collaboration_network(data_res, n_clicks, filter, zoom):
"""Returns the elements of the collaboaration cytoscape in tab 2"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
if filter == 'All':
elements = plots.generate_graph_elements_collab(dff_res)
else:
index_list = []
for index, row in dff_res.iterrows():
if isinstance(row.fieldsOfStudy, list):
if filter in row.fieldsOfStudy:
index_list.append(index)
dff_filtered = dff_res.loc[index_list]
elements = plots.generate_graph_elements_collab(dff_filtered)
if n_clicks:
if n_clicks > 0:
zoom = 1
return elements, zoom
return elements, zoom
@app.callback(
Output('cytoscape-event-callbacks-2', 'elements'),
Output('cytoscape-event-callbacks-2', 'zoom'),
Input('store-references-query-response', 'data'),
Input('store-initial-query-response', 'data'),
Input('bt-reset-papers', 'n_clicks'),
Input('cytoscape-event-callbacks-2', 'zoom'))
def generate_citation_network(data_ref, data_res, n_clicks, zoom):
"""Returns the elements of the citation cytoscape in tab 3"""
ref_df = pd.DataFrame(data_ref)
ref_df['reference'] = semantic_api.build_references(ref_df)
res_df = pd.DataFrame(data_res['data'])
res_df['reference'] = semantic_api.build_references(res_df)
elements= plots.generate_graph_elements_network(ref_df, res_df)
if n_clicks:
if n_clicks > 0:
zoom = 1
return elements, zoom
return elements, zoom
# Retrieves info on author
@app.callback(Output('author-info-1', 'children'),
Input('cytoscape-event-callbacks-1', 'tapNodeData'))
def displayTapNodeData(data):
"""Requests and returns the info about an author when node is clicked on"""
if data:
author_info = semantic_api.get_author_info(data['id'])
paragraph = html.Div([
html.B(author_info['name']), html.Br(),html.Br(),
html.Span("Published "), html.B(author_info['paperCount']), html.Span(" papers."), html.Br(),html.Br(),
html.Span("Received "), html.B(author_info['citationCount']), html.Span(" citations."), html.Br(),html.Br(),
html.Span(f"h index: "), html.B(author_info['hIndex']), html.Br(), html.Br(),
html.A("Semantic Scholar profile", href = author_info['url'], target= '_blank')],
className = "author-info-text"),
return paragraph
else:
return html.P("Click on a node to display information about an author",
className= "author-info-default-text")
# Retrieves info on paper
@app.callback(Output('paper-info-1', 'children'),
Input('cytoscape-event-callbacks-2', 'tapNodeData'))
def displayTapNodeData(data):
"""Requests and returns the info about a paper when node is clicked on"""
if data:
paper_info = semantic_api.get_paper_info(data['id'])
if 'paperId' in paper_info:
if paper_info['isOpenAccess']:
oa = ''
else:
oa = 'NOT'
if paper_info['abstract'] == None:
paper_info['abstract'] = 'No abstract available for this paper.'
paragraph = html.Div([html.Br(), html.B(paper_info['title']), html.Br(),html.Br(),
html.Li([html.Span("Published in "), html.B(paper_info['year'])]),
html.Li([html.Span("Includes "), html.B(paper_info['referenceCount']), html.Span(" references.")]),
html.Li([html.Span("Received "), html.B(paper_info['citationCount']), html.Span(" citations.")]),
html.Li([html.Span("Is "), html.B(oa), html.Span(" open access.", style = {'font-size': '1.5vh', 'color': 'black'})]),
html.Li([html.A(' Semantic Scholar URL', href = paper_info['url'], target = '_blank')]), html.Br(),
html.B("Abstract"), html.Br(),
html.Span(paper_info['abstract'])],
className= "paper-info-text")
else:
paragraph = html.P("No info available for this paper", className= "paper-info-default-no-info")
return paragraph
else:
return html.P("Click on a node to display information about a paper",
className= "paper-info-default-text")
# Runs the app ------------------------------------------------------------
if __name__ == '__main__':
app.run_server(debug=True, use_reloader=False)
|
[
"plots.make_fields_pie",
"dash.dcc.Store",
"plots.make_access_pie",
"dash.dcc.Input",
"dash.dcc.Tab",
"plots.generate_graph_elements_network",
"data_collection.semantic_api.get_paper_info",
"dash.dcc.Graph",
"plots.make_top_key_words",
"pandas.DataFrame",
"dash.Dash",
"plots.make_pubs_cites_per_year_line",
"data_collection.semantic_api.get_author_info",
"data_preprocessing.data_preprocess.extract_key_words",
"dash.html.Div",
"dash.html.Button",
"dash.dcc.RangeSlider",
"dash.dcc.Dropdown",
"dash.html.P",
"requests.get",
"plots.make_active_authors",
"dash.html.Hr",
"pandas.concat",
"dash.html.H3",
"dash.html.Span",
"dash.dcc.send_data_frame",
"dash.html.A",
"plots.generate_graph_elements_collab",
"dash.dependencies.Input",
"dash.html.H1",
"dash_cytoscape.Cytoscape",
"data_preprocessing.data_preprocess.filter_data_by_time",
"dash.html.B",
"data_collection.semantic_api.build_references",
"dash.html.Img",
"utils.flatten_list",
"dash.html.Br",
"dash.dcc.Download",
"dash.dependencies.Output"
] |
[((934, 1102), 'dash.Dash', 'dash.Dash', (['__name__'], {'suppress_callback_exceptions': '(True)', 'meta_tags': "[{'name': 'viewport', 'content': 'width=device-width, initial-scale=1',\n 'charSet': '“UTF-8”'}]"}), "(__name__, suppress_callback_exceptions=True, meta_tags=[{'name':\n 'viewport', 'content': 'width=device-width, initial-scale=1', 'charSet':\n '“UTF-8”'}])\n", (943, 1102), False, 'import dash\n'), ((5130, 5176), 'dash.dependencies.Output', 'Output', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (5136, 5176), False, 'from dash.dependencies import Input, Output\n'), ((5182, 5215), 'dash.dependencies.Input', 'Input', (['"""search-query"""', '"""n_submit"""'], {}), "('search-query', 'n_submit')\n", (5187, 5215), False, 'from dash.dependencies import Input, Output\n'), ((5221, 5251), 'dash.dependencies.Input', 'Input', (['"""search-query"""', '"""value"""'], {}), "('search-query', 'value')\n", (5226, 5251), False, 'from dash.dependencies import Input, Output\n'), ((5835, 5884), 'dash.dependencies.Output', 'Output', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (5841, 5884), False, 'from dash.dependencies import Input, Output\n'), ((5890, 5935), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (5895, 5935), False, 'from dash.dependencies import Input, Output\n'), ((6661, 6693), 'dash.dependencies.Output', 'Output', (['"""start-page"""', '"""children"""'], {}), "('start-page', 'children')\n", (6667, 6693), False, 'from dash.dependencies import Input, Output\n'), ((6699, 6732), 'dash.dependencies.Input', 'Input', (['"""search-query"""', '"""n_submit"""'], {}), "('search-query', 'n_submit')\n", (6704, 6732), False, 'from dash.dependencies import Input, Output\n'), ((9158, 9206), 'dash.dependencies.Output', 'Output', (['"""tabs-content-example-graph"""', '"""children"""'], {}), "('tabs-content-example-graph', 'children')\n", (9164, 9206), False, 'from dash.dependencies import Input, Output\n'), ((9222, 9258), 'dash.dependencies.Input', 'Input', (['"""tabs-example-graph"""', '"""value"""'], {}), "('tabs-example-graph', 'value')\n", (9227, 9258), False, 'from dash.dependencies import Input, Output\n'), ((9274, 9322), 'dash.dependencies.Input', 'Input', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (9279, 9322), False, 'from dash.dependencies import Input, Output\n'), ((19458, 19485), 'dash.dependencies.Output', 'Output', (['"""topic"""', '"""children"""'], {}), "('topic', 'children')\n", (19464, 19485), False, 'from dash.dependencies import Input, Output\n'), ((19491, 19521), 'dash.dependencies.Input', 'Input', (['"""search-query"""', '"""value"""'], {}), "('search-query', 'value')\n", (19496, 19521), False, 'from dash.dependencies import Input, Output\n'), ((19632, 19662), 'dash.dependencies.Output', 'Output', (['"""download-csv"""', '"""data"""'], {}), "('download-csv', 'data')\n", (19638, 19662), False, 'from dash.dependencies import Input, Output\n'), ((19668, 19706), 'dash.dependencies.Input', 'Input', (['"""btn-download-data"""', '"""n_clicks"""'], {}), "('btn-download-data', 'n_clicks')\n", (19673, 19706), False, 'from dash.dependencies import Input, Output\n'), ((19712, 19757), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), 
"('store-initial-query-response', 'data')\n", (19717, 19757), False, 'from dash.dependencies import Input, Output\n'), ((19763, 19811), 'dash.dependencies.Input', 'Input', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (19768, 19811), False, 'from dash.dependencies import Input, Output\n'), ((19817, 19852), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (19822, 19852), False, 'from dash.dependencies import Input, Output\n'), ((20856, 20886), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (20868, 20886), True, 'import pandas as pd\n'), ((20934, 20993), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_res', 'filter_values'], {}), '(dff_res, filter_values)\n', (20969, 20993), False, 'from data_preprocessing import data_preprocess\n'), ((21415, 21465), 'dash.dcc.Graph', 'dcc.Graph', ([], {'figure': 'fig', 'className': '"""keywords-plotly"""'}), "(figure=fig, className='keywords-plotly')\n", (21424, 21465), False, 'from dash import dcc\n'), ((20450, 20490), 'dash.dependencies.Output', 'Output', (['"""keywords-graph-all"""', '"""children"""'], {}), "('keywords-graph-all', 'children')\n", (20456, 20490), False, 'from dash.dependencies import Input, Output\n'), ((20496, 20541), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (20501, 20541), False, 'from dash.dependencies import Input, Output\n'), ((20547, 20577), 'dash.dependencies.Input', 'Input', (['"""search-query"""', '"""value"""'], {}), "('search-query', 'value')\n", (20552, 20577), False, 'from dash.dependencies import Input, Output\n'), ((20583, 20622), 'dash.dependencies.Input', 'Input', (['"""dp-keywords-component"""', '"""value"""'], {}), "('dp-keywords-component', 'value')\n", (20588, 20622), False, 'from dash.dependencies import Input, Output\n'), ((20628, 20663), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (20633, 20663), False, 'from dash.dependencies import Input, Output\n'), ((21795, 21825), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (21807, 21825), True, 'import pandas as pd\n'), ((21873, 21932), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_res', 'filter_values'], {}), '(dff_res, filter_values)\n', (21908, 21932), False, 'from data_preprocessing import data_preprocess\n'), ((22086, 22109), 'utils.flatten_list', 'utils.flatten_list', (['res'], {}), '(res)\n', (22104, 22109), False, 'import utils\n'), ((22173, 22336), 'dash.dcc.Dropdown', 'dcc.Dropdown', ([], {'id': '"""dp-keywords-component"""', 'value': '"""All"""', 'options': 'options', 'clearable': '(False)', 'placeholder': '"""Select a field of study"""', 'className': '"""dp-access-piie"""'}), "(id='dp-keywords-component', value='All', options=options,\n clearable=False, placeholder='Select a field of study', className=\n 'dp-access-piie')\n", (22185, 22336), False, 'from dash import dcc\n'), ((21487, 21520), 'dash.dependencies.Output', 'Output', (['"""dp-keywords"""', '"""children"""'], {}), "('dp-keywords', 'children')\n", (21493, 21520), False, 'from dash.dependencies import Input, Output\n'), ((21526, 21571), 'dash.dependencies.Input', 'Input', 
(['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (21531, 21571), False, 'from dash.dependencies import Input, Output\n'), ((21577, 21612), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (21582, 21612), False, 'from dash.dependencies import Input, Output\n'), ((23051, 23081), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (23063, 23081), True, 'import pandas as pd\n'), ((23129, 23151), 'pandas.DataFrame', 'pd.DataFrame', (['data_ref'], {}), '(data_ref)\n', (23141, 23151), True, 'import pandas as pd\n'), ((23202, 23231), 'pandas.concat', 'pd.concat', (['[dff_res, dff_ref]'], {}), '([dff_res, dff_ref])\n', (23211, 23231), True, 'import pandas as pd\n'), ((23246, 23305), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_all', 'filter_values'], {}), '(dff_all, filter_values)\n', (23281, 23305), False, 'from data_preprocessing import data_preprocess\n'), ((23459, 23482), 'utils.flatten_list', 'utils.flatten_list', (['res'], {}), '(res)\n', (23477, 23482), False, 'import utils\n'), ((23546, 23707), 'dash.dcc.Dropdown', 'dcc.Dropdown', ([], {'id': '"""dp-access-component"""', 'value': '"""All"""', 'options': 'options', 'clearable': '(False)', 'placeholder': '"""Select a field of study"""', 'className': '"""dp-access-piie"""'}), "(id='dp-access-component', value='All', options=options,\n clearable=False, placeholder='Select a field of study', className=\n 'dp-access-piie')\n", (23558, 23707), False, 'from dash import dcc\n'), ((22435, 22471), 'dash.dependencies.Output', 'Output', (['"""loading-icon-1"""', '"""children"""'], {}), "('loading-icon-1', 'children')\n", (22441, 22471), False, 'from dash.dependencies import Input, Output\n'), ((22487, 22526), 'dash.dependencies.Input', 'Input', (['"""keywords-graph-res"""', '"""children"""'], {}), "('keywords-graph-res', 'children')\n", (22492, 22526), False, 'from dash.dependencies import Input, Output\n'), ((22543, 22579), 'dash.dependencies.Output', 'Output', (['"""loading-icon-2"""', '"""children"""'], {}), "('loading-icon-2', 'children')\n", (22549, 22579), False, 'from dash.dependencies import Input, Output\n'), ((22595, 22634), 'dash.dependencies.Input', 'Input', (['"""keywords-graph-ref"""', '"""children"""'], {}), "('keywords-graph-ref', 'children')\n", (22600, 22634), False, 'from dash.dependencies import Input, Output\n'), ((22672, 22703), 'dash.dependencies.Output', 'Output', (['"""dp-access"""', '"""children"""'], {}), "('dp-access', 'children')\n", (22678, 22703), False, 'from dash.dependencies import Input, Output\n'), ((22709, 22754), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (22714, 22754), False, 'from dash.dependencies import Input, Output\n'), ((22760, 22808), 'dash.dependencies.Input', 'Input', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (22765, 22808), False, 'from dash.dependencies import Input, Output\n'), ((22814, 22849), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (22819, 22849), False, 'from dash.dependencies import Input, Output\n'), ((24180, 24210), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (24192, 24210), True, 
'import pandas as pd\n'), ((24258, 24280), 'pandas.DataFrame', 'pd.DataFrame', (['data_ref'], {}), '(data_ref)\n', (24270, 24280), True, 'import pandas as pd\n'), ((24331, 24360), 'pandas.concat', 'pd.concat', (['[dff_res, dff_ref]'], {}), '([dff_res, dff_ref])\n', (24340, 24360), True, 'import pandas as pd\n'), ((24375, 24434), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_all', 'filter_values'], {}), '(dff_all, filter_values)\n', (24410, 24434), False, 'from data_preprocessing import data_preprocess\n'), ((24837, 24889), 'dash.dcc.Graph', 'dcc.Graph', ([], {'figure': 'fig', 'className': '"""access-pie-plotly"""'}), "(figure=fig, className='access-pie-plotly')\n", (24846, 24889), False, 'from dash import dcc\n'), ((23775, 23811), 'dash.dependencies.Output', 'Output', (['"""access-pie-all"""', '"""children"""'], {}), "('access-pie-all', 'children')\n", (23781, 23811), False, 'from dash.dependencies import Input, Output\n'), ((23817, 23862), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (23822, 23862), False, 'from dash.dependencies import Input, Output\n'), ((23868, 23916), 'dash.dependencies.Input', 'Input', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (23873, 23916), False, 'from dash.dependencies import Input, Output\n'), ((23922, 23959), 'dash.dependencies.Input', 'Input', (['"""dp-access-component"""', '"""value"""'], {}), "('dp-access-component', 'value')\n", (23927, 23959), False, 'from dash.dependencies import Input, Output\n'), ((23965, 24000), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (23970, 24000), False, 'from dash.dependencies import Input, Output\n'), ((25293, 25323), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (25305, 25323), True, 'import pandas as pd\n'), ((25371, 25393), 'pandas.DataFrame', 'pd.DataFrame', (['data_ref'], {}), '(data_ref)\n', (25383, 25393), True, 'import pandas as pd\n'), ((25444, 25473), 'pandas.concat', 'pd.concat', (['[dff_res, dff_ref]'], {}), '([dff_res, dff_ref])\n', (25453, 25473), True, 'import pandas as pd\n'), ((25488, 25547), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_all', 'filter_values'], {}), '(dff_all, filter_values)\n', (25523, 25547), False, 'from data_preprocessing import data_preprocess\n'), ((25558, 25617), 'plots.make_pubs_cites_per_year_line', 'plots.make_pubs_cites_per_year_line', (['dff_all', 'filter_values'], {}), '(dff_all, filter_values)\n', (25593, 25617), False, 'import plots\n'), ((25629, 25680), 'dash.dcc.Graph', 'dcc.Graph', ([], {'figure': 'fig', 'className': '"""pub-graph-plotly"""'}), "(figure=fig, className='pub-graph-plotly')\n", (25638, 25680), False, 'from dash import dcc\n'), ((24949, 24992), 'dash.dependencies.Output', 'Output', (['"""publication-graph-all"""', '"""children"""'], {}), "('publication-graph-all', 'children')\n", (24955, 24992), False, 'from dash.dependencies import Input, Output\n'), ((24998, 25043), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (25003, 25043), False, 'from dash.dependencies import Input, Output\n'), ((25049, 25097), 'dash.dependencies.Input', 'Input', 
(['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (25054, 25097), False, 'from dash.dependencies import Input, Output\n'), ((25103, 25138), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (25108, 25138), False, 'from dash.dependencies import Input, Output\n'), ((26038, 26068), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (26050, 26068), True, 'import pandas as pd\n'), ((26116, 26138), 'pandas.DataFrame', 'pd.DataFrame', (['data_ref'], {}), '(data_ref)\n', (26128, 26138), True, 'import pandas as pd\n'), ((26189, 26218), 'pandas.concat', 'pd.concat', (['[dff_res, dff_ref]'], {}), '([dff_res, dff_ref])\n', (26198, 26218), True, 'import pandas as pd\n'), ((26233, 26292), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_all', 'filter_values'], {}), '(dff_all, filter_values)\n', (26268, 26292), False, 'from data_preprocessing import data_preprocess\n'), ((26303, 26333), 'plots.make_fields_pie', 'plots.make_fields_pie', (['dff_all'], {}), '(dff_all)\n', (26324, 26333), False, 'import plots\n'), ((26345, 26397), 'dash.dcc.Graph', 'dcc.Graph', ([], {'figure': 'fig', 'className': '"""fields-pie-plotly"""'}), "(figure=fig, className='fields-pie-plotly')\n", (26354, 26397), False, 'from dash import dcc\n'), ((25720, 25756), 'dash.dependencies.Output', 'Output', (['"""fields-pie-all"""', '"""children"""'], {}), "('fields-pie-all', 'children')\n", (25726, 25756), False, 'from dash.dependencies import Input, Output\n'), ((25762, 25807), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (25767, 25807), False, 'from dash.dependencies import Input, Output\n'), ((25813, 25861), 'dash.dependencies.Input', 'Input', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (25818, 25861), False, 'from dash.dependencies import Input, Output\n'), ((25867, 25902), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (25872, 25902), False, 'from dash.dependencies import Input, Output\n'), ((26794, 26824), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (26806, 26824), True, 'import pandas as pd\n'), ((26872, 26894), 'pandas.DataFrame', 'pd.DataFrame', (['data_ref'], {}), '(data_ref)\n', (26884, 26894), True, 'import pandas as pd\n'), ((26945, 26974), 'pandas.concat', 'pd.concat', (['[dff_res, dff_ref]'], {}), '([dff_res, dff_ref])\n', (26954, 26974), True, 'import pandas as pd\n'), ((26989, 27048), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_all', 'filter_values'], {}), '(dff_all, filter_values)\n', (27024, 27048), False, 'from data_preprocessing import data_preprocess\n'), ((27059, 27093), 'plots.make_active_authors', 'plots.make_active_authors', (['dff_all'], {}), '(dff_all)\n', (27084, 27093), False, 'import plots\n'), ((27105, 27156), 'dash.dcc.Graph', 'dcc.Graph', ([], {'figure': 'fig', 'className': '"""pub-graph-plotly"""'}), "(figure=fig, className='pub-graph-plotly')\n", (27114, 27156), False, 'from dash import dcc\n'), ((26441, 26487), 'dash.dependencies.Output', 'Output', (['"""active-authors-graph-all"""', '"""children"""'], {}), "('active-authors-graph-all', 
'children')\n", (26447, 26487), False, 'from dash.dependencies import Input, Output\n'), ((26493, 26538), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (26498, 26538), False, 'from dash.dependencies import Input, Output\n'), ((26544, 26592), 'dash.dependencies.Input', 'Input', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (26549, 26592), False, 'from dash.dependencies import Input, Output\n'), ((26598, 26633), 'dash.dependencies.Input', 'Input', (['"""time-range-slider"""', '"""value"""'], {}), "('time-range-slider', 'value')\n", (26603, 26633), False, 'from dash.dependencies import Input, Output\n'), ((27513, 27543), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (27525, 27543), True, 'import pandas as pd\n'), ((27730, 27753), 'utils.flatten_list', 'utils.flatten_list', (['res'], {}), '(res)\n', (27748, 27753), False, 'import utils\n'), ((27817, 27987), 'dash.dcc.Dropdown', 'dcc.Dropdown', ([], {'id': '"""dp-access-component_cytoscape"""', 'value': '"""All"""', 'options': 'options', 'clearable': '(False)', 'placeholder': '"""Select a field of study"""', 'className': '"""dp-access-pie"""'}), "(id='dp-access-component_cytoscape', value='All', options=\n options, clearable=False, placeholder='Select a field of study',\n className='dp-access-pie')\n", (27829, 27987), False, 'from dash import dcc\n'), ((27260, 27301), 'dash.dependencies.Output', 'Output', (['"""dp-access-cytoscape"""', '"""children"""'], {}), "('dp-access-cytoscape', 'children')\n", (27266, 27301), False, 'from dash.dependencies import Input, Output\n'), ((27307, 27352), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (27312, 27352), False, 'from dash.dependencies import Input, Output\n'), ((28502, 28532), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (28514, 28532), True, 'import pandas as pd\n'), ((28055, 28104), 'dash.dependencies.Output', 'Output', (['"""cytoscape-event-callbacks-1"""', '"""elements"""'], {}), "('cytoscape-event-callbacks-1', 'elements')\n", (28061, 28104), False, 'from dash.dependencies import Input, Output\n'), ((28110, 28155), 'dash.dependencies.Output', 'Output', (['"""cytoscape-event-callbacks-1"""', '"""zoom"""'], {}), "('cytoscape-event-callbacks-1', 'zoom')\n", (28116, 28155), False, 'from dash.dependencies import Input, Output\n'), ((28161, 28206), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (28166, 28206), False, 'from dash.dependencies import Input, Output\n'), ((28212, 28241), 'dash.dependencies.Input', 'Input', (['"""bt-reset"""', '"""n_clicks"""'], {}), "('bt-reset', 'n_clicks')\n", (28217, 28241), False, 'from dash.dependencies import Input, Output\n'), ((28247, 28294), 'dash.dependencies.Input', 'Input', (['"""dp-access-component_cytoscape"""', '"""value"""'], {}), "('dp-access-component_cytoscape', 'value')\n", (28252, 28294), False, 'from dash.dependencies import Input, Output\n'), ((28300, 28344), 'dash.dependencies.Input', 'Input', (['"""cytoscape-event-callbacks-1"""', '"""zoom"""'], {}), "('cytoscape-event-callbacks-1', 'zoom')\n", (28305, 28344), False, 'from dash.dependencies import Input, Output\n'), ((29585, 29607), 'pandas.DataFrame', 'pd.DataFrame', 
(['data_ref'], {}), '(data_ref)\n', (29597, 29607), True, 'import pandas as pd\n'), ((29634, 29671), 'data_collection.semantic_api.build_references', 'semantic_api.build_references', (['ref_df'], {}), '(ref_df)\n', (29663, 29671), False, 'from data_collection import semantic_api\n'), ((29685, 29715), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (29697, 29715), True, 'import pandas as pd\n'), ((29742, 29779), 'data_collection.semantic_api.build_references', 'semantic_api.build_references', (['res_df'], {}), '(res_df)\n', (29771, 29779), False, 'from data_collection import semantic_api\n'), ((29794, 29847), 'plots.generate_graph_elements_network', 'plots.generate_graph_elements_network', (['ref_df', 'res_df'], {}), '(ref_df, res_df)\n', (29831, 29847), False, 'import plots\n'), ((29140, 29189), 'dash.dependencies.Output', 'Output', (['"""cytoscape-event-callbacks-2"""', '"""elements"""'], {}), "('cytoscape-event-callbacks-2', 'elements')\n", (29146, 29189), False, 'from dash.dependencies import Input, Output\n'), ((29195, 29240), 'dash.dependencies.Output', 'Output', (['"""cytoscape-event-callbacks-2"""', '"""zoom"""'], {}), "('cytoscape-event-callbacks-2', 'zoom')\n", (29201, 29240), False, 'from dash.dependencies import Input, Output\n'), ((29246, 29294), 'dash.dependencies.Input', 'Input', (['"""store-references-query-response"""', '"""data"""'], {}), "('store-references-query-response', 'data')\n", (29251, 29294), False, 'from dash.dependencies import Input, Output\n'), ((29300, 29345), 'dash.dependencies.Input', 'Input', (['"""store-initial-query-response"""', '"""data"""'], {}), "('store-initial-query-response', 'data')\n", (29305, 29345), False, 'from dash.dependencies import Input, Output\n'), ((29351, 29387), 'dash.dependencies.Input', 'Input', (['"""bt-reset-papers"""', '"""n_clicks"""'], {}), "('bt-reset-papers', 'n_clicks')\n", (29356, 29387), False, 'from dash.dependencies import Input, Output\n'), ((29393, 29437), 'dash.dependencies.Input', 'Input', (['"""cytoscape-event-callbacks-2"""', '"""zoom"""'], {}), "('cytoscape-event-callbacks-2', 'zoom')\n", (29398, 29437), False, 'from dash.dependencies import Input, Output\n'), ((30027, 30062), 'dash.dependencies.Output', 'Output', (['"""author-info-1"""', '"""children"""'], {}), "('author-info-1', 'children')\n", (30033, 30062), False, 'from dash.dependencies import Input, Output\n'), ((30078, 30129), 'dash.dependencies.Input', 'Input', (['"""cytoscape-event-callbacks-1"""', '"""tapNodeData"""'], {}), "('cytoscape-event-callbacks-1', 'tapNodeData')\n", (30083, 30129), False, 'from dash.dependencies import Input, Output\n'), ((31160, 31194), 'dash.dependencies.Output', 'Output', (['"""paper-info-1"""', '"""children"""'], {}), "('paper-info-1', 'children')\n", (31166, 31194), False, 'from dash.dependencies import Input, Output\n'), ((31210, 31261), 'dash.dependencies.Input', 'Input', (['"""cytoscape-event-callbacks-2"""', '"""tapNodeData"""'], {}), "('cytoscape-event-callbacks-2', 'tapNodeData')\n", (31215, 31261), False, 'from dash.dependencies import Input, Output\n'), ((4029, 4096), 'dash.dcc.Store', 'dcc.Store', ([], {'id': '"""store-initial-query-response"""', 'storage_type': '"""memory"""'}), "(id='store-initial-query-response', storage_type='memory')\n", (4038, 4096), False, 'from dash import dcc\n'), ((4106, 4176), 'dash.dcc.Store', 'dcc.Store', ([], {'id': '"""store-references-query-response"""', 'storage_type': '"""memory"""'}), "(id='store-references-query-response', 
storage_type='memory')\n", (4115, 4176), False, 'from dash import dcc\n'), ((4277, 4338), 'dash.html.Div', 'html.Div', ([], {'id': '"""start-page"""', 'children': '[]', 'className': '"""main-body"""'}), "(id='start-page', children=[], className='main-body')\n", (4285, 4338), False, 'from dash import html\n'), ((5592, 5622), 'pandas.DataFrame', 'pd.DataFrame', (["response['data']"], {}), "(response['data'])\n", (5604, 5622), True, 'import pandas as pd\n'), ((5636, 5673), 'data_preprocessing.data_preprocess.extract_key_words', 'data_preprocess.extract_key_words', (['df'], {}), '(df)\n', (5669, 5673), False, 'from data_preprocessing import data_preprocess\n'), ((19977, 20007), 'pandas.DataFrame', 'pd.DataFrame', (["data_res['data']"], {}), "(data_res['data'])\n", (19989, 20007), True, 'import pandas as pd\n'), ((20063, 20085), 'pandas.DataFrame', 'pd.DataFrame', (['data_ref'], {}), '(data_ref)\n', (20075, 20085), True, 'import pandas as pd\n'), ((20144, 20173), 'pandas.concat', 'pd.concat', (['[dff_res, dff_ref]'], {}), '([dff_res, dff_ref])\n', (20153, 20173), True, 'import pandas as pd\n'), ((20192, 20251), 'data_preprocessing.data_preprocess.filter_data_by_time', 'data_preprocess.filter_data_by_time', (['dff_all', 'filter_values'], {}), '(dff_all, filter_values)\n', (20227, 20251), False, 'from data_preprocessing import data_preprocess\n'), ((21032, 21072), 'plots.make_top_key_words', 'plots.make_top_key_words', (['dff_res', 'query'], {}), '(dff_res, query)\n', (21056, 21072), False, 'import plots\n'), ((21359, 21404), 'plots.make_top_key_words', 'plots.make_top_key_words', (['dff_filtered', 'query'], {}), '(dff_filtered, query)\n', (21383, 21404), False, 'import plots\n'), ((24473, 24503), 'plots.make_access_pie', 'plots.make_access_pie', (['dff_all'], {}), '(dff_all)\n', (24494, 24503), False, 'import plots\n'), ((24790, 24825), 'plots.make_access_pie', 'plots.make_access_pie', (['dff_filtered'], {}), '(dff_filtered)\n', (24811, 24825), False, 'import plots\n'), ((28609, 28654), 'plots.generate_graph_elements_collab', 'plots.generate_graph_elements_collab', (['dff_res'], {}), '(dff_res)\n', (28645, 28654), False, 'import plots\n'), ((28946, 28996), 'plots.generate_graph_elements_collab', 'plots.generate_graph_elements_collab', (['dff_filtered'], {}), '(dff_filtered)\n', (28982, 28996), False, 'import plots\n'), ((30276, 30316), 'data_collection.semantic_api.get_author_info', 'semantic_api.get_author_info', (["data['id']"], {}), "(data['id'])\n", (30304, 30316), False, 'from data_collection import semantic_api\n'), ((30988, 31095), 'dash.html.P', 'html.P', (['"""Click on a node to display information about an author"""'], {'className': '"""author-info-default-text"""'}), "('Click on a node to display information about an author', className=\n 'author-info-default-text')\n", (30994, 31095), False, 'from dash import html\n'), ((31405, 31444), 'data_collection.semantic_api.get_paper_info', 'semantic_api.get_paper_info', (["data['id']"], {}), "(data['id'])\n", (31432, 31444), False, 'from data_collection import semantic_api\n'), ((32825, 32929), 'dash.html.P', 'html.P', (['"""Click on a node to display information about a paper"""'], {'className': '"""paper-info-default-text"""'}), "('Click on a node to display information about a paper', className=\n 'paper-info-default-text')\n", (32831, 32929), False, 'from dash import html\n'), ((7714, 7723), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (7721, 7723), False, 'from dash import html\n'), ((7733, 7774), 'dash.html.Div', 'html.Div', ([], 
{'id': '"""tabs-content-example-graph"""'}), "(id='tabs-content-example-graph')\n", (7741, 7774), False, 'from dash import html\n'), ((20296, 20352), 'dash.dcc.send_data_frame', 'dcc.send_data_frame', (['dff_all.to_csv', '"""research_data.csv"""'], {}), "(dff_all.to_csv, 'research_data.csv')\n", (20315, 20352), False, 'from dash import dcc\n'), ((32691, 32778), 'dash.html.P', 'html.P', (['"""No info available for this paper"""'], {'className': '"""paper-info-default-no-info"""'}), "('No info available for this paper', className=\n 'paper-info-default-no-info')\n", (32697, 32778), False, 'from dash import html\n'), ((2854, 2886), 'dash.html.H1', 'html.H1', ([], {'id': '"""topic"""', 'children': '[]'}), "(id='topic', children=[])\n", (2861, 2886), False, 'from dash import html\n'), ((5554, 5571), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5566, 5571), False, 'import requests\n'), ((7841, 7850), 'dash.html.Hr', 'html.Hr', ([], {}), '()\n', (7848, 7850), False, 'from dash import html\n'), ((7868, 7945), 'dash.html.P', 'html.P', (['"""👇 Or check out the latest blog posts about data-driven academia 👇"""'], {}), "('👇 Or check out the latest blog posts about data-driven academia 👇')\n", (7874, 7945), False, 'from dash import html\n'), ((7953, 7962), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (7960, 7962), False, 'from dash import html\n'), ((16441, 16487), 'dash.html.Div', 'html.Div', ([], {'className': '"""vl"""', 'style': "{'order': '2'}"}), "(className='vl', style={'order': '2'})\n", (16449, 16487), False, 'from dash import html\n'), ((19039, 19085), 'dash.html.Div', 'html.Div', ([], {'className': '"""vl"""', 'style': "{'order': '2'}"}), "(className='vl', style={'order': '2'})\n", (19047, 19085), False, 'from dash import html\n'), ((6366, 6383), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6378, 6383), False, 'import requests\n'), ((13424, 13631), 'dash.html.P', 'html.P', (['"""Retrieving info about 1000s of papers, please give it a few seconds"""'], {'style': "{'order': '1', 'font-size': '1.5rem', 'color': 'rgba(3, 3, 3, 0.2)',\n 'text-align': 'center', 'margin-top': '10vh'}"}), "('Retrieving info about 1000s of papers, please give it a few seconds',\n style={'order': '1', 'font-size': '1.5rem', 'color':\n 'rgba(3, 3, 3, 0.2)', 'text-align': 'center', 'margin-top': '10vh'})\n", (13430, 13631), False, 'from dash import html\n'), ((13735, 13810), 'dash.html.Img', 'html.Img', ([], {'src': '"""/assets/spinner.gif"""', 'style': "{'order': '2', 'margin': 'auto'}"}), "(src='/assets/spinner.gif', style={'order': '2', 'margin': 'auto'})\n", (13743, 13810), False, 'from dash import html\n'), ((30369, 30396), 'dash.html.B', 'html.B', (["author_info['name']"], {}), "(author_info['name'])\n", (30375, 30396), False, 'from dash import html\n'), ((30398, 30407), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (30405, 30407), False, 'from dash import html\n'), ((30408, 30417), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (30415, 30417), False, 'from dash import html\n'), ((30440, 30463), 'dash.html.Span', 'html.Span', (['"""Published """'], {}), "('Published ')\n", (30449, 30463), False, 'from dash import html\n'), ((30465, 30498), 'dash.html.B', 'html.B', (["author_info['paperCount']"], {}), "(author_info['paperCount'])\n", (30471, 30498), False, 'from dash import html\n'), ((30500, 30521), 'dash.html.Span', 'html.Span', (['""" papers."""'], {}), "(' papers.')\n", (30509, 30521), False, 'from dash import html\n'), ((30523, 30532), 'dash.html.Br', 'html.Br', ([], {}), '()\n', 
(30530, 30532), False, 'from dash import html\n'), ((30533, 30542), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (30540, 30542), False, 'from dash import html\n'), ((30565, 30587), 'dash.html.Span', 'html.Span', (['"""Received """'], {}), "('Received ')\n", (30574, 30587), False, 'from dash import html\n'), ((30589, 30625), 'dash.html.B', 'html.B', (["author_info['citationCount']"], {}), "(author_info['citationCount'])\n", (30595, 30625), False, 'from dash import html\n'), ((30627, 30651), 'dash.html.Span', 'html.Span', (['""" citations."""'], {}), "(' citations.')\n", (30636, 30651), False, 'from dash import html\n'), ((30653, 30662), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (30660, 30662), False, 'from dash import html\n'), ((30663, 30672), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (30670, 30672), False, 'from dash import html\n'), ((30695, 30718), 'dash.html.Span', 'html.Span', (['f"""h index: """'], {}), "(f'h index: ')\n", (30704, 30718), False, 'from dash import html\n'), ((30720, 30749), 'dash.html.B', 'html.B', (["author_info['hIndex']"], {}), "(author_info['hIndex'])\n", (30726, 30749), False, 'from dash import html\n'), ((30751, 30760), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (30758, 30760), False, 'from dash import html\n'), ((30762, 30771), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (30769, 30771), False, 'from dash import html\n'), ((30794, 30870), 'dash.html.A', 'html.A', (['"""Semantic Scholar profile"""'], {'href': "author_info['url']", 'target': '"""_blank"""'}), "('Semantic Scholar profile', href=author_info['url'], target='_blank')\n", (30800, 30870), False, 'from dash import html\n'), ((31755, 31764), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (31762, 31764), False, 'from dash import html\n'), ((31766, 31793), 'dash.html.B', 'html.B', (["paper_info['title']"], {}), "(paper_info['title'])\n", (31772, 31793), False, 'from dash import html\n'), ((31795, 31804), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (31802, 31804), False, 'from dash import html\n'), ((31805, 31814), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (31812, 31814), False, 'from dash import html\n'), ((32449, 32458), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (32456, 32458), False, 'from dash import html\n'), ((32492, 32510), 'dash.html.B', 'html.B', (['"""Abstract"""'], {}), "('Abstract')\n", (32498, 32510), False, 'from dash import html\n'), ((32512, 32521), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (32519, 32521), False, 'from dash import html\n'), ((32555, 32588), 'dash.html.Span', 'html.Span', (["paper_info['abstract']"], {}), "(paper_info['abstract'])\n", (32564, 32588), False, 'from dash import html\n'), ((1448, 1508), 'dash.html.Img', 'html.Img', ([], {'src': '"""/assets/web.png"""', 'alt': '"""research intelligence"""'}), "(src='/assets/web.png', alt='research intelligence')\n", (1456, 1508), False, 'from dash import html\n'), ((1616, 1645), 'dash.html.H3', 'html.H3', (['"""research analytics"""'], {}), "('research analytics')\n", (1623, 1645), False, 'from dash import html\n'), ((1994, 2113), 'dash.html.A', 'html.A', (['"""Contribute"""'], {'href': '"""https://github.com/jhupiterz/research-analytics"""', 'target': '"""_blank"""', 'className': '"""doc-link"""'}), "('Contribute', href='https://github.com/jhupiterz/research-analytics',\n target='_blank', className='doc-link')\n", (2000, 2113), False, 'from dash import html\n'), ((2275, 2422), 'dash.html.A', 'html.A', (['"""Documentation"""'], {'href': '"""https://github.com/jhupiterz/research-analytics/blob/main/README.md"""', 
'target': '"""_blank"""', 'className': '"""doc-link"""'}), "('Documentation', href=\n 'https://github.com/jhupiterz/research-analytics/blob/main/README.md',\n target='_blank', className='doc-link')\n", (2281, 2422), False, 'from dash import html\n'), ((2960, 3016), 'dash.html.Img', 'html.Img', ([], {'src': '"""/assets/loupe.png"""', 'className': '"""loupe-img"""'}), "(src='/assets/loupe.png', className='loupe-img')\n", (2968, 3016), False, 'from dash import html\n'), ((3126, 3429), 'dash.dcc.Input', 'dcc.Input', ([], {'id': '"""search-query"""', 'type': '"""text"""', 'placeholder': '"""Search for keywords (e.g. "carbon nanotubes")"""', 'debounce': '(True)', 'spellCheck': '(True)', 'inputMode': '"""latin"""', 'name': '"""text"""', 'autoFocus': '(False)', 'minLength': '(1)', 'maxLength': '(60)', 'autoComplete': '"""off"""', 'disabled': '(False)', 'readOnly': '(False)', 'size': '"""60"""', 'n_submit': '(0)'}), '(id=\'search-query\', type=\'text\', placeholder=\n \'Search for keywords (e.g. "carbon nanotubes")\', debounce=True,\n spellCheck=True, inputMode=\'latin\', name=\'text\', autoFocus=False,\n minLength=1, maxLength=60, autoComplete=\'off\', disabled=False, readOnly\n =False, size=\'60\', n_submit=0)\n', (3135, 3429), False, 'from dash import dcc\n'), ((4578, 4649), 'dash.html.A', 'html.A', (['"""Plotly Dash"""'], {'href': '"""https://plotly.com/dash/"""', 'target': '"""_blank"""'}), "('Plotly Dash', href='https://plotly.com/dash/', target='_blank')\n", (4584, 4649), False, 'from dash import html\n'), ((4802, 4891), 'dash.html.A', 'html.A', (['"""Semantic Scholar"""'], {'href': '"""https://www.semanticscholar.org/"""', 'target': '"""_blank"""'}), "('Semantic Scholar', href='https://www.semanticscholar.org/', target=\n '_blank')\n", (4808, 4891), False, 'from dash import html\n'), ((14456, 15110), 'dash_cytoscape.Cytoscape', 'cyto.Cytoscape', ([], {'id': '"""cytoscape-event-callbacks-1"""', 'layout': "{'name': 'random', 'height': '58vh', 'width': '44vw'}", 'className': '"""cyto-1"""', 'stylesheet': '[{\'selector\': \'label\', \'style\': {\'content\': \'data(label)\', \'color\':\n \'rgba(60, 25, 240, 0.8)\', \'font-size\': \'14vh\', \'font-family\':\n \'Arial, sans serif\'}}, {\'selector\': \'node\', \'style\': {\'label\':\n \'data(label)\'}}, {\'selector\': \'[selected ^= "True"]\', \'style\': {\n \'background-color\': \'green\', \'line-color\': \'green\'}}, {\'selector\':\n \'.author\', \'style\': {\'background-color\': \'rgba(60, 25, 240, 0.8)\'}}, {\n \'selector\': \'.collaboration\', \'style\': {\'line-color\': \'#737373\',\n \'width\': 1}}]'}), '(id=\'cytoscape-event-callbacks-1\', layout={\'name\': \'random\',\n \'height\': \'58vh\', \'width\': \'44vw\'}, className=\'cyto-1\', stylesheet=[{\n \'selector\': \'label\', \'style\': {\'content\': \'data(label)\', \'color\':\n \'rgba(60, 25, 240, 0.8)\', \'font-size\': \'14vh\', \'font-family\':\n \'Arial, sans serif\'}}, {\'selector\': \'node\', \'style\': {\'label\':\n \'data(label)\'}}, {\'selector\': \'[selected ^= "True"]\', \'style\': {\n \'background-color\': \'green\', \'line-color\': \'green\'}}, {\'selector\':\n \'.author\', \'style\': {\'background-color\': \'rgba(60, 25, 240, 0.8)\'}}, {\n \'selector\': \'.collaboration\', \'style\': {\'line-color\': \'#737373\',\n \'width\': 1}}])\n', (14470, 15110), True, 'import dash_cytoscape as cyto\n'), ((16962, 17035), 'dash.html.Button', 'html.Button', (['"""Reset view"""'], {'id': '"""bt-reset-papers"""', 'className': '"""reset-button"""'}), "('Reset view', id='bt-reset-papers', 
className='reset-button')\n", (16973, 17035), False, 'from dash import html\n'), ((17058, 17690), 'dash_cytoscape.Cytoscape', 'cyto.Cytoscape', ([], {'id': '"""cytoscape-event-callbacks-2"""', 'layout': "{'name': 'random', 'height': '58vh', 'width': '50vw'}", 'style': "{'order': '2', 'height': '58vh', 'width': '50vw'}", 'stylesheet': "[{'selector': 'node', 'style': {'background-color':\n 'rgba(60, 25, 240, 0.8)', 'height': '9vh', 'width': '9vh'}}, {\n 'selector': '.res', 'style': {'background-color': 'green', 'color':\n 'red', 'height': '1.2vh', 'width': '1.2vh'}}, {'selector': '.ref',\n 'style': {'background-color': 'white', 'color': 'white', 'height':\n '0.8vh', 'width': '0.8vh'}}, {'selector': '.citation', 'style': {\n 'line-color': '#737373', 'width': 1}}]"}), "(id='cytoscape-event-callbacks-2', layout={'name': 'random',\n 'height': '58vh', 'width': '50vw'}, style={'order': '2', 'height':\n '58vh', 'width': '50vw'}, stylesheet=[{'selector': 'node', 'style': {\n 'background-color': 'rgba(60, 25, 240, 0.8)', 'height': '9vh', 'width':\n '9vh'}}, {'selector': '.res', 'style': {'background-color': 'green',\n 'color': 'red', 'height': '1.2vh', 'width': '1.2vh'}}, {'selector':\n '.ref', 'style': {'background-color': 'white', 'color': 'white',\n 'height': '0.8vh', 'width': '0.8vh'}}, {'selector': '.citation',\n 'style': {'line-color': '#737373', 'width': 1}}])\n", (17072, 17690), True, 'import dash_cytoscape as cyto\n'), ((12879, 12888), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (12886, 12888), False, 'from dash import html\n'), ((12910, 12919), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (12917, 12919), False, 'from dash import html\n'), ((31857, 31883), 'dash.html.Span', 'html.Span', (['"""Published in """'], {}), "('Published in ')\n", (31866, 31883), False, 'from dash import html\n'), ((31885, 31911), 'dash.html.B', 'html.B', (["paper_info['year']"], {}), "(paper_info['year'])\n", (31891, 31911), False, 'from dash import html\n'), ((31956, 31978), 'dash.html.Span', 'html.Span', (['"""Includes """'], {}), "('Includes ')\n", (31965, 31978), False, 'from dash import html\n'), ((31980, 32016), 'dash.html.B', 'html.B', (["paper_info['referenceCount']"], {}), "(paper_info['referenceCount'])\n", (31986, 32016), False, 'from dash import html\n'), ((32018, 32043), 'dash.html.Span', 'html.Span', (['""" references."""'], {}), "(' references.')\n", (32027, 32043), False, 'from dash import html\n'), ((32088, 32110), 'dash.html.Span', 'html.Span', (['"""Received """'], {}), "('Received ')\n", (32097, 32110), False, 'from dash import html\n'), ((32112, 32147), 'dash.html.B', 'html.B', (["paper_info['citationCount']"], {}), "(paper_info['citationCount'])\n", (32118, 32147), False, 'from dash import html\n'), ((32149, 32173), 'dash.html.Span', 'html.Span', (['""" citations."""'], {}), "(' citations.')\n", (32158, 32173), False, 'from dash import html\n'), ((32218, 32234), 'dash.html.Span', 'html.Span', (['"""Is """'], {}), "('Is ')\n", (32227, 32234), False, 'from dash import html\n'), ((32236, 32246), 'dash.html.B', 'html.B', (['oa'], {}), '(oa)\n', (32242, 32246), False, 'from dash import html\n'), ((32248, 32322), 'dash.html.Span', 'html.Span', (['""" open access."""'], {'style': "{'font-size': '1.5vh', 'color': 'black'}"}), "(' open access.', style={'font-size': '1.5vh', 'color': 'black'})\n", (32257, 32322), False, 'from dash import html\n'), ((32369, 32441), 'dash.html.A', 'html.A', (['""" Semantic Scholar URL"""'], {'href': "paper_info['url']", 'target': '"""_blank"""'}), "(' Semantic Scholar 
URL', href=paper_info['url'], target='_blank')\n", (32375, 32441), False, 'from dash import html\n'), ((7131, 7267), 'dash.dcc.Tab', 'dcc.Tab', ([], {'label': '"""📊 Search results 📊"""', 'value': '"""tab-1-example-graph"""', 'className': '"""single-tab"""', 'selected_className': '"""single-tab-selected"""'}), "(label='📊 Search results 📊', value='tab-1-example-graph',\n className='single-tab', selected_className='single-tab-selected')\n", (7138, 7267), False, 'from dash import dcc\n'), ((7307, 7443), 'dash.dcc.Tab', 'dcc.Tab', ([], {'label': '"""🤝 Author network 🤝"""', 'value': '"""tab-2-example-graph"""', 'className': '"""single-tab"""', 'selected_className': '"""single-tab-selected"""'}), "(label='🤝 Author network 🤝', value='tab-2-example-graph',\n className='single-tab', selected_className='single-tab-selected')\n", (7314, 7443), False, 'from dash import dcc\n'), ((7483, 7619), 'dash.dcc.Tab', 'dcc.Tab', ([], {'label': '"""🌐 Paper network 🌐"""', 'value': '"""tab-3-example-graph"""', 'className': '"""single-tab"""', 'selected_className': '"""single-tab-selected"""'}), "(label='🌐 Paper network 🌐', value='tab-3-example-graph', className\n ='single-tab', selected_className='single-tab-selected')\n", (7490, 7619), False, 'from dash import dcc\n'), ((14202, 14268), 'dash.html.Button', 'html.Button', (['"""Reset view"""'], {'id': '"""bt-reset"""', 'className': '"""reset-button"""'}), "('Reset view', id='bt-reset', className='reset-button')\n", (14213, 14268), False, 'from dash import html\n'), ((14291, 14360), 'dash.html.Div', 'html.Div', ([], {'id': '"""dp-access-cytoscape"""', 'children': '[]', 'style': "{'order': '2'}"}), "(id='dp-access-cytoscape', children=[], style={'order': '2'})\n", (14299, 14360), False, 'from dash import html\n'), ((16587, 16640), 'dash.html.Div', 'html.Div', ([], {'id': '"""author-info-1"""', 'className': '"""author-info"""'}), "(id='author-info-1', className='author-info')\n", (16595, 16640), False, 'from dash import html\n'), ((19181, 19232), 'dash.html.Div', 'html.Div', ([], {'id': '"""paper-info-1"""', 'className': '"""paper-info"""'}), "(id='paper-info-1', className='paper-info')\n", (19189, 19232), False, 'from dash import html\n'), ((8210, 8296), 'dash.html.Img', 'html.Img', ([], {'alt': '"""Link to my twitter"""', 'src': '"""assets/blogpost_1.png"""', 'className': '"""zoom"""'}), "(alt='Link to my twitter', src='assets/blogpost_1.png', className=\n 'zoom')\n", (8218, 8296), False, 'from dash import html\n'), ((8709, 8795), 'dash.html.Img', 'html.Img', ([], {'alt': '"""Link to my twitter"""', 'src': '"""assets/blogpost_2.png"""', 'className': '"""zoom"""'}), "(alt='Link to my twitter', src='assets/blogpost_2.png', className=\n 'zoom')\n", (8717, 8795), False, 'from dash import html\n'), ((9562, 9595), 'dash.html.P', 'html.P', (['"""Filter results in time """'], {}), "('Filter results in time ')\n", (9568, 9595), False, 'from dash import html\n'), ((9621, 10346), 'dash.dcc.RangeSlider', 'dcc.RangeSlider', (['(1940)', '(2030)', '(10)'], {'value': '[1940, 2030]', 'id': '"""time-range-slider"""', 'allowCross': '(False)', 'className': '"""range-slider"""', 'marks': "{(1940): {'label': '1940', 'style': {'color': 'black'}}, (1950): {'label':\n '1950', 'style': {'color': 'black'}}, (1960): {'label': '1960', 'style':\n {'color': 'black'}}, (1970): {'label': '1970', 'style': {'color':\n 'black'}}, (1980): {'label': '1980', 'style': {'color': 'black'}}, (\n 1990): {'label': '1990', 'style': {'color': 'black'}}, (2000): {'label':\n '2000', 'style': {'color': 'black'}}, 
(2010): {'label': '2010', 'style':\n {'color': 'black'}}, (2020): {'label': '2020', 'style': {'color':\n 'black'}}, (2030): {'label': '2030', 'style': {'color': 'black'}}}"}), "(1940, 2030, 10, value=[1940, 2030], id='time-range-slider',\n allowCross=False, className='range-slider', marks={(1940): {'label':\n '1940', 'style': {'color': 'black'}}, (1950): {'label': '1950', 'style':\n {'color': 'black'}}, (1960): {'label': '1960', 'style': {'color':\n 'black'}}, (1970): {'label': '1970', 'style': {'color': 'black'}}, (\n 1980): {'label': '1980', 'style': {'color': 'black'}}, (1990): {'label':\n '1990', 'style': {'color': 'black'}}, (2000): {'label': '2000', 'style':\n {'color': 'black'}}, (2010): {'label': '2010', 'style': {'color':\n 'black'}}, (2020): {'label': '2020', 'style': {'color': 'black'}}, (\n 2030): {'label': '2030', 'style': {'color': 'black'}}})\n", (9636, 10346), False, 'from dash import dcc\n'), ((10964, 11101), 'dash.html.Button', 'html.Button', (['"""Download data"""'], {'title': '"""Downloads data as .CSV file"""', 'id': '"""btn-download-data"""', 'className': '"""doc-link-download"""', 'n_clicks': '(0)'}), "('Download data', title='Downloads data as .CSV file', id=\n 'btn-download-data', className='doc-link-download', n_clicks=0)\n", (10975, 11101), False, 'from dash import html\n'), ((11321, 11352), 'dash.dcc.Download', 'dcc.Download', ([], {'id': '"""download-csv"""'}), "(id='download-csv')\n", (11333, 11352), False, 'from dash import dcc\n'), ((12997, 13088), 'dash.html.Div', 'html.Div', ([], {'id': '"""active-authors-graph-all"""', 'children': '[]', 'className': '"""active-authors-graph"""'}), "(id='active-authors-graph-all', children=[], className=\n 'active-authors-graph')\n", (13005, 13088), False, 'from dash import html\n'), ((13114, 13192), 'dash.html.Div', 'html.Div', ([], {'id': '"""publication-graph-all"""', 'children': '[]', 'className': '"""citations-graph"""'}), "(id='publication-graph-all', children=[], className='citations-graph')\n", (13122, 13192), False, 'from dash import html\n'), ((12566, 12638), 'dash.html.Div', 'html.Div', ([], {'id': '"""fields-pie-all"""', 'children': '[]', 'className': '"""fields-pie-graph"""'}), "(id='fields-pie-all', children=[], className='fields-pie-graph')\n", (12574, 12638), False, 'from dash import html\n'), ((11682, 11752), 'dash.html.Div', 'html.Div', ([], {'id': '"""dp-keywords"""', 'children': '[]', 'className': '"""keywords-dropdown"""'}), "(id='dp-keywords', children=[], className='keywords-dropdown')\n", (11690, 11752), False, 'from dash import html\n'), ((11795, 11868), 'dash.html.Div', 'html.Div', ([], {'id': '"""keywords-graph-all"""', 'children': '[]', 'className': '"""keywords-plot"""'}), "(id='keywords-graph-all', children=[], className='keywords-plot')\n", (11803, 11868), False, 'from dash import html\n'), ((12247, 12306), 'dash.html.Div', 'html.Div', ([], {'id': '"""dp-access"""', 'children': '[]', 'style': "{'order': '2'}"}), "(id='dp-access', children=[], style={'order': '2'})\n", (12255, 12306), False, 'from dash import html\n'), ((12344, 12430), 'dash.html.Div', 'html.Div', ([], {'id': '"""access-pie-all"""', 'children': '[]', 'style': "{'order': '1', 'margin': 'auto'}"}), "(id='access-pie-all', children=[], style={'order': '1', 'margin':\n 'auto'})\n", (12352, 12430), False, 'from dash import html\n')]
|
import os
import sys
import argparse
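# Import from the installed aceinna package; fall back to the local ./src checkout if it is unavailable.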
try:
from aceinna.bootstrap.cli import CommandLine
from aceinna.framework.constants import BAUDRATE_LIST
except: # pylint: disable=bare-except
print('load package from local')
sys.path.append('./src')
from aceinna.bootstrap.cli import CommandLine
from aceinna.framework.constants import BAUDRATE_LIST
def receive_args():
"""parse input arguments
"""
parser = argparse.ArgumentParser(
description='Aceinna python driver input args command:')
# parser.add_argument("-host", type=str, help="host type", default='web')
# for host as web
parser.add_argument("-p", "--port", type=int,
help="Webserver port")
parser.add_argument("--device-type", type=str,
help="Open Device Type")
parser.add_argument("-b", "--baudrate", type=int,
help="Baudrate for uart", choices=BAUDRATE_LIST)
parser.add_argument("-c", "--com-port", type=str,
help="COM Port")
parser.add_argument("--console-log", dest='console_log', action='store_true',
help="Output log on console", default=False)
parser.add_argument("--debug", dest='debug', action='store_true',
help="Log debug information", default=False)
parser.add_argument("--with-data-log", dest='with_data_log', action='store_true',
help="Contains internal data log (OpenIMU only)", default=False)
parser.add_argument("--with-raw-log", dest='with_raw_log', action='store_true',
help="Contains raw data log (OpenRTK only)", default=False)
return parser.parse_args()
def main():
    """Parse arguments, build the CommandLine driver, and start listening."""
input_args = receive_args()
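    # Forward all parsed options to the CommandLine driver and start it.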
command_line = CommandLine(
device_type=input_args.device_type,
com_port=input_args.com_port,
port=input_args.port,
baudrate=input_args.baudrate,
console_log=input_args.console_log,
debug=input_args.debug,
with_data_log=input_args.with_data_log,
with_raw_log=input_args.with_raw_log
)
command_line.listen()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt: # response for KeyboardInterrupt such as Ctrl+C
        print('User stopped this program by KeyboardInterrupt! File:[{0}], Line:[{1}]'.format(
__file__, sys._getframe().f_lineno))
sys.exit()
except: # pylint: disable=bare-except
os._exit(1)
|
[
"sys.path.append",
"argparse.ArgumentParser",
"aceinna.bootstrap.cli.CommandLine",
"sys._getframe",
"os._exit",
"sys.exit"
] |
[((436, 521), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Aceinna python driver input args command:"""'}), "(description='Aceinna python driver input args command:'\n )\n", (459, 521), False, 'import argparse\n'), ((1790, 2075), 'aceinna.bootstrap.cli.CommandLine', 'CommandLine', ([], {'device_type': 'input_args.device_type', 'com_port': 'input_args.com_port', 'port': 'input_args.port', 'baudrate': 'input_args.baudrate', 'console_log': 'input_args.console_log', 'debug': 'input_args.debug', 'with_data_log': 'input_args.with_data_log', 'with_raw_log': 'input_args.with_raw_log'}), '(device_type=input_args.device_type, com_port=input_args.\n com_port, port=input_args.port, baudrate=input_args.baudrate,\n console_log=input_args.console_log, debug=input_args.debug,\n with_data_log=input_args.with_data_log, with_raw_log=input_args.\n with_raw_log)\n', (1801, 2075), False, 'from aceinna.bootstrap.cli import CommandLine\n'), ((231, 255), 'sys.path.append', 'sys.path.append', (['"""./src"""'], {}), "('./src')\n", (246, 255), False, 'import sys\n'), ((2435, 2445), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2443, 2445), False, 'import sys\n'), ((2497, 2508), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (2505, 2508), False, 'import os\n'), ((2400, 2415), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (2413, 2415), False, 'import sys\n')]
|
from reindeer import Disease
# Part I
disease = Disease("day24-input.txt")
found, result = disease.battle()
print(f"Answer part I: {result}")
# Part II: search for the smallest boost for which the battle is won (found is True)
for boost in range(10000):
body = Disease("day24-input.txt", boost)
found, result = body.battle()
if found:
break
print(f"Answer part II: {result} (with boost={boost})")
|
[
"reindeer.Disease"
] |
[((50, 76), 'reindeer.Disease', 'Disease', (['"""day24-input.txt"""'], {}), "('day24-input.txt')\n", (57, 76), False, 'from reindeer import Disease\n'), ((194, 227), 'reindeer.Disease', 'Disease', (['"""day24-input.txt"""', 'boost'], {}), "('day24-input.txt', boost)\n", (201, 227), False, 'from reindeer import Disease\n')]
|
import pytest
def test_data():
from awkwardql.data import (RecordArray,
PrimitiveArray,
ListArray,
UnionArray,
instantiate)
# data in columnar form
events = RecordArray({
"muons": ListArray([0, 3, 3, 5], [3, 3, 5, 9], RecordArray({
"pt": PrimitiveArray([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]),
"iso": PrimitiveArray([0, 0, 100, 50, 30, 1, 2, 3, 4])
})),
"jets": ListArray([0, 5, 6, 8], [5, 6, 8, 12], RecordArray({
"pt": PrimitiveArray([1, 2, 3, 4, 5, 100, 30, 50, 1, 2, 3, 4]),
"mass": PrimitiveArray([10, 10, 10, 10, 10, 5, 15, 15, 9, 8, 7, 6])
})),
"met": PrimitiveArray([100, 200, 300, 400])
})
# same data in rowwise form
assert events == [
{'muons': [
{'pt': 1.1, 'iso': 0},
{'pt': 2.2, 'iso': 0},
{'pt': 3.3, 'iso': 100}],
'jets': [
{'pt': 1, 'mass': 10},
{'pt': 2, 'mass': 10},
{'pt': 3, 'mass': 10},
{'pt': 4, 'mass': 10},
{'pt': 5, 'mass': 10}],
'met': 100},
{'muons': [],
'jets': [{'pt': 100, 'mass': 5}],
'met': 200},
{'muons': [
{'pt': 4.4, 'iso': 50},
{'pt': 5.5, 'iso': 30}],
'jets': [
{'pt': 30, 'mass': 15},
{'pt': 50, 'mass': 15}],
'met': 300},
{'muons': [
{'pt': 6.6, 'iso': 1},
{'pt': 7.7, 'iso': 2},
{'pt': 8.8, 'iso': 3},
{'pt': 9.9, 'iso': 4}],
'jets': [
{'pt': 1, 'mass': 9},
{'pt': 2, 'mass': 8},
{'pt': 3, 'mass': 7},
{'pt': 4, 'mass': 6}],
'met': 400}]
# projection down to the numerical values
assert events["muons"]["pt"] == [[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6, 7.7, 8.8, 9.9]]
# single record object
assert events[0] == {
'muons': [
{'pt': 1.1, 'iso': 0},
{'pt': 2.2, 'iso': 0},
{'pt': 3.3, 'iso': 100}],
'jets': [
{'pt': 1, 'mass': 10},
{'pt': 2, 'mass': 10},
{'pt': 3, 'mass': 10},
{'pt': 4, 'mass': 10},
{'pt': 5, 'mass': 10}],
'met': 100}
# integer and string indexes commute, but string-string and integer-integer do not
assert events["muons"][0] == events[0]["muons"]
assert events["muons"][0]["pt"] == events[0]["muons"]["pt"]
assert events["muons"][0][2] == events[0]["muons"][2]
assert events["muons"][0]["pt"][2] == events[0]["muons"]["pt"][2]
assert events["muons"][0]["pt"][2] == events[0]["muons"][2]["pt"]
assert events["muons"][0]["pt"][2] == events["muons"][0][2]["pt"]
assert events["muons"][0]["pt"][2] == events["muons"]["pt"][0][2]
events.setindex()
muonpt = events.contents["muons"].content.contents["pt"]
assert muonpt.row == [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2), (3, 3)]
assert muonpt.col == ("muons", "pt")
muoniso = events.contents["muons"].content.contents["iso"]
assert muonpt.row == muoniso.row
assert muonpt.row.same(muoniso.row)
c1, c2 = muonpt.col.tolist()
for i, (r1, r2) in enumerate(muonpt.row):
assert events[c1][c2][r1][r2] == muonpt[i]
instantiate(events)
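    # UnionArray: a tagged union of two record types, one with fields "q" and "pt", the other with "pt" only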
egamma = UnionArray([0, 0, 1, 0, 1, 1, 1, 0, 0], [0, 1, 0, 2, 1, 2, 3, 3, 4], [
RecordArray({
"q": PrimitiveArray([1, -1, -1, 1, 1]),
"pt": PrimitiveArray([10, 20, 30, 40, 50])
}),
RecordArray({
"pt": PrimitiveArray([1.1, 2.2, 3.3, 4.4])
})
])
assert egamma == [
{'pt': 10, 'q': 1},
{'pt': 20, 'q': -1},
{'pt': 1.1},
{'pt': 30, 'q': -1},
{'pt': 2.2},
{'pt': 3.3},
{'pt': 4.4},
{'pt': 40, 'q': 1},
{'pt': 50, 'q': 1}]
assert egamma["pt"] == [10, 20, 1.1, 30, 2.2, 3.3, 4.4, 40, 50]
egamma.setindex()
assert egamma.contents[0].contents["pt"].row == [(0,), (1,), (3,), (7,), (8,)]
assert egamma.contents[1].contents["pt"].row == [(2,), (4,), (5,), (6,)]
assert egamma.contents[0].contents["pt"].col == ("pt",)
assert egamma.contents[1].contents["pt"].col == ("pt",)
instantiate(egamma)
|
[
"awkwardql.data.PrimitiveArray",
"awkwardql.data.instantiate"
] |
[((3473, 3492), 'awkwardql.data.instantiate', 'instantiate', (['events'], {}), '(events)\n', (3484, 3492), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((4441, 4460), 'awkwardql.data.instantiate', 'instantiate', (['egamma'], {}), '(egamma)\n', (4452, 4460), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((800, 836), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[100, 200, 300, 400]'], {}), '([100, 200, 300, 400])\n', (814, 836), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((3617, 3650), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[1, -1, -1, 1, 1]'], {}), '([1, -1, -1, 1, 1])\n', (3631, 3650), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((3670, 3706), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[10, 20, 30, 40, 50]'], {}), '([10, 20, 30, 40, 50])\n', (3684, 3706), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((3759, 3795), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[1.1, 2.2, 3.3, 4.4]'], {}), '([1.1, 2.2, 3.3, 4.4])\n', (3773, 3795), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((404, 465), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]'], {}), '([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n', (418, 465), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((486, 533), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[0, 0, 100, 50, 30, 1, 2, 3, 4]'], {}), '([0, 0, 100, 50, 30, 1, 2, 3, 4])\n', (500, 533), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((634, 690), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[1, 2, 3, 4, 5, 100, 30, 50, 1, 2, 3, 4]'], {}), '([1, 2, 3, 4, 5, 100, 30, 50, 1, 2, 3, 4])\n', (648, 690), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n'), ((712, 771), 'awkwardql.data.PrimitiveArray', 'PrimitiveArray', (['[10, 10, 10, 10, 10, 5, 15, 15, 9, 8, 7, 6]'], {}), '([10, 10, 10, 10, 10, 5, 15, 15, 9, 8, 7, 6])\n', (726, 771), False, 'from awkwardql.data import RecordArray, PrimitiveArray, ListArray, UnionArray, instantiate\n')]
|
"""original source: https://github.com/chainer/chainerrl/pull/480
MIT License
Copyright (c) Preferred Networks, Inc.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import *
from future import standard_library
standard_library.install_aliases()
import argparse
from inspect import getsourcefile
import os
import sys
import numpy as np
import chainer
import minerl # noqa: register MineRL envs as Gym envs.
import gym
import chainerrl
from chainerrl import experiments, explorers
from chainerrl.experiments.evaluator import Evaluator
from dqfd import DQfD, PrioritizedDemoReplayBuffer
from q_functions import CNNBranchingQFunction
from env_wrappers import (
BranchedRandomizedAction, BranchedActionWrapper,
MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper,
PoVWithCompassAngleWrapper, FullObservationSpaceWrapper)
from expert_converter import choose_top_experts, fill_buffer
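# Optimizer hook that rescales the gradients of any parameter flagged with `scale_param`.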
class ScaleGradHook(object):
name = 'ScaleGrad'
call_for_each_param = True
timing = 'pre'
def __init__(self, scale):
self.scale = scale
def __call__(self, rule, param):
if getattr(param, 'scale_param', False):
param.grad *= self.scale
def main():
"""Parses arguments and runs the example
"""
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='MineRLTreechop-v0',
choices=[
'MineRLTreechop-v0',
'MineRLNavigate-v0', 'MineRLNavigateDense-v0', 'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0',
'MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0',
'MineRLNavigateDenseFixed-v0' # for debug use
],
help='MineRL environment identifier')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 31)')
parser.add_argument('--gpu', type=int, default=-1,
help='GPU to use, set to -1 if no GPU.')
parser.add_argument('--final-exploration-frames',
type=int, default=10**6,
help='Timesteps after which we stop ' +
'annealing exploration rate')
parser.add_argument('--final-epsilon', type=float, default=0.01,
help='Final value of epsilon during training.')
parser.add_argument('--eval-epsilon', type=float, default=0.001,
help='Exploration epsilon used during eval episodes.')
parser.add_argument('--replay-start-size', type=int, default=1000,
help='Minimum replay buffer size before ' +
'performing gradient updates.')
parser.add_argument('--target-update-interval', type=int, default=10**4,
help='Frequency (in timesteps) at which ' +
'the target network is updated.')
parser.add_argument('--update-interval', type=int, default=4,
help='Frequency (in timesteps) of network updates.')
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--no-clip-delta',
dest='clip_delta', action='store_false')
parser.add_argument('--error-max', type=float, default=1.0)
parser.add_argument('--num-step-return', type=int, default=10)
parser.set_defaults(clip_delta=True)
parser.add_argument('--logging-level', type=int, default=20,
help='Logging level. 10:DEBUG, 20:INFO etc.')
parser.add_argument('--logging-filename', type=str, default=None)
parser.add_argument('--monitor', action='store_true', default=False,
help='Monitor env. Videos and additional information are saved as output files when evaluation')
# parser.add_argument('--render', action='store_true', default=False,
# help='Render env states in a GUI window.')
parser.add_argument('--optimizer', type=str, default='rmsprop',
choices=['rmsprop', 'adam'])
parser.add_argument('--lr', type=float, default=2.5e-4, help='Learning rate')
parser.add_argument("--replay-buffer-size", type=int, default=10**6,
help="Size of replay buffer (Excluding demonstrations)")
parser.add_argument("--minibatch-size", type=int, default=32)
parser.add_argument('--batch-accumulator', type=str, default="sum")
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default=None)
parser.add_argument("--save-demo-trajectories", action="store_true",
default=False)
# DQfD specific parameters for loading and pretraining.
parser.add_argument('--n-experts', type=int, default=10)
parser.add_argument('--expert-demo-path', type=str, default=None)
parser.add_argument('--n-pretrain-steps', type=int, default=750000)
parser.add_argument('--demo-supervised-margin', type=float, default=0.8)
parser.add_argument('--loss-coeff-l2', type=float, default=1e-5)
parser.add_argument('--loss-coeff-nstep', type=float, default=1.0)
parser.add_argument('--loss-coeff-supervised', type=float, default=1.0)
parser.add_argument('--bonus-priority-agent', type=float, default=0.001)
parser.add_argument('--bonus-priority-demo', type=float, default=1.0)
# Action branching architecture
parser.add_argument('--gradient-clipping', action='store_true', default=False)
parser.add_argument('--gradient-rescaling', action='store_true', default=False)
# NoisyNet parameters
parser.add_argument('--use-noisy-net', type=str, default=None,
choices=['before-pretraining', 'after-pretraining'])
parser.add_argument('--noisy-net-sigma', type=float, default=0.5)
# Parameters for state/action handling
parser.add_argument('--frame-stack', type=int, default=None, help='Number of frames stacked (None for disable).')
parser.add_argument('--frame-skip', type=int, default=None, help='Number of frames skipped (None for disable).')
parser.add_argument('--camera-atomic-actions', type=int, default=10)
parser.add_argument('--max-range-of-camera', type=float, default=10.)
parser.add_argument('--use-full-observation', action='store_true', default=False)
args = parser.parse_args()
    assert args.expert_demo_path is not None, \
        "DQfD needs collected expert demonstrations"
import logging
if args.logging_filename is not None:
logging.basicConfig(filename=args.logging_filename, filemode='w',
level=args.logging_level)
else:
logging.basicConfig(level=args.logging_level)
logger = logging.getLogger(__name__)
train_seed = args.seed
test_seed = 2 ** 31 - 1 - args.seed
chainerrl.misc.set_random_seed(args.seed, gpus=(args.gpu,))
args.outdir = experiments.prepare_output_dir(args, args.outdir)
logger.info('Output files are saved in {}'.format(args.outdir))
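    # Number of discrete choices per action branch; Navigate and Obtain variants get one extra branch.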
if args.env == 'MineRLTreechop-v0':
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions]
elif args.env in ['MineRLNavigate-v0', 'MineRLNavigateDense-v0',
'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 2]
elif args.env in ['MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 32]
else:
raise Exception("Unknown environment")
def make_env(env, test):
# wrap env: observation...
# NOTE: wrapping order matters!
if args.use_full_observation:
env = FullObservationSpaceWrapper(env)
elif args.env.startswith('MineRLNavigate'):
env = PoVWithCompassAngleWrapper(env)
else:
env = ObtainPoVWrapper(env)
if test and args.monitor:
env = gym.wrappers.Monitor(
env, os.path.join(args.outdir, 'monitor'),
mode='evaluation' if test else 'training', video_callable=lambda episode_id: True)
if args.frame_skip is not None:
env = FrameSkip(env, skip=args.frame_skip)
# convert hwc -> chw as Chainer requires
env = MoveAxisWrapper(env, source=-1, destination=0,
use_tuple=args.use_full_observation)
#env = ScaledFloatFrame(env)
if args.frame_stack is not None:
env = FrameStack(env, args.frame_stack, channel_order='chw',
use_tuple=args.use_full_observation)
# wrap env: action...
env = BranchedActionWrapper(env, branch_sizes, args.camera_atomic_actions, args.max_range_of_camera)
if test:
env = BranchedRandomizedAction(env, branch_sizes, args.eval_epsilon)
env_seed = test_seed if test else train_seed
env.seed(int(env_seed))
return env
core_env = gym.make(args.env)
env = make_env(core_env, test=False)
eval_env = make_env(core_env, test=True)
# Q function
if args.env.startswith('MineRLNavigate'):
if args.use_full_observation:
base_channels = 3 # RGB
else:
base_channels = 4 # RGB + compass
elif args.env.startswith('MineRLObtain'):
base_channels = 3 # RGB
else:
base_channels = 3 # RGB
if args.frame_stack is None:
n_input_channels = base_channels
else:
n_input_channels = base_channels * args.frame_stack
q_func = CNNBranchingQFunction(branch_sizes,
n_input_channels=n_input_channels,
gradient_rescaling=args.gradient_rescaling,
use_tuple=args.use_full_observation)
def phi(x):
# observation -> NN input
if args.use_full_observation:
pov = np.asarray(x[0], dtype=np.float32)
others = np.asarray(x[1], dtype=np.float32)
return (pov / 255, others)
else:
return np.asarray(x, dtype=np.float32) / 255
explorer = explorers.LinearDecayEpsilonGreedy(
1.0, args.final_epsilon,
args.final_exploration_frames,
lambda: np.array([np.random.randint(n) for n in branch_sizes]))
# Draw the computational graph and save it in the output directory.
if args.use_full_observation:
sample_obs = tuple([x[None] for x in env.observation_space.sample()])
else:
sample_obs = env.observation_space.sample()[None]
chainerrl.misc.draw_computational_graph(
[q_func(phi(sample_obs))], os.path.join(args.outdir, 'model'))
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
if args.use_noisy_net is None:
opt.setup(q_func)
if args.gradient_rescaling:
opt.add_hook(ScaleGradHook(1 / (1 + len(q_func.branch_sizes))))
if args.gradient_clipping:
opt.add_hook(chainer.optimizer_hooks.GradientClipping(10.0))
# calculate corresponding `steps` and `eval_interval` according to frameskip
maximum_frames = 8640000 # = 1440 episodes if we count an episode as 6000 frames.
if args.frame_skip is None:
steps = maximum_frames
eval_interval = 6000 * 100 # (approx.) every 100 episode (counts "1 episode = 6000 steps")
else:
steps = maximum_frames // args.frame_skip
eval_interval = 6000 * 100 // args.frame_skip # (approx.) every 100 episode (counts "1 episode = 6000 steps")
# Anneal beta from beta0 to 1 throughout training
betasteps = steps / args.update_interval
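    # Prioritized buffer shared by demonstration and agent transitions; demos are loaded below via fill_buffer.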
replay_buffer = PrioritizedDemoReplayBuffer(
args.replay_buffer_size, alpha=0.4,
beta0=0.6, betasteps=betasteps,
error_max=args.error_max,
num_steps=args.num_step_return)
# Fill the demo buffer with expert transitions
if not args.demo:
chosen_dirs = choose_top_experts(args.expert_demo_path, args.n_experts,
logger=logger)
fill_buffer(args.env, chosen_dirs, replay_buffer, args.frame_skip,
args.frame_stack, args.camera_atomic_actions,
args.max_range_of_camera, args.use_full_observation,
logger=logger)
logger.info("Demo buffer loaded with {} transitions".format(
len(replay_buffer)))
def reward_transform(x):
return np.sign(x) * np.log(1 + np.abs(x))
if args.use_noisy_net is not None and args.use_noisy_net == 'before-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
opt.setup(q_func)
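    # DQfD loss: TD loss plus an n-step return term, a supervised large-margin loss on demo actions,
    # and an L2 penalty (weighted by the loss_coeff_* arguments).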
agent = DQfD(q_func, opt, replay_buffer,
gamma=0.99,
explorer=explorer,
n_pretrain_steps=args.n_pretrain_steps,
demo_supervised_margin=args.demo_supervised_margin,
bonus_priority_agent=args.bonus_priority_agent,
bonus_priority_demo=args.bonus_priority_demo,
loss_coeff_nstep=args.loss_coeff_nstep,
loss_coeff_supervised=args.loss_coeff_supervised,
loss_coeff_l2=args.loss_coeff_l2,
gpu=args.gpu,
replay_start_size=args.replay_start_size,
target_update_interval=args.target_update_interval,
clip_delta=args.clip_delta,
update_interval=args.update_interval,
batch_accumulator=args.batch_accumulator,
phi=phi, reward_transform=reward_transform,
minibatch_size=args.minibatch_size)
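    # For --use-noisy-net=after-pretraining: convert the Q-function to factorized NoisyNet layers,
    # switch to greedy exploration, and rebuild the optimizer and target network.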
if args.use_noisy_net is not None and args.use_noisy_net == 'after-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
opt.setup(q_func)
opt.add_hook(
chainer.optimizer_hooks.WeightDecay(args.loss_coeff_l2))
agent.optimizer = opt
agent.target_model = None
agent.sync_target_network()
if args.load:
agent.load(args.load)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs)
logger.info('n_runs: {} mean: {} median: {} stdev: {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'], eval_stats['stdev']))
else:
agent.pretrain()
evaluator = Evaluator(agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
eval_interval=eval_interval,
outdir=args.outdir,
max_episode_len=None,
env=eval_env,
step_offset=0,
save_best_so_far_agent=True,
logger=logger)
# Evaluate the agent BEFORE training begins
evaluator.evaluate_and_update_max_score(t=0, episodes=0)
experiments.train_agent(agent=agent,
env=env,
steps=steps,
outdir=args.outdir,
max_episode_len=None,
step_offset=0,
evaluator=evaluator,
successful_score=None,
step_hooks=[])
env.close()
if __name__ == "__main__":
main()
|
[
"numpy.abs",
"argparse.ArgumentParser",
"chainerrl.explorers.Greedy",
"future.standard_library.install_aliases",
"q_functions.CNNBranchingQFunction",
"numpy.random.randint",
"expert_converter.choose_top_experts",
"expert_converter.fill_buffer",
"os.path.join",
"chainerrl.links.to_factorized_noisy",
"env_wrappers.FullObservationSpaceWrapper",
"chainerrl.misc.set_random_seed",
"env_wrappers.BranchedActionWrapper",
"chainerrl.experiments.prepare_output_dir",
"chainer.optimizer_hooks.WeightDecay",
"env_wrappers.BranchedRandomizedAction",
"env_wrappers.FrameStack",
"dqfd.DQfD",
"chainerrl.experiments.train_agent",
"chainerrl.experiments.evaluator.Evaluator",
"chainer.optimizers.RMSpropGraves",
"chainerrl.experiments.eval_performance",
"env_wrappers.PoVWithCompassAngleWrapper",
"numpy.asarray",
"env_wrappers.MoveAxisWrapper",
"env_wrappers.FrameSkip",
"chainer.optimizers.Adam",
"gym.make",
"logging.basicConfig",
"chainer.optimizer_hooks.GradientClipping",
"numpy.sign",
"env_wrappers.ObtainPoVWrapper",
"dqfd.PrioritizedDemoReplayBuffer",
"logging.getLogger"
] |
[((332, 366), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (364, 366), False, 'from future import standard_library\n'), ((1390, 1415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1413, 1415), False, 'import argparse\n'), ((7221, 7248), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7238, 7248), False, 'import logging\n'), ((7322, 7381), 'chainerrl.misc.set_random_seed', 'chainerrl.misc.set_random_seed', (['args.seed'], {'gpus': '(args.gpu,)'}), '(args.seed, gpus=(args.gpu,))\n', (7352, 7381), False, 'import chainerrl\n'), ((7401, 7450), 'chainerrl.experiments.prepare_output_dir', 'experiments.prepare_output_dir', (['args', 'args.outdir'], {}), '(args, args.outdir)\n', (7431, 7450), False, 'from chainerrl import experiments, explorers\n'), ((9636, 9654), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (9644, 9654), False, 'import gym\n'), ((10222, 10382), 'q_functions.CNNBranchingQFunction', 'CNNBranchingQFunction', (['branch_sizes'], {'n_input_channels': 'n_input_channels', 'gradient_rescaling': 'args.gradient_rescaling', 'use_tuple': 'args.use_full_observation'}), '(branch_sizes, n_input_channels=n_input_channels,\n gradient_rescaling=args.gradient_rescaling, use_tuple=args.\n use_full_observation)\n', (10243, 10382), False, 'from q_functions import CNNBranchingQFunction\n'), ((12435, 12597), 'dqfd.PrioritizedDemoReplayBuffer', 'PrioritizedDemoReplayBuffer', (['args.replay_buffer_size'], {'alpha': '(0.4)', 'beta0': '(0.6)', 'betasteps': 'betasteps', 'error_max': 'args.error_max', 'num_steps': 'args.num_step_return'}), '(args.replay_buffer_size, alpha=0.4, beta0=0.6,\n betasteps=betasteps, error_max=args.error_max, num_steps=args.\n num_step_return)\n', (12462, 12597), False, 'from dqfd import DQfD, PrioritizedDemoReplayBuffer\n'), ((13516, 14225), 'dqfd.DQfD', 'DQfD', (['q_func', 'opt', 'replay_buffer'], {'gamma': '(0.99)', 'explorer': 'explorer', 'n_pretrain_steps': 'args.n_pretrain_steps', 'demo_supervised_margin': 'args.demo_supervised_margin', 'bonus_priority_agent': 'args.bonus_priority_agent', 'bonus_priority_demo': 'args.bonus_priority_demo', 'loss_coeff_nstep': 'args.loss_coeff_nstep', 'loss_coeff_supervised': 'args.loss_coeff_supervised', 'loss_coeff_l2': 'args.loss_coeff_l2', 'gpu': 'args.gpu', 'replay_start_size': 'args.replay_start_size', 'target_update_interval': 'args.target_update_interval', 'clip_delta': 'args.clip_delta', 'update_interval': 'args.update_interval', 'batch_accumulator': 'args.batch_accumulator', 'phi': 'phi', 'reward_transform': 'reward_transform', 'minibatch_size': 'args.minibatch_size'}), '(q_func, opt, replay_buffer, gamma=0.99, explorer=explorer,\n n_pretrain_steps=args.n_pretrain_steps, demo_supervised_margin=args.\n demo_supervised_margin, bonus_priority_agent=args.bonus_priority_agent,\n bonus_priority_demo=args.bonus_priority_demo, loss_coeff_nstep=args.\n loss_coeff_nstep, loss_coeff_supervised=args.loss_coeff_supervised,\n loss_coeff_l2=args.loss_coeff_l2, gpu=args.gpu, replay_start_size=args.\n replay_start_size, target_update_interval=args.target_update_interval,\n clip_delta=args.clip_delta, update_interval=args.update_interval,\n batch_accumulator=args.batch_accumulator, phi=phi, reward_transform=\n reward_transform, minibatch_size=args.minibatch_size)\n', (13520, 14225), False, 'from dqfd import DQfD, PrioritizedDemoReplayBuffer\n'), ((7023, 7119), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 
'args.logging_filename', 'filemode': '"""w"""', 'level': 'args.logging_level'}), "(filename=args.logging_filename, filemode='w', level=\n args.logging_level)\n", (7042, 7119), False, 'import logging\n'), ((7161, 7206), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'args.logging_level'}), '(level=args.logging_level)\n', (7180, 7206), False, 'import logging\n'), ((8945, 9033), 'env_wrappers.MoveAxisWrapper', 'MoveAxisWrapper', (['env'], {'source': '(-1)', 'destination': '(0)', 'use_tuple': 'args.use_full_observation'}), '(env, source=-1, destination=0, use_tuple=args.\n use_full_observation)\n', (8960, 9033), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((9321, 9420), 'env_wrappers.BranchedActionWrapper', 'BranchedActionWrapper', (['env', 'branch_sizes', 'args.camera_atomic_actions', 'args.max_range_of_camera'], {}), '(env, branch_sizes, args.camera_atomic_actions, args.\n max_range_of_camera)\n', (9342, 9420), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((11290, 11324), 'os.path.join', 'os.path.join', (['args.outdir', '"""model"""'], {}), "(args.outdir, 'model')\n", (11302, 11324), False, 'import os\n'), ((11377, 11454), 'chainer.optimizers.RMSpropGraves', 'chainer.optimizers.RMSpropGraves', (['args.lr'], {'alpha': '(0.95)', 'momentum': '(0.0)', 'eps': '(0.01)'}), '(args.lr, alpha=0.95, momentum=0.0, eps=0.01)\n', (11409, 11454), False, 'import chainer\n'), ((12718, 12790), 'expert_converter.choose_top_experts', 'choose_top_experts', (['args.expert_demo_path', 'args.n_experts'], {'logger': 'logger'}), '(args.expert_demo_path, args.n_experts, logger=logger)\n', (12736, 12790), False, 'from expert_converter import choose_top_experts, fill_buffer\n'), ((12841, 13031), 'expert_converter.fill_buffer', 'fill_buffer', (['args.env', 'chosen_dirs', 'replay_buffer', 'args.frame_skip', 'args.frame_stack', 'args.camera_atomic_actions', 'args.max_range_of_camera', 'args.use_full_observation'], {'logger': 'logger'}), '(args.env, chosen_dirs, replay_buffer, args.frame_skip, args.\n frame_stack, args.camera_atomic_actions, args.max_range_of_camera, args\n .use_full_observation, logger=logger)\n', (12852, 13031), False, 'from expert_converter import choose_top_experts, fill_buffer\n'), ((13360, 13437), 'chainerrl.links.to_factorized_noisy', 'chainerrl.links.to_factorized_noisy', (['q_func'], {'sigma_scale': 'args.noisy_net_sigma'}), '(q_func, sigma_scale=args.noisy_net_sigma)\n', (13395, 13437), False, 'import chainerrl\n'), ((13457, 13475), 'chainerrl.explorers.Greedy', 'explorers.Greedy', ([], {}), '()\n', (13473, 13475), False, 'from chainerrl import experiments, explorers\n'), ((14569, 14646), 'chainerrl.links.to_factorized_noisy', 'chainerrl.links.to_factorized_noisy', (['q_func'], {'sigma_scale': 'args.noisy_net_sigma'}), '(q_func, sigma_scale=args.noisy_net_sigma)\n', (14604, 14646), False, 'import chainerrl\n'), ((14666, 14684), 'chainerrl.explorers.Greedy', 'explorers.Greedy', ([], {}), '()\n', (14682, 14684), False, 'from chainerrl import experiments, explorers\n'), ((15219, 15321), 'chainerrl.experiments.eval_performance', 'experiments.eval_performance', ([], {'env': 'eval_env', 'agent': 'agent', 'n_steps': 'None', 'n_episodes': 'args.eval_n_runs'}), '(env=eval_env, 
agent=agent, n_steps=None,\n n_episodes=args.eval_n_runs)\n', (15247, 15321), False, 'from chainerrl import experiments, explorers\n'), ((15552, 15769), 'chainerrl.experiments.evaluator.Evaluator', 'Evaluator', ([], {'agent': 'agent', 'n_steps': 'None', 'n_episodes': 'args.eval_n_runs', 'eval_interval': 'eval_interval', 'outdir': 'args.outdir', 'max_episode_len': 'None', 'env': 'eval_env', 'step_offset': '(0)', 'save_best_so_far_agent': '(True)', 'logger': 'logger'}), '(agent=agent, n_steps=None, n_episodes=args.eval_n_runs,\n eval_interval=eval_interval, outdir=args.outdir, max_episode_len=None,\n env=eval_env, step_offset=0, save_best_so_far_agent=True, logger=logger)\n', (15561, 15769), False, 'from chainerrl.experiments.evaluator import Evaluator\n'), ((16159, 16342), 'chainerrl.experiments.train_agent', 'experiments.train_agent', ([], {'agent': 'agent', 'env': 'env', 'steps': 'steps', 'outdir': 'args.outdir', 'max_episode_len': 'None', 'step_offset': '(0)', 'evaluator': 'evaluator', 'successful_score': 'None', 'step_hooks': '[]'}), '(agent=agent, env=env, steps=steps, outdir=args.\n outdir, max_episode_len=None, step_offset=0, evaluator=evaluator,\n successful_score=None, step_hooks=[])\n', (16182, 16342), False, 'from chainerrl import experiments, explorers\n'), ((8365, 8397), 'env_wrappers.FullObservationSpaceWrapper', 'FullObservationSpaceWrapper', (['env'], {}), '(env)\n', (8392, 8397), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((8844, 8880), 'env_wrappers.FrameSkip', 'FrameSkip', (['env'], {'skip': 'args.frame_skip'}), '(env, skip=args.frame_skip)\n', (8853, 8880), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((9155, 9251), 'env_wrappers.FrameStack', 'FrameStack', (['env', 'args.frame_stack'], {'channel_order': '"""chw"""', 'use_tuple': 'args.use_full_observation'}), "(env, args.frame_stack, channel_order='chw', use_tuple=args.\n use_full_observation)\n", (9165, 9251), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((9452, 9514), 'env_wrappers.BranchedRandomizedAction', 'BranchedRandomizedAction', (['env', 'branch_sizes', 'args.eval_epsilon'], {}), '(env, branch_sizes, args.eval_epsilon)\n', (9476, 9514), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((10559, 10593), 'numpy.asarray', 'np.asarray', (['x[0]'], {'dtype': 'np.float32'}), '(x[0], dtype=np.float32)\n', (10569, 10593), True, 'import numpy as np\n'), ((10615, 10649), 'numpy.asarray', 'np.asarray', (['x[1]'], {'dtype': 'np.float32'}), '(x[1], dtype=np.float32)\n', (10625, 10649), True, 'import numpy as np\n'), ((11504, 11536), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', (['args.lr'], {}), '(args.lr)\n', (11527, 11536), False, 'import chainer\n'), ((11756, 11802), 'chainer.optimizer_hooks.GradientClipping', 'chainer.optimizer_hooks.GradientClipping', (['(10.0)'], {}), '(10.0)\n', (11796, 11802), False, 'import chainer\n'), ((13230, 13240), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (13237, 13240), 
True, 'import numpy as np\n'), ((14744, 14821), 'chainer.optimizers.RMSpropGraves', 'chainer.optimizers.RMSpropGraves', (['args.lr'], {'alpha': '(0.95)', 'momentum': '(0.0)', 'eps': '(0.01)'}), '(args.lr, alpha=0.95, momentum=0.0, eps=0.01)\n', (14776, 14821), False, 'import chainer\n'), ((14972, 15027), 'chainer.optimizer_hooks.WeightDecay', 'chainer.optimizer_hooks.WeightDecay', (['args.loss_coeff_l2'], {}), '(args.loss_coeff_l2)\n', (15007, 15027), False, 'import chainer\n'), ((8468, 8499), 'env_wrappers.PoVWithCompassAngleWrapper', 'PoVWithCompassAngleWrapper', (['env'], {}), '(env)\n', (8494, 8499), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((8532, 8553), 'env_wrappers.ObtainPoVWrapper', 'ObtainPoVWrapper', (['env'], {}), '(env)\n', (8548, 8553), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((8649, 8685), 'os.path.join', 'os.path.join', (['args.outdir', '"""monitor"""'], {}), "(args.outdir, 'monitor')\n", (8661, 8685), False, 'import os\n'), ((10722, 10753), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (10732, 10753), True, 'import numpy as np\n'), ((14879, 14911), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', (['args.lr'], {}), '(args.lr)\n', (14902, 14911), False, 'import chainer\n'), ((10910, 10930), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (10927, 10930), True, 'import numpy as np\n'), ((13254, 13263), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (13260, 13263), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import unittest
from pygraph import util
class TestUtil(unittest.TestCase):
def test_pointsToEdges(self):
points = [(1, 1), (2, 2), (3, 3)]
expected = [
((1, 1), (2, 2)),
((2, 2), (3, 3)),
((3, 3), (1, 1))
]
self.assertListEqual(expected, util.pointsToEdges(points))
|
[
"pygraph.util.pointsToEdges"
] |
[((338, 364), 'pygraph.util.pointsToEdges', 'util.pointsToEdges', (['points'], {}), '(points)\n', (356, 364), False, 'from pygraph import util\n')]
|
import setuptools
setuptools.setup(
name="PyQNLPSimulator",
version="0.1",
author="<NAME> (ICHEC), <NAME> (ICHEC)",
author_email="<EMAIL>, <EMAIL>",
description="Quantum NLP package",
long_description="Quantum NLP project @ ICHEC",
url="https://github.com/ichec/qnlp",
packages=setuptools.find_packages(),
package_data={'': ['_PyQNLPSimulator.*.so'],},
classifiers=[
"Programming Language :: Python :: 3",
],
)
|
[
"setuptools.find_packages"
] |
[((311, 337), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (335, 337), False, 'import setuptools\n')]
|
#!/usr/bin/env python3
import numpy as np
import matching.cr_search_validation_matcher
import utils.data_format_keys as dfk
import sys
from evaluation.link_metrics import LinkMetricsResults
from multiprocessing import Pool
from utils.utils import read_json, save_json
def modify_simple_threshold(dataset, threshold):
for item in dataset:
if item[dfk.DATASET_SCORE] is not None and \
item[dfk.DATASET_SCORE] < threshold:
item[dfk.DATASET_TARGET_TEST][dfk.CR_ITEM_DOI] = None
return dataset
def find_best(results):
overall = [r[1].get(dfk.EVAL_F1) for r in results]
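    # pick the last threshold that attains the maximum overall F1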
index = len(overall) - overall[::-1].index(max(overall)) - 1
return index, results[index][0], results[index][1].get(dfk.EVAL_PREC), \
results[index][1].get(dfk.EVAL_REC), results[index][1].get(dfk.EVAL_F1)
dataset = read_json(sys.argv[1])['dataset']
matcher = matching.cr_search_validation_matcher.Matcher(0.4, 0, [])
with Pool(10) as p:
results = p.map(matcher.match,
[item.get('ref_string') for item in dataset])
for item, target in zip(dataset, results):
item['target_test']['DOI'] = target[0]
item['score'] = target[1]
save_json(dataset, sys.argv[2])
results_valid_threshold = \
[(t, LinkMetricsResults(modify_simple_threshold(dataset, t)))
for t in np.arange(0.0, 1.0, 0.01)]
print(','.join([str(i) for i in find_best(results_valid_threshold)[1:]]))
|
[
"utils.utils.read_json",
"numpy.arange",
"utils.utils.save_json",
"multiprocessing.Pool"
] |
[((1193, 1224), 'utils.utils.save_json', 'save_json', (['dataset', 'sys.argv[2]'], {}), '(dataset, sys.argv[2])\n', (1202, 1224), False, 'from utils.utils import read_json, save_json\n'), ((852, 874), 'utils.utils.read_json', 'read_json', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (861, 874), False, 'from utils.utils import read_json, save_json\n'), ((960, 968), 'multiprocessing.Pool', 'Pool', (['(10)'], {}), '(10)\n', (964, 968), False, 'from multiprocessing import Pool\n'), ((1334, 1359), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', '(0.01)'], {}), '(0.0, 1.0, 0.01)\n', (1343, 1359), True, 'import numpy as np\n')]
|
import pprint
import json
pp = pprint.PrettyPrinter(indent = 4)
def convert(path):
with open(path, 'r') as read_obj:
line = 'init'
counter = 0
l = []
while line:
line = read_obj.readline()
counter += 1
if counter == 1:
continue
fields = line.split('\t')
if len(fields) != 2:
continue
l.append({'id':fields[0], 'rate':float(fields[1].strip())})
return l
def main():
return convert('unemployment.tsv')
if __name__ == '__main__':
pp.pprint(main())
|
[
"pprint.PrettyPrinter"
] |
[((31, 61), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (51, 61), False, 'import pprint\n')]
|
__author__ = 'cjoakim'
import math
from .elapsed_time import ElapsedTime
class Speed(object):
def __init__(self, d, et):
self.dist = d # an instance of Distance
self.etime = et # an instance of ElapsedTime
def mph(self):
return self.dist.as_miles() / self.etime.hours()
def kph(self):
return self.dist.as_kilometers() / self.etime.hours()
def yph(self):
return self.dist.as_yards() / self.etime.hours()
def pace_per_mile(self):
spm = self.seconds_per_mile()
mm = math.floor(spm / 60.0)
ss = spm - (mm * 60.0)
if ss < 10:
ss = "0{0}".format(ss)
else:
ss = "{0}".format(ss)
if len(ss) > 5:
ss = ss[0:5]
return "{0}:{1}".format(mm, ss)
def seconds_per_mile(self):
return float(self.etime.secs / self.dist.as_miles())
def projected_time(self, another_distance, algorithm='simple'):
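        # 'riegel' applies Riegel's endurance formula t2 = t1 * (d2 / d1) ** 1.06;
        # any other value falls back to a linear pace-per-mile projection.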
        if algorithm == 'riegel':
t1 = float(self.etime.secs)
d1 = self.dist.as_miles()
d2 = another_distance.as_miles()
t2 = t1 * math.pow(float(d2 / d1), float(1.06))
et = ElapsedTime(t2)
return et.as_hhmmss()
else:
secs = float(self.seconds_per_mile() * another_distance.as_miles())
et = ElapsedTime(secs)
return et.as_hhmmss()
def age_graded(self, event_age, graded_age):
ag_factor = event_age.max_pulse() / graded_age.max_pulse()
graded_secs = float((self.etime.secs)) * float(ag_factor)
graded_et = ElapsedTime(graded_secs)
return Speed(self.dist, graded_et)
def __str__(self):
template = "<Speed dist:{0} etime:{1}>"
return template.format(self.dist, self.etime)
|
[
"math.floor"
] |
[((552, 574), 'math.floor', 'math.floor', (['(spm / 60.0)'], {}), '(spm / 60.0)\n', (562, 574), False, 'import math\n')]
|
import telebot
from telebot import types
import os
import random
from PIL import ImageGrab
from winsound import Beep
import requests
import platform
import psutil
import time
# proxy = 'http://192.168.88.170:8888'
# os.environ['http_proxy'] = proxy
# os.environ['HTTP_PROXY'] = proxy
# os.environ['https_proxy'] = proxy
# os.environ['HTTPS_PROXY'] = proxy
start_time = time.time()
os.system("cls")
token = 'Your Token Here ;) ' #str
bot = telebot.TeleBot(token)
admin = 440904809  # enter your Telegram user ID here (int)
bot.send_message(admin, 'سیستم روشن شد!')
def getfile(filename):
    # use a context manager so the file handle is always closed
    with open(filename, "r+", encoding='utf-8') as myfile:
        return myfile.read()
def putfile(filename, filedata):
    with open(filename, "w+", encoding='utf-8') as myfile:
        myfile.write(filedata)
def startcm(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True)
btn1 = types.KeyboardButton('ScreenShot📸')
btn2 = types.KeyboardButton('Power Option ⚠️')
btn3 = types.KeyboardButton('Sound🔉')
btn4 = types.KeyboardButton('File Manager🗄')
btn5 = types.KeyboardButton('System Info💻')
btn6 = types.KeyboardButton('Open Web🌍')
btns.add(btn1, btn2, btn3, btn4, btn5, btn6)
message = '''
سلام خوش آمدید.😄
لیست دستورات برای فقط شماست😜
'''
bot.send_message(chat_id, message, reply_markup=btns)
# print(id)
def savedb(user):
chat_id = user.from_user.id
text = user.text
con_text = text.replace('/save ', '')
# con_text = con_text.encode("utf-8")
mesid = random.randint(1111, 9999)
message = f'پیام شما: \n {con_text} \n شناسه پیام : {mesid}'
bot.send_message(chat_id, message)
putfile(f'database/data_{mesid}.txt', str(con_text))
# print(con_text, mesid)
def savedb_lsit(user):
chat_id = user.from_user.id
list_file = ''
for r, d, f in os.walk('database'):
for file in f:
list_file = list_file + '\n' + str(file)
bot.send_message(chat_id, 'پیام های شما: \n' + str(list_file))
def power(user):
chat_id = user.from_user.id
# text = user.text
btns = types.ReplyKeyboardMarkup(row_width=1, one_time_keyboard=True)
btn1 = types.KeyboardButton('ShoutDown | خاموش کردن')
btn2 = types.KeyboardButton('ریستارت | Restart')
btn3 = types.KeyboardButton('بازگشت')
btns.add(btn1, btn2, btn3, )
bot.send_message(chat_id, 'شما به بخش power option وارد شدید.لیست دستورات: \n', reply_markup=btns)
def home(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True)
btn1 = types.KeyboardButton('ScreenShot📸')
btn2 = types.KeyboardButton('Power Option ⚠️')
btn3 = types.KeyboardButton('Sound🔉')
btn4 = types.KeyboardButton('File Manager🗄')
btn5 = types.KeyboardButton('System Info💻')
btn6 = types.KeyboardButton('Open Web🌍')
btns.add(btn1, btn2, btn3, btn4, btn5, btn6)
bot.send_message(chat_id, '🏛صفحه اصلی: ', reply_markup=btns)
def playmusic_btn(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('Music 🎧')
btn2 = types.KeyboardButton('Beep')
btn3 = types.KeyboardButton('بازگشت')
btns.add(btn1, btn2, btn3)
message = '''
🤔لطفا نوع صدا را انتخاب کنید
'''
bot.send_message(chat_id, message, reply_markup=btns)
def bep(user):
chat_id = user.from_user.id
bot.send_message(chat_id, 'بوق پخش شد😉')
for x in range(1, 6):
Beep(1000 * x, 200)
Beep(1000 * x, 200 - (x * 50))
def music(user):
chat_id = user.from_user.id
message = '''
لطفا آهنگ خود را بفرستید تا برایتان پخش کنم!😊
یا از دستور زیر استفاده کنید👇:
/music [File_id]
برای دریافت آیدی ها دستور زیر را بزنید👇:
/music_list
'''
bot.send_message(chat_id, message)
def music_id(user):
chat_id = user.from_user.id
list_file = ''
music_count = 0
for r, d, f in os.walk('music'):
for file in f:
if 'mp3' in file:
music_count += 1
list_file = list_file + '\n' + str(file)
else:
pass
message = f'''
تعداد آهنگ ها:{music_count}
لیست آهنگ های ذخیره شده:
{list_file}
'''
bot.send_message(chat_id, message)
def music_play(user):
chat_id = user.from_user.id
text = user.text
music_name = text.replace('/music ', '')
os.system(f'start music/{str(music_name)}.mp3')
musiv = open(f'music/{str(music_name)}.mp3', 'rb')
message = f'''
آهنگ با کد اختصاصی زیر درحال پخش:🎶
{str(music_name)}
'''
bot.send_message(chat_id, message)
bot.send_chat_action(chat_id, 'upload_document')
bot.send_audio(chat_id, musiv, caption='آهنگ درحال پخش😐')
def screenshot(user):
chat_id = user.from_user.id
message = 'گرفتن اسکرین...'
bot.send_message(chat_id, message)
photo = ImageGrab.grab()
photo.save('screen.png')
message = 'اسکرین شات گرفته شد!😋'
bot.send_message(chat_id, message)
photo = open('screen.png', 'rb')
bot.send_photo(chat_id, photo)
photo.close()
photo = open('screen.png', 'rb')
bot.send_document(chat_id, photo, caption='اسکرین گرفته شده نسخه با کیفیت 🙄')
photo.close()
os.remove('screen.png')
# print(chat_id)
def systeminfo(user):
chat_id = user.from_user.id
uname = platform.uname()
runtime = time.time() - start_time
if runtime < 60:
runtime = f'{int(runtime)} Second'
else:
runtime = runtime / 60
runtime = f'{int(runtime)} Minutes'
message = f'''
🔰 System: {uname.system} {uname.release}
👥 Node Name: {uname.node}
🔺 CPU Usage {psutil.cpu_percent()} Percent
🔺 RAM Usage: {psutil.virtual_memory()[2]} Percent
📝 Machine Architecture: {uname.machine}
⏱ Bot Run Time: {runtime}
'''
bot.send_message(chat_id, message)
def shutdown_btn(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('آره مطمئنم میخوام خاموش بشه!!')
btn2 = types.KeyboardButton('نه دستم خورد !!')
btns.add(btn1, btn2, )
message = '''
آیا مطمئن هستید که سیستم خاموش شود؟🤨
'''
bot.send_message(chat_id, message, reply_markup=btns)
def restart_btn(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('آره مطمئنم میخوام ریستارت بشه!!')
btn2 = types.KeyboardButton('نه دستم خورد !!')
btns.add(btn1, btn2)
message = '''
آیا مطمئن هستید که سیستم ریستارت شود؟🤨
'''
bot.send_message(chat_id, message, reply_markup=btns)
def download_btn(user):
chat_id = user.from_user.id
text = user.text
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('Download File From System📥')
btn2 = types.KeyboardButton('File List📂')
btn3 = types.KeyboardButton('بازگشت')
btns.add(btn1, btn2, btn3)
bot.send_message(chat_id, 'به فایل منجر خوش آمدید😅', reply_markup=btns)
def downlaod_message(user):
chat_id = user.from_user.id
# text = user.text
bot.send_message(chat_id, 'نحوه استفاده \n /download [file name or file address]')
def download_file(user):
chat_id = user.from_user.id
text = user.text
filename_or_address = text.replace('/download ', '')
if os.path.isdir(filename_or_address):
bot.send_message(chat_id, 'این یک فولدر هست و قابل دانلود نیست😑')
else:
if os.path.isfile(filename_or_address):
file = open(filename_or_address, 'rb')
bot.send_message(chat_id, 'درحال آپلود کردن فایل درخواستی شما...')
bot.send_document(chat_id, file, caption='این فایل درخواستی شماست 😁')
else:
bot.send_message(chat_id, 'فایل یا فولدری با این نام پیدا نشد.🤐')
def web_btn(user):
chat_id = user.from_user.id
text = user.text
bot.send_message(chat_id, 'نحوه استفاده \n /web [URL]')
def filemanagerlist(user):
userchatid = user.chat.id
usertext = user.text
directory = usertext.replace("/filemanager ", "")
if (os.path.isdir(directory)):
bot.send_message(userchatid, "🔎 درحال اسکن کردن فولدر ...")
foldercount = 0
folderlist = ""
filecount = 0
filelist = ""
for r, d, f in os.walk(directory):
for folder in d:
if (foldercount > 30 or foldercount == 30):
break
else:
if ("\\" in r):
pass
else:
foldercount += 1
folderlist = folderlist + "\n" + "📁 " + r + "/" + folder
for file in f:
if (filecount > 30 or filecount == 30):
break
else:
filecount += 1
filelist = filelist + "\n" + "🧾 " + r + "/" + file
bot.send_message(userchatid, "🗂 30 First Folders In " + directory + " : \n\n" + str(folderlist))
bot.send_message(userchatid, "🗃 30 First File In " + directory + " : \n\n" + str(filelist))
else:
bot.send_message(userchatid, "چیزی پیدا نکردم 😐")
def justfilelist(user):
userchatid = user.chat.id
bot.send_message(userchatid, "نحوه استفاده:\n/filemanager [dir]")
@bot.message_handler(content_types=['text'])
def main(user):
chat_id = user.from_user.id
text = user.text
# print(chat_id)
if chat_id == admin:
if text == '/start':
startcm(user)
if text == '/save':
bot.send_message(chat_id, 'لطفا بعد از دستور پیام خود را اضافه کنید\n به این صورت : \n /save [message] ')
if text.startswith('/save '):
savedb(user)
if text == '/message':
savedb_lsit(user)
if text == 'Power Option ⚠️':
power(user)
if text == 'ShoutDown | خاموش کردن':
shutdown_btn(user)
if text == 'ریستارت | Restart':
restart_btn(user)
if text == 'آره مطمئنم میخوام خاموش بشه!!':
bot.send_message(chat_id, 'سیستم شما خاموش شد!😐')
os.system('shutdown /s /t 1')
home(user)
if text == 'آره مطمئنم میخوام ریستارت بشه!!':
bot.send_message(chat_id, 'سیستم شما ریستارت شد!😐')
os.system('shutdown /r /t 1')
home(user)
if text == 'بازگشت':
home(user)
if text == 'نه دستم خورد !!':
bot.send_message(chat_id, 'کنترل دستت هم نداری بدبخت 😂')
home(user)
if text == 'ScreenShot📸':
screenshot(user)
if text == 'Sound🔉':
playmusic_btn(user)
if text == 'Beep':
bep(user)
if text == 'Music 🎧':
music(user)
if text == 'System Info💻':
systeminfo(user)
if text == 'File Manager🗄':
download_btn(user)
if text == 'Download File From System📥':
downlaod_message(user)
if text.startswith('/download '):
download_file(user)
if text == '/download':
downlaod_message(user)
if text == 'Open Web🌍':
web_btn(user)
if text.startswith('/web '):
url = text.replace('/web ', '')
bot.send_message(chat_id, f'گوگل کروم با آدرس شما[{url}] باز شد🥳')
os.system(f"start chrome {url}")
if text == '/web':
web_btn(user)
if (text == "File List📂" or text == "/filemanager"):
justfilelist(user)
if (text.startswith("/filemanager ")):
filemanagerlist(user)
if text.startswith('/music '):
music_play(user)
if text == '/music':
music(user)
if text == '/music_list':
music_id(user)
else:
bot.send_message(chat_id, 'شما ادمین نیستید 😐')
@bot.message_handler(content_types=['audio'])
def audio(message):
chat_id = message.from_user.id
# print(message.audio)
raw = message.audio.file_id
# file_name = message.audio.file_unique_id
title = message.audio.title
title = title.strip()
title = title.replace(' ', '_')
performer = message.audio.performer
performer = performer.strip()
performer = performer.replace(' ', '_')
file_size = message.audio.file_size
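    # Telegram's Bot API only lets bots download files up to 20 MB (20971520 bytes)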
if file_size > 20971520:
bot.send_message(chat_id, 'محدودیت های تلگرام حجم فایل بیش از 20مگابایت هست 🤐')
else:
try:
file_info = bot.get_file(raw)
downloaded_file = bot.download_file(file_info.file_path)
with open(f'music/{str(performer)}-{str(title).strip()}.mp3', 'wb') as new_file:
new_file.write(downloaded_file)
os.system(f'start music/{str(performer)}-{str(title).strip()}.mp3')
bot.send_message(chat_id, 'درحال پخش آهنگ شما...😯')
bot.send_message(chat_id, 'کد اختصاصی این آهنگ👇')
bot.send_message(chat_id, f'``` /music {str(performer)}-{str(title).strip()}```', parse_mode='markdown')
except:
bot.send_message(chat_id, 'این آهنگ درحال پخش است😒')
try:
bot.polling(True)
except:
print(' I Got Error :( ')
|
[
"os.remove",
"psutil.virtual_memory",
"telebot.types.KeyboardButton",
"random.randint",
"telebot.types.ReplyKeyboardMarkup",
"PIL.ImageGrab.grab",
"os.path.isdir",
"winsound.Beep",
"os.walk",
"os.system",
"platform.uname",
"time.time",
"os.path.isfile",
"telebot.TeleBot",
"psutil.cpu_percent"
] |
[((388, 399), 'time.time', 'time.time', ([], {}), '()\n', (397, 399), False, 'import time\n'), ((401, 417), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (410, 417), False, 'import os\n'), ((463, 485), 'telebot.TeleBot', 'telebot.TeleBot', (['token'], {}), '(token)\n', (478, 485), False, 'import telebot\n'), ((905, 954), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (930, 954), False, 'from telebot import types\n'), ((967, 1002), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""ScreenShot📸"""'], {}), "('ScreenShot📸')\n", (987, 1002), False, 'from telebot import types\n'), ((1015, 1054), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Power Option ⚠️"""'], {}), "('Power Option ⚠️')\n", (1035, 1054), False, 'from telebot import types\n'), ((1067, 1097), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Sound🔉"""'], {}), "('Sound🔉')\n", (1087, 1097), False, 'from telebot import types\n'), ((1110, 1147), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""File Manager🗄"""'], {}), "('File Manager🗄')\n", (1130, 1147), False, 'from telebot import types\n'), ((1160, 1196), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""System Info💻"""'], {}), "('System Info💻')\n", (1180, 1196), False, 'from telebot import types\n'), ((1209, 1242), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Open Web🌍"""'], {}), "('Open Web🌍')\n", (1229, 1242), False, 'from telebot import types\n'), ((1628, 1654), 'random.randint', 'random.randint', (['(1111)', '(9999)'], {}), '(1111, 9999)\n', (1642, 1654), False, 'import random\n'), ((1950, 1969), 'os.walk', 'os.walk', (['"""database"""'], {}), "('database')\n", (1957, 1969), False, 'import os\n'), ((2208, 2270), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'row_width': '(1)', 'one_time_keyboard': '(True)'}), '(row_width=1, one_time_keyboard=True)\n', (2233, 2270), False, 'from telebot import types\n'), ((2283, 2329), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""ShoutDown | خاموش کردن"""'], {}), "('ShoutDown | خاموش کردن')\n", (2303, 2329), False, 'from telebot import types\n'), ((2342, 2383), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""ریستارت | Restart"""'], {}), "('ریستارت | Restart')\n", (2362, 2383), False, 'from telebot import types\n'), ((2396, 2426), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""بازگشت"""'], {}), "('بازگشت')\n", (2416, 2426), False, 'from telebot import types\n'), ((2631, 2680), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (2656, 2680), False, 'from telebot import types\n'), ((2693, 2728), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""ScreenShot📸"""'], {}), "('ScreenShot📸')\n", (2713, 2728), False, 'from telebot import types\n'), ((2741, 2780), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Power Option ⚠️"""'], {}), "('Power Option ⚠️')\n", (2761, 2780), False, 'from telebot import types\n'), ((2793, 2823), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Sound🔉"""'], {}), "('Sound🔉')\n", (2813, 2823), False, 'from telebot import types\n'), ((2836, 2873), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""File Manager🗄"""'], {}), "('File Manager🗄')\n", (2856, 2873), False, 'from telebot import types\n'), ((2886, 2922), 'telebot.types.KeyboardButton', 
'types.KeyboardButton', (['"""System Info💻"""'], {}), "('System Info💻')\n", (2906, 2922), False, 'from telebot import types\n'), ((2935, 2968), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Open Web🌍"""'], {}), "('Open Web🌍')\n", (2955, 2968), False, 'from telebot import types\n'), ((3160, 3222), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)', 'row_width': '(1)'}), '(one_time_keyboard=True, row_width=1)\n', (3185, 3222), False, 'from telebot import types\n'), ((3235, 3266), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Music 🎧"""'], {}), "('Music 🎧')\n", (3255, 3266), False, 'from telebot import types\n'), ((3279, 3307), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Beep"""'], {}), "('Beep')\n", (3299, 3307), False, 'from telebot import types\n'), ((3320, 3350), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""بازگشت"""'], {}), "('بازگشت')\n", (3340, 3350), False, 'from telebot import types\n'), ((4121, 4137), 'os.walk', 'os.walk', (['"""music"""'], {}), "('music')\n", (4128, 4137), False, 'import os\n'), ((5106, 5122), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {}), '()\n', (5120, 5122), False, 'from PIL import ImageGrab\n'), ((5477, 5500), 'os.remove', 'os.remove', (['"""screen.png"""'], {}), "('screen.png')\n", (5486, 5500), False, 'import os\n'), ((5596, 5612), 'platform.uname', 'platform.uname', ([], {}), '()\n', (5610, 5612), False, 'import platform\n'), ((6197, 6259), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)', 'row_width': '(1)'}), '(one_time_keyboard=True, row_width=1)\n', (6222, 6259), False, 'from telebot import types\n'), ((6272, 6325), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""آره مطمئنم میخوام خاموش بشه!!"""'], {}), "('آره مطمئنم میخوام خاموش بشه!!')\n", (6292, 6325), False, 'from telebot import types\n'), ((6338, 6377), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""نه دستم خورد !!"""'], {}), "('نه دستم خورد !!')\n", (6358, 6377), False, 'from telebot import types\n'), ((6616, 6678), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)', 'row_width': '(1)'}), '(one_time_keyboard=True, row_width=1)\n', (6641, 6678), False, 'from telebot import types\n'), ((6691, 6746), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""آره مطمئنم میخوام ریستارت بشه!!"""'], {}), "('آره مطمئنم میخوام ریستارت بشه!!')\n", (6711, 6746), False, 'from telebot import types\n'), ((6759, 6798), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""نه دستم خورد !!"""'], {}), "('نه دستم خورد !!')\n", (6779, 6798), False, 'from telebot import types\n'), ((7068, 7130), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)', 'row_width': '(1)'}), '(one_time_keyboard=True, row_width=1)\n', (7093, 7130), False, 'from telebot import types\n'), ((7143, 7193), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""Download File From System📥"""'], {}), "('Download File From System📥')\n", (7163, 7193), False, 'from telebot import types\n'), ((7206, 7240), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""File List📂"""'], {}), "('File List📂')\n", (7226, 7240), False, 'from telebot import types\n'), ((7253, 7283), 'telebot.types.KeyboardButton', 'types.KeyboardButton', (['"""بازگشت"""'], {}), "('بازگشت')\n", (7273, 7283), False, 'from telebot import types\n'), ((7722, 7756), 
'os.path.isdir', 'os.path.isdir', (['filename_or_address'], {}), '(filename_or_address)\n', (7735, 7756), False, 'import os\n'), ((8499, 8523), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (8512, 8523), False, 'import os\n'), ((3639, 3658), 'winsound.Beep', 'Beep', (['(1000 * x)', '(200)'], {}), '(1000 * x, 200)\n', (3643, 3658), False, 'from winsound import Beep\n'), ((3668, 3696), 'winsound.Beep', 'Beep', (['(1000 * x)', '(200 - x * 50)'], {}), '(1000 * x, 200 - x * 50)\n', (3672, 3696), False, 'from winsound import Beep\n'), ((5628, 5639), 'time.time', 'time.time', ([], {}), '()\n', (5637, 5639), False, 'import time\n'), ((7856, 7891), 'os.path.isfile', 'os.path.isfile', (['filename_or_address'], {}), '(filename_or_address)\n', (7870, 7891), False, 'import os\n'), ((8721, 8739), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (8728, 8739), False, 'import os\n'), ((5937, 5957), 'psutil.cpu_percent', 'psutil.cpu_percent', ([], {}), '()\n', (5955, 5957), False, 'import psutil\n'), ((10611, 10640), 'os.system', 'os.system', (['"""shutdown /s /t 1"""'], {}), "('shutdown /s /t 1')\n", (10620, 10640), False, 'import os\n'), ((10800, 10829), 'os.system', 'os.system', (['"""shutdown /r /t 1"""'], {}), "('shutdown /r /t 1')\n", (10809, 10829), False, 'import os\n'), ((11902, 11934), 'os.system', 'os.system', (['f"""start chrome {url}"""'], {}), "(f'start chrome {url}')\n", (11911, 11934), False, 'import os\n'), ((5984, 6007), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (6005, 6007), False, 'import psutil\n')]
|
"""Test cases for dual bytes/str APIs"""
import unittest
"""
The Python 2 str type conveniently permitted the creation of APIs that
could be used as either binary APIs (8-bit str in, 8-bit str out) or as
text APIs (unicode in, unicode out).
The critical enabler for this feature was the ability to define any
*constants* used in these algorithms as 8 bit strings, and then rely on
the implicit promotion to Unicode to handle text input.
In Python 3, that implicit conversion to Unicode is gone, so APIs that
handle both binary and text data need to be written to either have two
separate code paths, or else to automatically decode binary input to text
and then convert it back to binary output again when returning the result.
However, it should be possible to create a Python 3 extension type that
inherits from str (providing interoperability with str objects) and *also*
implements the buffer API (providing interoperability with bytes and
bytearray, and likely other types).
This is a test suite developed on Python 2, demonstrating the convenience
of the implicit conversion in the case of such dual binary/text interfaces.
While the general recommendation for Python 3 code is to ensure APIs are
either binary *or* text rather than a hybrid combination, libraries
migrating from Python 2 that already publish such hybrid APIs may need to
continue to support both styles of usage for the benefit of clients (as
some clients may be using the binary half of the interface, while others
are using the text half).
The URL parsing APIs in Python 3's urllib.parse module are an example of
such an API. It supported both str and unicode in Python 2 and supports
both str and any type with a decode method in Python 3"""
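# A minimal illustration (not the asciicompat implementation): under Python 2
# an 8-bit str constant such as "ascii" is already such a hybrid, because
#     u"text" + "ascii"    -> u"textascii"    (promoted to unicode)
#     b"binary" + "ascii"  -> b"binaryascii"  (stays str/bytes)
# asciistr aims to preserve this dual behaviour on Python 3 by subclassing
# str and exposing the buffer protocol.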
try:
from asciicompat import asciistr
except ImportError:
# Python 2 fallback
asciistr = str
# Developing the tests on Python 2
try:
text_type = unicode
except:
text_type = str
binary_type = bytes
asciistr = str
# Some test values
TEXT = u"text"
BINARY = b"binary"
HYBRID = asciistr("ascii")
class TestHybridAddition(unittest.TestCase):
def test_text_addition(self):
self.assertEqual(TEXT + HYBRID, u"textascii")
self.assertIsInstance(TEXT + HYBRID, text_type)
self.assertEqual(HYBRID + TEXT, u"asciitext")
self.assertIsInstance(HYBRID + TEXT, text_type)
def test_binary_addition(self):
self.assertEqual(BINARY + HYBRID, b"binaryascii")
self.assertIsInstance(BINARY + HYBRID, binary_type)
# Next two are likely to be affected by
# http://bugs.python.org/issue11477
# as the str subclass on the LHS will throw TypeError directly
# as returning NotImplemented from sq_concat is not currently
# supported correctly
self.assertEqual(HYBRID + BINARY, b"asciibinary")
self.assertIsInstance(HYBRID + BINARY, binary_type)
class HybridTestMixin(object):
input_data = None
output_type = None
exists = asciistr("data")
missing = asciistr("not data")
def test_containment(self):
self.assertIn(self.exists, self.input_data)
self.assertIn(self.exists[:2], self.input_data)
self.assertNotIn(self.missing, self.input_data)
def test_partitioning(self):
before, sep, after = self.input_data.partition(self.exists)
self.assertIsInstance(before, self.output_type)
self.assertIsInstance(sep, self.output_type)
self.assertIsInstance(after, self.output_type)
self.assertEqual(sep, self.exists)
def test_casting(self):
self.assertEqual(self.output_type(self.exists), self.exists)
self.assertIs(type(self.output_type(self.exists)), self.output_type)
# Formatting tests: in Python 2, str formatting always produces
# str objects, *except* when a Unicode object is passed to mod-formatting
def test_mod_formatting(self):
formatted = asciistr("%s") % self.input_data
self.assertEqual(formatted, self.input_data)
self.assertIs(type(formatted), self.output_type)
formatted_int = asciistr("%d") % 42
# asciistr also avoids the byte constructor length init quirk
self.assertEqual(formatted_int, asciistr(42))
self.assertIs(type(formatted_int), binary_type)
def test_format_method(self):
formatted = asciistr("{}").format(self.input_data)
self.assertEqual(formatted, self.input_data)
self.assertIs(type(formatted), binary_type)
formatted_int = asciistr("{:d}").format(42)
# asciistr also avoids the byte constructor length init quirk
self.assertEqual(formatted_int, asciistr(42))
self.assertIs(type(formatted_int), binary_type)
class TestBinaryInteraction(unittest.TestCase, HybridTestMixin):
input_data = b"there is binary data in this test case"
output_type = binary_type
class TestTextInteraction(unittest.TestCase, HybridTestMixin):
input_data = u"there is text data in this test case"
output_type = text_type
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"asciicompat.asciistr"
] |
[((2025, 2042), 'asciicompat.asciistr', 'asciistr', (['"""ascii"""'], {}), "('ascii')\n", (2033, 2042), False, 'from asciicompat import asciistr\n'), ((2971, 2987), 'asciicompat.asciistr', 'asciistr', (['"""data"""'], {}), "('data')\n", (2979, 2987), False, 'from asciicompat import asciistr\n'), ((3002, 3022), 'asciicompat.asciistr', 'asciistr', (['"""not data"""'], {}), "('not data')\n", (3010, 3022), False, 'from asciicompat import asciistr\n'), ((5041, 5056), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5054, 5056), False, 'import unittest\n'), ((3906, 3920), 'asciicompat.asciistr', 'asciistr', (['"""%s"""'], {}), "('%s')\n", (3914, 3920), False, 'from asciicompat import asciistr\n'), ((4073, 4087), 'asciicompat.asciistr', 'asciistr', (['"""%d"""'], {}), "('%d')\n", (4081, 4087), False, 'from asciicompat import asciistr\n'), ((4203, 4215), 'asciicompat.asciistr', 'asciistr', (['(42)'], {}), '(42)\n', (4211, 4215), False, 'from asciicompat import asciistr\n'), ((4634, 4646), 'asciicompat.asciistr', 'asciistr', (['(42)'], {}), '(42)\n', (4642, 4646), False, 'from asciicompat import asciistr\n'), ((4328, 4342), 'asciicompat.asciistr', 'asciistr', (['"""{}"""'], {}), "('{}')\n", (4336, 4342), False, 'from asciicompat import asciistr\n'), ((4496, 4512), 'asciicompat.asciistr', 'asciistr', (['"""{:d}"""'], {}), "('{:d}')\n", (4504, 4512), False, 'from asciicompat import asciistr\n')]
|
"""
This file is part of the magtifun.abgeo.dev.
(c) 2021 <NAME> <<EMAIL>>
For the full copyright and license information, please view the LICENSE
file that was distributed with this source code.
"""
from datetime import timedelta
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from fastapi import APIRouter
from app.core.config import ACCESS_TOKEN_EXPIRE_MINUTES
from app.models.schemas.jwt import Token
from app.resources import strings
from app.services.jwt import create_access_token
from app.services.magtifun import authenticate_user
router = APIRouter(tags=["Auth"])
@router.post("/token", response_model=Token)
async def token_authentication(
form_data: OAuth2PasswordRequestForm = Depends(),
) -> Token:
"""
Create Authentication JWT Token.
"""
user = authenticate_user(form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail=strings.INCORRECT_LOGIN_INPUT,
headers={"WWW-Authenticate": "Bearer"},
)
access_token = create_access_token(
data={"sub": user.key},
expires_delta=timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES),
)
return Token(access_token=access_token, token_type="bearer")
|
[
"fastapi.HTTPException",
"app.services.magtifun.authenticate_user",
"datetime.timedelta",
"fastapi.Depends",
"app.models.schemas.jwt.Token",
"fastapi.APIRouter"
] |
[((615, 639), 'fastapi.APIRouter', 'APIRouter', ([], {'tags': "['Auth']"}), "(tags=['Auth'])\n", (624, 639), False, 'from fastapi import APIRouter\n'), ((762, 771), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (769, 771), False, 'from fastapi import Depends, HTTPException, status\n'), ((850, 907), 'app.services.magtifun.authenticate_user', 'authenticate_user', (['form_data.username', 'form_data.password'], {}), '(form_data.username, form_data.password)\n', (867, 907), False, 'from app.services.magtifun import authenticate_user\n'), ((1281, 1334), 'app.models.schemas.jwt.Token', 'Token', ([], {'access_token': 'access_token', 'token_type': '"""bearer"""'}), "(access_token=access_token, token_type='bearer')\n", (1286, 1334), False, 'from app.models.schemas.jwt import Token\n'), ((939, 1077), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': 'strings.INCORRECT_LOGIN_INPUT', 'headers': "{'WWW-Authenticate': 'Bearer'}"}), "(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.\n INCORRECT_LOGIN_INPUT, headers={'WWW-Authenticate': 'Bearer'})\n", (952, 1077), False, 'from fastapi import Depends, HTTPException, status\n'), ((1215, 1261), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'ACCESS_TOKEN_EXPIRE_MINUTES'}), '(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n', (1224, 1261), False, 'from datetime import timedelta\n')]
|
#!/usr/bin/env python3
#
# visualize_contingency_tables.py: Visualizes all contingency tables
# obtained by our method in the form of a diagram in the plane.
#
# Input: JSON file with shapelets
#
# Output: A set of points in the plane, each representing one table,
# such that the distance to the origin refers to preferences
# in splitting behaviour.
#
# The output will be written to `stdout`.
import argparse
import json
import sys
import numpy as np
def transform_table(table):
"""
Transforms a contingency table into a point on a two-dimensional
plane, in which the distance to the origin shows the suitability
of a contingency table for separating cases and controls.
"""
# Yes, this ordering is correct. Please refer to our paper for
# more details.
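    # Worked example (hypothetical numbers): table = [30, 10, 5, 25] unpacks to
    # a=30, b=10, d=5, c=25, so n1=40, n0=30 and the returned point is
    # ((30-10)/40, (25-5)/30) = (0.5, 2/3).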
a, b, d, c = table
n1 = a+b
n0 = c+d
return (a-b) / n1, (c-d) / n0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Contingency Table Visualization")
parser.add_argument("input",
metavar = "INPUT",
help = "Input file"
)
parser.add_argument("-f", "--flip",
required = False,
action = "store_true",
help = "If set, flips values in the visualization to ensure that quadrant 3 is not used"
)
parser.add_argument("-p", "--prune",
required = False,
action = "store_true",
help = "If set, prunes duplicates points"
)
arguments = parser.parse_args()
input_file = arguments.input
flip = arguments.flip
prune = arguments.prune
with open(input_file) as f:
data = json.load(f)
shapelets = data["shapelets"]
tables = []
for shapelet in shapelets:
tables.append( shapelet["table"] )
points = []
for table in tables:
x,y = transform_table(table)
if flip and ( (x < 0 and y < 0) or (np.sign(x) != np.sign(y) and -x > y) ):
x,y = -y,-x
points.append( (x,y) )
if prune:
points = set(points)
for x,y in points:
print("{}\t{}".format(x,y))
|
[
"json.load",
"argparse.ArgumentParser",
"numpy.sign"
] |
[((909, 979), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Contingency Table Visualization"""'}), "(description='Contingency Table Visualization')\n", (932, 979), False, 'import argparse\n'), ((1572, 1584), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1581, 1584), False, 'import json\n'), ((1816, 1826), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (1823, 1826), True, 'import numpy as np\n'), ((1830, 1840), 'numpy.sign', 'np.sign', (['y'], {}), '(y)\n', (1837, 1840), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: See LICENSE file
# Copyright: 2020 (c) The Alan Turing Institute
from flask import current_app, flash
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import (
DataRequired,
Email,
EqualTo,
Optional,
ValidationError,
)
from app.models import User
class LoginForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()])
submit = SubmitField("Sign In")
class RegistrationForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
email = StringField("Email", validators=[DataRequired(), Email()])
fullname = StringField("Full Name (optional)", validators=[])
password = PasswordField("Password", validators=[DataRequired()])
password2 = PasswordField(
"Repeat Password", validators=[DataRequired(), EqualTo("password")]
)
toc = BooleanField(
"I agree to the Terms and Conditions.", validators=[DataRequired()]
)
credit = BooleanField(
"Check this box if you would like to be publically credited with having "
"contributed to this work. By default, users will remain anonymous.",
validators=[Optional()],
)
updated = BooleanField(
"Check this box if you wish to be kept up to date with the "
"progress of this work by email.",
validators=[Optional()],
)
submit = SubmitField("Register")
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(
"Username already in use, please use a different one."
)
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(
"Email address already in use, please use a different one."
)
if current_app.config["USER_EMAILS"]:
if email.data in current_app.config["USER_EMAILS"]:
return
if current_app.config["USER_EMAIL_DOMAINS"]:
if (
not email.data.split("@")[-1]
in current_app.config["USER_EMAIL_DOMAINS"]
):
raise ValidationError(
"Access to AnnotateChange is restricted to "
"individuals with email addresses from specific "
"institutions. Please use your employee email address "
"when signing up. If that does not solve the issue, "
"you unfortunately do not have access to "
"AnnotateChange at this time."
)
def validate_credit(self, credit):
if credit.data and not self.fullname.data:
flash(
"Please provide your full name if you wish to "
"be credited with contributing to this work.", "error")
raise ValidationError(
"Please provide your full name if you wish to "
"be credited with contributing to this work."
)
class ResetPasswordRequestForm(FlaskForm):
email = StringField("Email", validators=[DataRequired(), Email()])
submit = SubmitField("Request password reset")
class ResetPasswordForm(FlaskForm):
password = PasswordField("Password", validators=[DataRequired()])
password2 = PasswordField(
"<PASSWORD>", validators=[DataRequired(), EqualTo("password")]
)
submit = SubmitField("Request Password Reset")
|
[
"flask.flash",
"wtforms.validators.Email",
"wtforms.SubmitField",
"app.models.User.query.filter_by",
"wtforms.validators.Optional",
"wtforms.validators.EqualTo",
"wtforms.StringField",
"wtforms.validators.DataRequired",
"wtforms.validators.ValidationError"
] |
[((595, 617), 'wtforms.SubmitField', 'SubmitField', (['"""Sign In"""'], {}), "('Sign In')\n", (606, 617), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((809, 859), 'wtforms.StringField', 'StringField', (['"""Full Name (optional)"""'], {'validators': '[]'}), "('Full Name (optional)', validators=[])\n", (820, 859), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((1567, 1590), 'wtforms.SubmitField', 'SubmitField', (['"""Register"""'], {}), "('Register')\n", (1578, 1590), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((3436, 3473), 'wtforms.SubmitField', 'SubmitField', (['"""Request password reset"""'], {}), "('Request password reset')\n", (3447, 3473), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((3703, 3740), 'wtforms.SubmitField', 'SubmitField', (['"""Request Password Reset"""'], {}), "('Request Password Reset')\n", (3714, 3740), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((1750, 1821), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Username already in use, please use a different one."""'], {}), "('Username already in use, please use a different one.')\n", (1765, 1821), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((1999, 2075), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Email address already in use, please use a different one."""'], {}), "('Email address already in use, please use a different one.')\n", (2014, 2075), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((2989, 3105), 'flask.flash', 'flash', (['"""Please provide your full name if you wish to be credited with contributing to this work."""', '"""error"""'], {}), "(\n 'Please provide your full name if you wish to be credited with contributing to this work.'\n , 'error')\n", (2994, 3105), False, 'from flask import current_app, flash\n'), ((3150, 3267), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Please provide your full name if you wish to be credited with contributing to this work."""'], {}), "(\n 'Please provide your full name if you wish to be credited with contributing to this work.'\n )\n", (3165, 3267), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((495, 509), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (507, 509), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((565, 579), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (577, 579), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((706, 720), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (718, 720), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((768, 782), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (780, 782), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((784, 791), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (789, 791), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((913, 927), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (925, 927), False, 'from wtforms.validators 
import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((1000, 1014), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1012, 1014), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((1016, 1035), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password"""'], {}), "('password')\n", (1023, 1035), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((1127, 1141), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1139, 1141), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((1356, 1366), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (1364, 1366), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((1535, 1545), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (1543, 1545), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((1650, 1694), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (1670, 1694), False, 'from app.models import User\n'), ((1905, 1943), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (1925, 1943), False, 'from app.models import User\n'), ((2452, 2742), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Access to AnnotateChange is restricted to individuals with email addresses from specific institutions. Please use your employee email address when signing up. If that does not solve the issue, you unfortunately do not have access to AnnotateChange at this time."""'], {}), "(\n 'Access to AnnotateChange is restricted to individuals with email addresses from specific institutions. Please use your employee email address when signing up. If that does not solve the issue, you unfortunately do not have access to AnnotateChange at this time.'\n )\n", (2467, 2742), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((3397, 3411), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (3409, 3411), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((3413, 3420), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (3418, 3420), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((3565, 3579), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (3577, 3579), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((3647, 3661), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (3659, 3661), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n'), ((3663, 3682), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password"""'], {}), "('password')\n", (3670, 3682), False, 'from wtforms.validators import DataRequired, Email, EqualTo, Optional, ValidationError\n')]
|
import pytest
from openshift_checks.logging.curator import Curator
def canned_curator(exec_oc=None):
"""Create a Curator check object with canned exec_oc method"""
check = Curator("dummy") # fails if a module is actually invoked
if exec_oc:
check._exec_oc = exec_oc
return check
def assert_error(error, expect_error):
if expect_error:
assert error
assert expect_error in error
else:
assert not error
plain_curator_pod = {
"metadata": {
"labels": {"component": "curator", "deploymentconfig": "logging-curator"},
"name": "logging-curator-1",
},
"status": {
"containerStatuses": [{"ready": True}],
"conditions": [{"status": "True", "type": "Ready"}],
"podIP": "10.10.10.10",
}
}
not_running_curator_pod = {
"metadata": {
"labels": {"component": "curator", "deploymentconfig": "logging-curator"},
"name": "logging-curator-2",
},
"status": {
"containerStatuses": [{"ready": False}],
"conditions": [{"status": "False", "type": "Ready"}],
"podIP": "10.10.10.10",
}
}
@pytest.mark.parametrize('pods, expect_error', [
(
[],
"no Curator pods",
),
(
[plain_curator_pod],
None,
),
(
[not_running_curator_pod],
"not currently in a running state",
),
(
[plain_curator_pod, plain_curator_pod],
"more than one Curator pod",
),
])
def test_get_curator_pods(pods, expect_error):
check = canned_curator()
error = check.check_curator(pods)
assert_error(error, expect_error)
|
[
"pytest.mark.parametrize",
"openshift_checks.logging.curator.Curator"
] |
[((1140, 1390), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pods, expect_error"""', "[([], 'no Curator pods'), ([plain_curator_pod], None), ([\n not_running_curator_pod], 'not currently in a running state'), ([\n plain_curator_pod, plain_curator_pod], 'more than one Curator pod')]"], {}), "('pods, expect_error', [([], 'no Curator pods'), ([\n plain_curator_pod], None), ([not_running_curator_pod],\n 'not currently in a running state'), ([plain_curator_pod,\n plain_curator_pod], 'more than one Curator pod')])\n", (1163, 1390), False, 'import pytest\n'), ((183, 199), 'openshift_checks.logging.curator.Curator', 'Curator', (['"""dummy"""'], {}), "('dummy')\n", (190, 199), False, 'from openshift_checks.logging.curator import Curator\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trac', '0016_auto_20160303_2353'),
]
operations = [
migrations.AddField(
model_name='checkpoint',
name='distance_units',
field=models.CharField(default=b'mi', max_length=2, choices=[(b'm', b'meters'), (b'km', b'kilometers'), (b'mi', b'miles')]),
),
]
|
[
"django.db.models.CharField"
] |
[((359, 481), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'mi'", 'max_length': '(2)', 'choices': "[(b'm', b'meters'), (b'km', b'kilometers'), (b'mi', b'miles')]"}), "(default=b'mi', max_length=2, choices=[(b'm', b'meters'), (\n b'km', b'kilometers'), (b'mi', b'miles')])\n", (375, 481), False, 'from django.db import models, migrations\n')]
|
"""Unittests for metrics lib."""
from llama import ping
from llama import util
import pytest
def fake_runcmd(cmd):
stderr = '''
--- shelby hping statistic ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.1/0.1/0.2 ms
'''
stdout = '''
HPING shelby (eth0 192.168.3.11): S set, 40 headers + 0 data bytes
len=46 ip=1.1.7.5 ttl=61 DF id=4696 sport=0 flags=RA seq=0 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4699 sport=0 flags=RA seq=1 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4701 sport=0 flags=RA seq=2 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4702 sport=0 flags=RA seq=3 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4704 sport=0 flags=RA seq=4 win=0 rtt=0.1 ms
'''
return 0, stdout, stderr
class TestHping3(object):
def silence_pyflakes(self):
"""PyFlakes complains because we don't explicitly use the module."""
dir(pytest)
def test_good(self, monkeypatch):
monkeypatch.setattr(util, 'runcmd', fake_runcmd)
assert ping.hping3('somehost', count=5) == ('0', '0.1', 'somehost')
|
[
"llama.ping.hping3"
] |
[((1078, 1110), 'llama.ping.hping3', 'ping.hping3', (['"""somehost"""'], {'count': '(5)'}), "('somehost', count=5)\n", (1089, 1110), False, 'from llama import ping\n')]
|
"""Module throttle.
========
Throttle
========
The throttle allows you to limit the rate at which a function is
executed. This is helpful to avoid exceeding a limit, such as when
sending requests to an internet service that specifies a limit as to the
number of requests that can be sent in a specific time interval.
The throttle package include four different algorithms for the limiting
control, each provided as a decorator or as a class:
1. **@throttle_sync** decorator and **ThrottleSync** class provide a
synchronous algorithm.
For synchronous throttling, you specify the *requests* and
*seconds* which determine the send rate limit. The throttle
keeps track of the intervals between each request and will block
only as needed to ensure the send rate limit is not exceeded.
This algorithm provides a strict adherence to the send rate limit
for those cases that need it.
2. **@throttle_sync_ec** decorator and **ThrottleSyncEc** class
provide an early arrival algorithm.
For synchronous throttling with the early arrival algorithm, you
specify the *requests* and *seconds* which determine the send
rate limit. You also specify an *early_count*, the number of
requests the throttle will send immediately without delay. Once
the *early_count* is reached, the throttle kicks in and, if
needed, delays the next request by a cumulative amount that
reflects the current request and the requests that were sent
early. This will ensure that the average send rate for all
requests stays within the send rate limit. This algorithm is best
used when you have a steady stream of requests within the send
rate limit, and an occasional burst of requests that the target
service will tolerate.
3. **@throttle_sync_lb** decorator and **ThrottleSyncLb** class
provide a leaky bucket algorithm.
For synchronous throttling with the leaky bucket algorithm, you
specify the *requests* and *seconds* which determine the send
rate limit. You also specify an *lb_threshold* value, the number
of requests that will fit into a conceptual bucket. As each
request is received, if it fits, it is placed into the bucket and
is sent. The bucket leaks at a fixed rate that reflects the send
rate limit such that each new request will fit given it does
not exceed the send rate limit. If the bucket becomes full, the
next request will be delayed until the bucket has leaked enough
to hold it, at which time it will be sent. Unlike the early count
algorithm, the leaky bucket algorithm results in an average send
rate that slightly exceeds the send rate limit. This algorithm is
best used when you have a steady stream of requests within the
send rate limit, and an occasional burst of requests that the
target service will tolerate.
4. **@throttle_async** decorator and **ThrottleAsync** class provide
an asynchronous algorithm.
With asynchronous throttling, you specify the *requests* and
*seconds* which determine the send rate limit. As each request is
received, it is placed on a queue and control returns to the
caller. A separate request scheduler thread pulls the requests
from the queue and sends them at a steady interval to achieve the
specified send rate limit. You may also specify an *async_q_size*
that determines the number of requests that can build up on the
queue before the caller is blocked while trying to add requests.
This algorithm provides a strict adherence to the send rate limit
without having to delay the caller (unless the queue becomes full).
This is best used when you have a steady stream of requests
within the send rate limit, and an occasional burst of requests
that you do not want to be delayed for. It carries an added
responsibility: you need to shut down the throttle when your
program ends to ensure that the request scheduler thread is
properly ended.
:Example: 1) Wrapping a function with the **@throttle_sync** decorator
Here we are using the **@throttle_sync** decorator to wrap a function
that needs to be limited to no more than 2 executions per second. In the
following code, make_request will be called 10 times in rapid
succession. The **@throttle_sync** keeps track of the time for each
invocation and will insert a wait as needed to stay within the limit.
The first execution of make_request will be done immediately while the
remaining executions will each be delayed by 1/2 second as seen in the
output messages.
>>> from scottbrian_throttle.throttle import throttle_sync
>>> import time
>>> @throttle_sync(requests=2, seconds=1)
... def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> start_time = time.time()
>>> for i in range(10):
... make_request(i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
:Example: 2) Using the **ThrottleSync** class
Here's the same example from above, but instead of the decorator we use
the **ThrottleSync** class. Note that the loop now calls send_request,
passing in the make_request function and its arguments:
>>> from scottbrian_throttle.throttle import ThrottleSync
>>> import time
>>> def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleSync(requests=2, seconds=1)
>>> start_time = time.time()
>>> for i in range(10):
... a_throttle.send_request(make_request, i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
:Example: 3) Wrapping a function with the **@throttle_sync_ec**
decorator
Here we continue with the same example, only this time using the
**@throttle_sync_ec** decorator to see its algorithm in action.
We will use the same *requests* of 2 and *seconds* of 1, and an
*early_count* of 2. The make_request function will again be called 10
times in rapid succession. The **@throttle_sync_ec** will allow the
first request to proceed immediately. The next two requests are
considered early, so they will be allowed to proceed as well. The fourth
request will be delayed to allow the throttle to catch up to where we
should be, and then the process will repeat with some requests going
early followed by a catch-up delay. We can see this behavior in the
messages that show the intervals.
>>> from scottbrian_throttle.throttle import throttle_sync_ec
>>> import time
>>> @throttle_sync_ec(requests=2, seconds=1, early_count=2)
... def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> start_time = time.time()
>>> for i in range(10):
... make_request(i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 1.5
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.0
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 4.5
:Example: 4) Using the **ThrottleSyncEc** class
Here we show the early count with the **ThrottleSyncEc** class:
>>> from scottbrian_throttle.throttle import ThrottleSyncEc
>>> import time
>>> def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleSyncEc(requests=2, seconds=1, early_count=2)
>>> start_time = time.time()
>>> for i in range(10):
... a_throttle.send_request(make_request, i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 1.5
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.0
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 4.5
:Example: 5) Wrapping a function with the **@throttle_sync_lb**
decorator
We now take the early count example from above and switch in the leaky
bucket algorithm instead. We will use the *requests* of 2, *seconds* of
1, and *lb_threshold* of 3. The make_request function will again be
called 10 times in rapid succession. The **@throttle_sync_lb** will
be able to fit the first three requests into the bucket and send them
immediately. The fourth request will not fit into the bucket which now
causes the throttle to delay to allow the bucket to leak out one of the
requests. After the delay, the fourth request is placed into the bucket
and sent, followed by the fifth and subsequent requests, each
of which is delayed to allow the bucket to accommodate it. We can see
this behavior in the messages that show the intervals.
>>> from scottbrian_throttle.throttle import throttle_sync_lb
>>> import time
>>> @throttle_sync_lb(requests=2, seconds=1, lb_threshold=3)
... def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> start_time = time.time()
>>> for i in range(10):
... make_request(i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 0.5
request 4 sent at elapsed time: 1.0
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 2.0
request 7 sent at elapsed time: 2.5
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 3.5
:Example: 6) Using the **ThrottleSyncLb** class
Here we show the leaky bucket example using the **ThrottleSyncLb**
class:
>>> from scottbrian_throttle.throttle import ThrottleSyncLb
>>> import time
>>> def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleSyncLb(requests=2, seconds=1, lb_threshold=3)
>>> start_time = time.time()
>>> for i in range(10):
... a_throttle.send_request(make_request, i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 0.5
request 4 sent at elapsed time: 1.0
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 2.0
request 7 sent at elapsed time: 2.5
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 3.5
:Example: 7) Wrapping a function with the **@throttle_async** decorator
We now continue with the same setup from above, only now we are using
the **@throttle_async** decorator. We will again specify *requests* of
2 and *seconds* of 1. The make_request function will be called 10
times in rapid succession. The **@throttle_async** will queue the
requests to the request queue and the schedule_requests method running
under a separate thread will dequeue and execute them at the send rate
interval determined by the requests and seconds arguments (in this case,
1/2 second). This will have similar behavior to the throttle_sync
algorithm, except that the requests are executed from a separate thread.
>>> from scottbrian_throttle.throttle import throttle_async
>>> import time
>>> @throttle_async(requests=2, seconds=1)
... def make_request(request_number, time_of_start):
... results.append(f'request {request_number} sent at elapsed time:'
... f' {time.time() - time_of_start:0.1f}')
>>> results = []
>>> start_time = time.time()
>>> for i in range(10):
... _ = make_request(i, start_time)
>>> shutdown_throttle_funcs(make_request)
>>> for line in results:
... print(line)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
:Example: 8) Using the **ThrottleAsync** class
Here we continue with the same setup, only now using the
**ThrottleAsync** class:
>>> from scottbrian_throttle.throttle import ThrottleAsync
>>> import time
>>> def make_request(request_number, time_of_start):
... results.append(f'request {request_number} sent at elapsed time:'
... f' {time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleAsync(requests=2, seconds=1)
>>> results = []
>>> start_time = time.time()
>>> for i in range(10):
... _ = a_throttle.send_request(make_request, i, start_time)
>>> _ = a_throttle.start_shutdown()
>>> for line in results:
... print(line)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
The throttle module contains:
1) Throttle classes
2) Error exception classes
3) @throttle decorators
"""
########################################################################
# Standard Library
########################################################################
import functools
import logging
import queue
import threading
import time
from typing import (Any, Callable, cast, Final, NamedTuple, Optional,
overload, Protocol, TYPE_CHECKING, Type, TypeVar, Union)
from typing_extensions import TypeAlias
########################################################################
# Third Party
########################################################################
from scottbrian_utils.pauser import Pauser
from wrapt.decorators import decorator # type: ignore
########################################################################
# Local
########################################################################
########################################################################
# type aliases and TypeVars
########################################################################
IntFloat: TypeAlias = Union[int, float]
OptIntFloat: TypeAlias = Optional[IntFloat]
# T = TypeVar('T', bound=Throttle)
########################################################################
# Throttle class exceptions
########################################################################
class ThrottleError(Exception):
"""Base class for exceptions in this module."""
pass
class IllegalSoftShutdownAfterHard(ThrottleError):
"""Throttle exception for illegal soft shutdown after hard."""
pass
class IncorrectAsyncQSizeSpecified(ThrottleError):
"""Throttle exception for incorrect async_q_size specification."""
pass
class IncorrectEarlyCountSpecified(ThrottleError):
"""Throttle exception for incorrect early_count specification."""
pass
class IncorrectLbThresholdSpecified(ThrottleError):
"""Throttle exception for incorrect lb_threshold specification."""
pass
class IncorrectModeSpecified(ThrottleError):
"""Throttle exception for incorrect mode specification."""
pass
class IncorrectRequestsSpecified(ThrottleError):
"""Throttle exception for incorrect requests specification."""
pass
class IncorrectSecondsSpecified(ThrottleError):
"""Throttle exception for incorrect seconds specification."""
pass
class IncorrectShutdownTypeSpecified(ThrottleError):
"""Throttle exception for incorrect shutdown_type specification."""
pass
class MissingEarlyCountSpecification(ThrottleError):
"""Throttle exception for missing early_count specification."""
pass
class MissingLbThresholdSpecification(ThrottleError):
"""Throttle exception for missing lb_threshold specification."""
pass
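########################################################################
# exception handling sketch (illustrative, commented out)
########################################################################
# A minimal sketch, kept as comments so nothing runs at import time, of
# how a caller might react to one of the configuration exceptions above;
# the variable names are illustrative only:
#
# try:
#     a_throttle = ThrottleSync(requests=0, seconds=1)
# except IncorrectRequestsSpecified as error:
#     print(f'throttle was not created: {error}')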
########################################################################
# get_throttle
########################################################################
# def get_throttle(
# *,
# requests: int,
# seconds: IntFloat,
# mode: int,
# async_q_size: Optional[int] = None,
# early_count: Optional[int] = None,
# lb_threshold: OptIntFloat = None
# ) -> Any:
# """Create and return the throttle object given the input mode.
#
# Args:
# requests: The number of requests that can be made in
# the interval specified by seconds.
# seconds: The number of seconds in which the number of
# requests specified in requests can be made.
# mode: Specifies one of four modes for the throttle:
#
# 1) **mode=Throttle.MODE_ASYNC** specifies asynchronous
# mode. With asynchronous throttling, each request is
# placed on a queue and control returns to the caller.
# A separate thread then executes each request at a
# steady interval to achieve the specified number of
# requests per the specified number of seconds. Since
# the caller is given back control, any return values
# from the request must be handled by an established
# protocol between the caller and the request, (e.g.,
# a callback method).
# 2) **mode=Throttle.MODE_SYNC** specifies synchronous
# mode. For synchronous throttling, the caller may be
# blocked to delay the request in order to achieve the
# specified number of requests per the specified number
# of seconds. Since the request is handled
# synchronously, a return value from the request will
# be returned to the caller when the request completes.
# 3) **mode=Throttle.MODE_SYNC_EC** specifies synchronous
# mode using an early arrival algorithm. For
# synchronous throttling with the early arrival
# algorithm, an *early_count* number of requests are
# sent immediately without delay before the throttling
# becomes active. The objective is to allow bursts of
# requests while also ensuring that the average arrival
# rate is within the limit as specified by the
# *requests* and *seconds* arguments.
# 4) **mode=Throttle.MODE_SYNC_LB** specifies synchronous
# mode using a leaky bucket algorithm. For synchronous
# throttling with the leaky bucket algorithm, some
# number of requests are sent immediately without delay
# even though they may have arrived at a quicker pace
# than that allowed by the requests and seconds
# specification. A lb_threshold specification is
# required when mode Throttle.MODE_SYNC_LB is
# specified. See the lb_threshold parameter for
# details.
# async_q_size: Specifies the size of the request
# queue for async requests. When the request
# queue is totally populated, any additional
# calls to send_request will be delayed
# until queued requests are removed and
# scheduled. The default is 4096 requests.
# early_count: Specifies the number of requests that are
# allowed to proceed immediately without delay
# for **mode=Throttle.MODE_SYNC_EC**.
# Note that a specification of 0 for
# *early_count* will result in the same
# behavior as if **mode=Throttle.MODE_SYNC**
# had been specified.
# lb_threshold: Specifies the threshold for the leaky bucket
# when Throttle.MODE_SYNC_LB is specified for
# mode. This is the number of requests that
# can be in the bucket such that the next
# request is allowed to proceed without delay.
# That request is added to the bucket, and
# then the bucket leaks out the requests.
# When the next request arrives, it will be
# delayed by whatever amount of time is
# needed for the bucket to have leaked enough
# to be at the threshold. Note that a
# specification of 1 for *lb_threshold* will
# result in the same behavior as if
# **mode=Throttle.MODE_SYNC** had been
# specified.
#
# .. # noqa: DAR101
#
# Returns:
# The throttle class for the specified mode.
#
# Raises:
# IncorrectModeSpecified: The *mode* specification must be an
# integer with a value of 1, 2, 3, or 4. Use
# Throttle.MODE_ASYNC, Throttle.MODE_SYNC,
# Throttle.MODE_SYNC_EC, or Throttle.MODE_SYNC_LB.
#
#
# :Example: instantiate an async throttle for 1 request per second
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> request_throttle = ThrottleAsync(requests=1,
# ... seconds=1)
#
#
# :Example: instantiate an async throttle for 5 requests per 1/2
# second with an async queue size of 256
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> from threading import Event
# >>> request_throttle = ThrottleAsync(requests=5,
# ... seconds=0.5,
# ... async_q_size=256)
#
#
# :Example: instantiate a throttle for 5 requests per 2 minutes
# using the early count algorithm
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> request_throttle = ThrottleSyncEc(requests=5,
# ... seconds=120,
# ... early_count=3)
#
#
# :Example: instantiate a throttle for 5 requests per 2 minutes
# using the leaky bucket algorithm
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> request_throttle = ThrottleSyncLb(requests=5,
# ... seconds=120,
# ... lb_threshold=5)
#
#
# """
# if mode == Throttle.MODE_SYNC:
# return ThrottleSync(requests=requests,
# seconds=seconds)
# elif mode == Throttle.MODE_ASYNC:
# return ThrottleAsync(requests=requests,
# seconds=seconds,
# async_q_size=async_q_size)
# elif mode == Throttle.MODE_SYNC_EC:
# if early_count is None:
# raise MissingEarlyCountSpecification(
# 'An argument for early_count must be specified '
# 'for mode=Throttle.MODE_SYNC_EC.'
# )
# return ThrottleSyncEc(requests=requests,
# seconds=seconds,
# early_count=early_count)
# elif mode == Throttle.MODE_SYNC_LB:
# if lb_threshold is None:
# raise MissingLbThresholdSpecification(
# 'An argument for lb_threshold must be specified '
# 'for mode=Throttle.MODE_SYNC_LB.'
# )
# return ThrottleSyncLb(requests=requests,
# seconds=seconds,
# lb_threshold=lb_threshold)
# else:
# raise IncorrectModeSpecified(
# 'The mode specification must be an '
# 'integer with value 1, 2, 3, or 4.')
########################################################################
# Throttle Base class
########################################################################
class Throttle:
"""Throttle base class."""
DEFAULT_ASYNC_Q_SIZE: Final[int] = 4096
TYPE_SHUTDOWN_NONE: Final[int] = 0
TYPE_SHUTDOWN_SOFT: Final[int] = 4
TYPE_SHUTDOWN_HARD: Final[int] = 8
RC_OK: Final[int] = 0
RC_SHUTDOWN: Final[int] = 4
class Request(NamedTuple):
"""NamedTuple for the request queue item."""
request_func: Callable[..., Any]
args: tuple[Any, ...]
kwargs: dict[str, Any]
arrival_time: float
SECS_2_NS: Final[int] = 1000000000
NS_2_SECS: Final[float] = 0.000000001
__slots__ = ('requests', 'seconds', '_target_interval',
'_target_interval_ns', 'sync_lock', '_arrival_time',
'_next_target_time', 'logger', 'pauser')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat
) -> None:
"""Initialize an instance of the Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
Raises:
IncorrectRequestsSpecified: The *requests* specification
must be a positive integer greater than zero.
IncorrectSecondsSpecified: The *seconds* specification must
be a positive int or float greater than zero.
"""
################################################################
# determine whether we are throttle decorator
################################################################
# self.decorator = False
# frame = inspect.currentframe()
# if frame is not None:
# if frame.f_back.f_code.co_name == 'throttle':
# self.decorator = True
# else:
# self.decorator = False
################################################################
# requests
################################################################
if (isinstance(requests, int)
and (0 < requests)):
self.requests = requests
else:
raise IncorrectRequestsSpecified('The requests '
'specification must be a '
'positive integer greater '
'than zero.')
################################################################
# seconds
################################################################
if isinstance(seconds, (int, float)) and (0 < seconds):
self.seconds = seconds # timedelta(seconds=seconds)
else:
raise IncorrectSecondsSpecified('The seconds specification '
'must be an integer or '
'float greater than zero.')
################################################################
# Set remainder of vars
################################################################
self._target_interval = seconds / requests
self._target_interval_ns: float = (self._target_interval
* Throttle.SECS_2_NS)
self.sync_lock = threading.Lock()
self._arrival_time = 0.0
self._next_target_time: float = time.perf_counter_ns()
self.logger = logging.getLogger(__name__)
self.pauser = Pauser()
####################################################################
# send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return code from the request function (may be None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# SYNC_MODE
################################################################
################################################################
# The SYNC_MODE Throttle algorithm works as follows:
# 1) during throttle instantiation:
# a) a target interval is calculated as seconds/requests.
# For example, with a specification of 4 requests per 1
# second, the target interval will be 0.25 seconds.
# b) _next_target_time is set to a current time reference via
# time.perf_counter_ns
# 2) as each request arrives, it is checked against the
# _next_target_time and:
# a) if it arrived at or after _next_target_time, it is
# allowed to proceed without delay
# b) if it arrived before the _next_target_time the request
# is delayed until _next_target_time is reached
# 3) _next_target_time is increased by the target_interval
#
################################################################
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
if self._arrival_time < self._next_target_time:
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
# delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (time.perf_counter_ns()
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
####################################################################
# get_interval
####################################################################
def get_interval_secs(self) -> float:
"""Calculate the interval between requests in seconds.
Returns:
The target interval in seconds.
"""
return self._target_interval
####################################################################
# get_interval
####################################################################
def get_interval_ns(self) -> float:
"""Calculate the interval between requests in nanoseconds.
Returns:
The target interval in nanoseconds.
"""
return self._target_interval_ns
####################################################################
# get_completion_time
####################################################################
def get_completion_time_secs(self,
requests: int,
from_start: bool) -> float:
"""Calculate completion time secs for given number requests.
Args:
requests: number of requests to do
from_start: specifies whether the calculation should be done
for a series that is starting fresh where the
first request has no delay
Returns:
The estimated number of elapsed seconds for the number
of requests specified
"""
if from_start:
return (requests - 1) * self._target_interval
else:
return requests * self._target_interval
####################################################################
# get_completion_time
####################################################################
def get_completion_time_ns(self,
requests: int,
from_start: bool) -> float:
"""Calculate completion time ns for given number requests.
Args:
requests: number of requests to do
from_start: specifies whether the calculation should be done
for a series that is starting fresh where the
first request has no delay
Returns:
The estimated number of elapsed nanoseconds for the number
of requests specified
"""
if from_start:
return (requests - 1) * self._target_interval_ns
else:
return requests * self._target_interval_ns
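# A small worked example of the arithmetic above (illustrative
# comment only, not executed): with requests=2 and seconds=1 the
# target interval is 0.5 seconds, so:
#
# a_throttle = ThrottleSync(requests=2, seconds=1)
# a_throttle.get_completion_time_secs(10, from_start=True)   # 4.5
# a_throttle.get_completion_time_secs(10, from_start=False)  # 5.0
#
# which matches the elapsed times shown in the module docstring
# examples for 10 requests at 2 requests per second.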
########################################################################
# ThrottleSync class
########################################################################
class ThrottleSync(Throttle):
"""Throttle class for sync mode."""
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat
) -> None:
"""Initialize an instance of the Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
"""
super().__init__(requests=requests,
seconds=seconds)
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
:Example: instantiate a throttle for 1 requests every 2 seconds
>>> from scottbrian_throttle.throttle import Throttle
>>> request_throttle = ThrottleSync(requests=1,
... seconds=2)
>>> repr(request_throttle)
'ThrottleSync(requests=1, seconds=2.0)'
"""
if TYPE_CHECKING:
__class__: Type[ThrottleSync]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}')
return f'{classname}({parms})'
####################################################################
# send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return code from the request function (may be None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# SYNC_MODE
################################################################
################################################################
# The SYNC_MODE Throttle algorithm works as follows:
# 1) during throttle instantiation:
# a) a target interval is calculated as seconds/requests.
# For example, with a specification of 4 requests per 1
# second, the target interval will be 0.25 seconds.
# b) _next_target_time is set to a current time reference via
# time.perf_counter_ns
# 2) as each request arrives, it is checked against the
# _next_target_time and:
# a) if it arrived at or after _next_target_time, it is
# allowed to proceed without delay
# b) if it arrived before the _next_target_time the request
# is delayed until _next_target_time is reached
# 3) _next_target_time is increased by the target_interval
#
################################################################
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
if self._arrival_time < self._next_target_time:
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
# delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (time.perf_counter_ns()
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
########################################################################
# ThrottleSyncEc class
########################################################################
class ThrottleSyncEc(ThrottleSync):
"""Throttle class with early count algo."""
__slots__ = ('early_count', '_early_arrival_count')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat,
early_count: int
) -> None:
"""Initialize an instance of the early count Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
early_count: Specifies the number of requests that are
allowed to proceed immediately without delay
for **mode=Throttle.MODE_SYNC_EC**.
Note that a specification of 0 for the
*early_count* will result in the same
behavior as if **mode=Throttle.MODE_SYNC**
had been chosen.
Raises:
IncorrectEarlyCountSpecified: *early_count* must be an
integer greater than zero.
"""
################################################################
# early_count
################################################################
super().__init__(requests=requests,
seconds=seconds)
if isinstance(early_count, int) and (0 < early_count):
self.early_count = early_count
else:
raise IncorrectEarlyCountSpecified('early_count must be '
'an integer greater '
'than zero.')
################################################################
# Set remainder of vars
################################################################
self._early_arrival_count = 0
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
:Example: instantiate a throttle for 2 requests per second
>>> from scottbrian_throttle.throttle import Throttle
>>> request_throttle = ThrottleSyncEc(requests=2,
... seconds=1,
... early_count=3)
>>> repr(request_throttle)
'ThrottleSyncEc(requests=2, seconds=1.0, early_count=3)'
.. # noqa: W505, E501
"""
if TYPE_CHECKING:
__class__: Type[ThrottleSyncEc]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}, '
f'early_count={self.early_count}')
return f'{classname}({parms})'
####################################################################
# send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return code from the request function (may be None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# SYNC_MODE_EC
################################################################
################################################################
# The SYNC_MODE_EC (sync mode with early count) Throttle
# algorithm works as follows:
# 1) during throttle instantiation:
# a) a target interval is calculated as seconds/requests.
# For example, with a specification of 4 requests per 1
# second, the target interval will be 0.25 seconds.
# b) _next_target_time is set to a current time reference via
# time.perf_counter_ns
# c) the specified early_count is saved
# d) _early_arrival_count is set to zero
# 2) as each request arrives, it is checked against the
# _next_target_time and:
# a) if it arrived at or after _next_target_time, it is
# allowed to proceed without delay and the
# _early_arrival_count is reset
# b) if it arrived before the _next_target_time, the
# _early_arrival_count is increased by 1 and:
# 1) if _early_arrival_count is less than or equal to
# early_count, the request is allowed to proceed
# without delay
# 2) if _early_arrival_count is greater than early_count,
# _early_arrival_count is reset and the request is
# delayed until _next_target_time is reached
# 3) _next_target_time is increased by the target_interval
#
# Note that as each request is sent, the _next_target_time is
# increased. This means that once the early count is exhausted,
# the next request will be delayed for the sum of target
# intervals of the requests that were sent without delay. This
# allows short bursts of requests to go immediately while also
# ensuring that the average interval not less than is the
# target interval.
################################################################
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
if self._next_target_time <= self._arrival_time:
self._early_arrival_count = 0
else:
self._early_arrival_count += 1
if self.early_count < self._early_arrival_count:
self._early_arrival_count = 0 # reset the count
# add an extra millisec for now as a test to see
# why sometimes the average interval is slightly
# less than we expect it to be - could be the
# inaccuracy of time.time()
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
# the shortest interval is 0.015 seconds
# time.sleep(wait_time)
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
# delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (max(float(time.perf_counter_ns()),
self._next_target_time
)
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
########################################################################
# ThrottleSyncLb class
########################################################################
class ThrottleSyncLb(ThrottleSync):
"""Throttle class with leaky bucket algo."""
__slots__ = ('lb_threshold', '_next_target_time', 'lb_adjustment',
'lb_adjustment_ns')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat,
lb_threshold: IntFloat
) -> None:
"""Initialize an instance of the leaky bucket Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
lb_threshold: Specifies the threshold for the leaky bucket
when Throttle.MODE_SYNC_LB is specified for
mode. This is the number of requests that
can be in the bucket such that the next
request is allowed to proceed without delay.
That request is added to the bucket, and
then the bucket leaks out the requests.
When the next request arrives, it will be
delayed by whatever amount of time is
needed for the bucket to have leaked enough
to be at the threshold. A specification of
zero for the lb_threshold will effectively
cause all requests that are early to be
delayed.
Raises:
IncorrectLbThresholdSpecified: *lb_threshold* must be an
integer or float greater than zero.
"""
################################################################
# lb_threshold
################################################################
super().__init__(requests=requests,
seconds=seconds)
if (isinstance(lb_threshold, (int, float))
and (0 < lb_threshold)):
self.lb_threshold = float(lb_threshold)
else:
raise IncorrectLbThresholdSpecified(
'lb_threshold must be an integer or float greater than '
'zero.')
################################################################
# Set remainder of vars
################################################################
self.lb_adjustment: float = max(0.0,
(self._target_interval
* self.lb_threshold)
- self._target_interval)
self.lb_adjustment_ns: float = self.lb_adjustment * Throttle.SECS_2_NS
# adjust _next_target_time for lb algo
self._next_target_time = time.perf_counter_ns() - self.lb_adjustment_ns
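# A small worked example of the adjustment above (illustrative
# comment only): with requests=2, seconds=1, and lb_threshold=3,
# the target interval is 0.5 seconds, so
# lb_adjustment = max(0.0, (0.5 * 3) - 0.5) = 1.0 second.
#
# lb_throttle = ThrottleSyncLb(requests=2, seconds=1, lb_threshold=3)
#
# Backing _next_target_time up by that amount is what lets the first
# three requests in the module docstring example go out immediately.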
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
:Example: instantiate a throttle for 20 requests per 1/2 minute
>>> from scottbrian_throttle.throttle import Throttle
>>> request_throttle = ThrottleSyncLb(requests=20,
... seconds=30,
... lb_threshold=4)
>>> repr(request_throttle)
'ThrottleSyncLb(requests=20, seconds=30.0, lb_threshold=4.0)'
.. # noqa: W505, E501
"""
if TYPE_CHECKING:
__class__: Type[ThrottleSyncLb]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}, '
f'lb_threshold={self.lb_threshold}')
return f'{classname}({parms})'
####################################################################
# MODE_SYNC_LB send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return value from the request function (perhaps None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# Leaky Bucket
################################################################
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
############################################################
# The leaky bucket algorithm uses a virtual bucket into which
# arriving requests are placed. As time progresses, the
# bucket leaks the requests out at the rate of the target
# interval. If the bucket has room for an arriving request,
# the request is placed into the bucket and is sent
# immediately. If, instead, the bucket does not have room
# for the request, the request is delayed until the bucket
# has leaked enough of the preceding requests such that the
# new request can fit and be sent. The effect of the bucket
# is to allow a burst of requests to be sent immediately at
# a faster rate than the target interval, acting as a
# shock absorber to the flow of traffic. The number of
# requests allowed to go immediately is controlled by the
# size of the bucket which in turn is specified by the
# lb_threshold argument when the throttle is instantiated.
#
# Note that by allowing short bursts to go immediately,
# the overall effect is that the average interval will be
# less than the target interval.
#
# The actual implementation does not employ a bucket, but
# instead sets a target time for the next request by adding
# the target interval and subtracting the size of the
# bucket. This has the effect of making it appear as if
# requests are arriving after the target time and are thus
# in compliance with the target interval, but eventually
# the next target time will exceed the size of the bucket
# and request will get delayed and allow the target time
# to catch up.
############################################################
if self._arrival_time < self._next_target_time:
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
# delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (max(float(time.perf_counter_ns()),
self._next_target_time
+ self.lb_adjustment_ns
)
- self.lb_adjustment_ns
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
########################################################################
# ThrottleAsync class
########################################################################
class ThrottleAsync(Throttle):
"""An asynchronous throttle mechanism."""
__slots__ = ('async_q_size', 'shutdown_lock', '_shutdown',
'do_shutdown', 'hard_shutdown_initiated',
'_check_async_q_time', '_check_async_q_time2',
'shutdown_start_time', 'shutdown_elapsed_time',
'async_q', 'request_scheduler_thread')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat,
async_q_size: Optional[int] = None,
) -> None:
"""Initialize an instance of the ThrottleAsync class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
async_q_size: Specifies the size of the request
queue for async requests. When the request
queue is totally populated, any additional
calls to send_request will be delayed
until queued requests are removed and
scheduled. The default is 4096 requests.
Raises:
IncorrectAsyncQSizeSpecified: *async_q_size* must be an
integer greater than zero.
"""
################################################################
# States and processing for mode Throttle.MODE_ASYNC:
#
# The Throttle is initialized with an empty async_q and the
# scheduler thread is started and ready to receive work. The
# starting state is 'active'.
#
# 1) state: active
# a) send_request called (directly or via decorated func
# call):
# 1) request is queued to the async_q
# 2) state remains 'active'
# b) start_shutdown called:
# 1) state is changed to 'shutdown'
# 2) Any new requests are rejected. For "soft"
# shutdown, scheduler schedules the remaining requests
# currently queued on the async_q with the normal
# interval. With "hard" shutdown, the scheduler
# removes and discards the requests on the async_q.
# 3) scheduler exits
# 4) control returns after scheduler thread returns
# 2) state: shutdown
# a) send_request called (directly or via decorated func
# call):
# 1) request is ignored (i.e, not queued to async_q)
# b) start_shutdown called (non-decorator only):
# 1) state remains 'shutdown'
# 2) control returns immediately
################################################################
################################################################
# async_q_size
################################################################
super().__init__(requests=requests,
seconds=seconds)
if async_q_size is not None:
if (isinstance(async_q_size, int) and
(0 < async_q_size)):
self.async_q_size = async_q_size
else:
raise IncorrectAsyncQSizeSpecified('async_q_size '
'must be an '
'integer greater '
'than zero.')
else:
self.async_q_size = Throttle.DEFAULT_ASYNC_Q_SIZE
################################################################
# Set remainder of vars
################################################################
self.shutdown_lock = threading.Lock()
self._shutdown = False
self.do_shutdown = Throttle.TYPE_SHUTDOWN_NONE
self.hard_shutdown_initiated = False
self._check_async_q_time = 0.0
self._check_async_q_time2 = 0.0
self.shutdown_start_time = 0.0
self.shutdown_elapsed_time = 0.0
self.async_q: queue.Queue[Throttle.Request] = queue.Queue(
maxsize=self.async_q_size)
self.request_scheduler_thread: threading.Thread = threading.Thread(
target=self.schedule_requests)
self.request_scheduler_thread.start()
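# A minimal usage sketch (illustrative comment only, not executed):
# because __init__ starts the scheduler thread above, an async
# throttle should be paired with a shutdown call so the thread ends
# when the program is done; some_request_func is a hypothetical
# caller-supplied function:
#
# a_throttle = ThrottleAsync(requests=2, seconds=1)
# try:
#     a_throttle.send_request(some_request_func)
# finally:
#     a_throttle.start_shutdown()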
####################################################################
# len
####################################################################
def __len__(self) -> int:
"""Return the number of items in the async_q.
Returns:
The number of entries in the async_q as an integer
The calls to the send_request add request items to the async_q
for mode Throttle.MODE_ASYNC. The request items are
eventually removed and scheduled. The len of Throttle is the
number of request items on the async_q when the len function
is called. Note that the returned queue size is the approximate
size as described in the documentation for the python threading
queue.
:Example: instantiate a throttle for 1 request per second
>>> from scottbrian_throttle.throttle import Throttle
>>> import time
>>> def my_request():
... pass
>>> request_throttle = ThrottleAsync(requests=1,
... seconds=1)
>>> for i in range(3): # quickly queue up 3 items
... _ = request_throttle.send_request(my_request)
>>> time.sleep(0.5) # allow first request to be dequeued
>>> print(len(request_throttle))
2
>>> request_throttle.start_shutdown()
"""
return self.async_q.qsize()
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
        :Example: instantiate a throttle for 30 requests per 1/2 minute
        >>> from scottbrian_throttle.throttle import ThrottleAsync
>>> request_throttle = ThrottleAsync(requests=30,
... seconds=30)
...
>>> repr(request_throttle)
'ThrottleAsync(requests=30, seconds=30.0, async_q_size=4096)'
>>> request_throttle.start_shutdown()
"""
if TYPE_CHECKING:
__class__: Type[ThrottleAsync]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}, '
f'async_q_size={self.async_q_size}')
return f'{classname}({parms})'
####################################################################
# ASYNC_MODE send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> int:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
* ``Throttle.RC_OK`` (0) request scheduled
* ``Throttle.RC_SHUTDOWN`` (4) - the request was rejected
because the throttle was shut down.
"""
if self._shutdown:
return Throttle.RC_SHUTDOWN
# TODO: use se_lock
# We obtain the shutdown lock to protect against the following
# scenario:
        #    1) send_request is entered for async mode and sees at
        #       the while statement that we are *not* in shutdown
        #    2) send_request proceeds to the try statement just
        #       before the request will be queued to the async_q
        #    3) shutdown is requested and is detected by
        #       schedule_requests
        #    4) schedule_requests cleans up the async_q and exits
        #    5) back here in send_request, we put our request on the
        #       async_q - this request will never be processed
with self.shutdown_lock:
request_item = Throttle.Request(func,
args,
kwargs,
time.perf_counter_ns())
while not self._shutdown:
try:
self.async_q.put(request_item,
block=True,
timeout=0.5)
return Throttle.RC_OK
except queue.Full:
continue # no need to wait since we already did
return Throttle.RC_SHUTDOWN
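    # Note: a minimal, hypothetical direct-call sketch (kept as comments
    # so nothing executes at import time). send_request queues the
    # callable and returns immediately with a return code:
    #
    #     a_throttle = ThrottleAsync(requests=10, seconds=1)
    #     rc = a_throttle.send_request(some_func, some_arg, kwarg1=42)
    #     if rc == Throttle.RC_SHUTDOWN:
    #         ...  # throttle already shut down; request was not queued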
####################################################################
# schedule_requests
####################################################################
def schedule_requests(self) -> None:
"""Get tasks from queue and run them.
Raises:
Exception: re-raise any throttle schedule_requests unhandled
exception in request
"""
# Requests will be scheduled from the async_q at the interval
# calculated from the requests and seconds arguments when the
# throttle was instantiated. If shutdown is indicated,
# the async_q will be cleaned up with any remaining requests
# either processed (Throttle.TYPE_SHUTDOWN_SOFT) or dropped
# (Throttle.TYPE_SHUTDOWN_HARD). Note that async_q.get will only
# wait for a second to allow us to detect shutdown in a timely
# fashion.
while True:
# obtained_nowait = False
# try:
# self._check_async_q_time = time.perf_counter_ns()
#
# request_item = self.async_q.get_nowait()
# self._next_target_time = (time.perf_counter_ns()
# + self._target_interval_ns)
# obtained_nowait = True
# except queue.Empty:
try:
# self._check_async_q_time2 = time.perf_counter_ns()
request_item = self.async_q.get(block=True,
timeout=1)
self._next_target_time = (time.perf_counter_ns()
+ self._target_interval_ns)
except queue.Empty:
if self.do_shutdown != Throttle.TYPE_SHUTDOWN_NONE:
return
continue # no need to wait since we already did
############################################################
# Call the request function.
# We use try/except to log and re-raise any unhandled
# errors.
############################################################
try:
if self.do_shutdown != Throttle.TYPE_SHUTDOWN_HARD:
self._arrival_time = request_item.arrival_time
request_item.request_func(*request_item.args,
**request_item.kwargs)
# obtained_nowait=obtained_nowait)
except Exception as e:
self.logger.debug('throttle schedule_requests unhandled '
f'exception in request: {e}')
raise
############################################################
# wait (i.e., throttle)
# Note that the wait time could be anywhere from a fraction
# of a second to several seconds. We want to be responsive
# in case we need to bail for shutdown, so we wait in 1
# second or fewer increments and bail if we detect shutdown.
############################################################
while True:
# handle shutdown
if self.do_shutdown != Throttle.TYPE_SHUTDOWN_NONE:
if self.async_q.empty():
return # we are done with shutdown
if self.do_shutdown == Throttle.TYPE_SHUTDOWN_HARD:
break # don't sleep for hard shutdown
# Use min to ensure we don't sleep too long and appear
# slow to respond to a shutdown request
sleep_seconds = (self._next_target_time
- time.perf_counter_ns()) * Throttle.NS_2_SECS
if sleep_seconds > 0: # if still time to go
self.pauser.pause(min(1.0, sleep_seconds))
# time_trace, stop_time = self.pauser.pause(min(1.0,
# sleep_seconds))
# self.time_traces.append(time_trace)
# self.stop_times.append(stop_time)
else: # we are done sleeping
break
####################################################################
# start_shutdown
####################################################################
def start_shutdown(self,
shutdown_type: int = Throttle.TYPE_SHUTDOWN_SOFT,
timeout: OptIntFloat = None
) -> bool:
"""Shutdown the async throttle request scheduling.
Shutdown is used to stop and clean up any pending requests on
the async request queue for a throttle created with
mode Throttle.MODE_ASYNC. This should be done during normal
application shutdown or when an error occurs. Once the throttle
has completed shutdown it can no longer be used. If a throttle
is once again needed after shutdown, a new one will need to be
instantiated to replace the old one.
Note that a soft shutdown can be started and eventually be
followed by a hard shutdown to force shutdown to complete
quickly. A hard shutdown, however, can not be followed by a
soft shutdown since there is no way to retrieve and run any
of the requests that were already removed and tossed by the
hard shutdown.
Args:
shutdown_type: specifies whether to do a soft or a hard
shutdown:
* A soft shutdown
(Throttle.TYPE_SHUTDOWN_SOFT),
the default, stops any additional
requests from being queued and cleans up
the request queue by scheduling any
remaining requests at the normal interval
as calculated by the *seconds* and
*requests* arguments specified during
throttle instantiation.
* A hard shutdown
(Throttle.TYPE_SHUTDOWN_HARD) stops any
additional requests from being queued and
cleans up the request queue by quickly
removing any remaining requests without
executing them.
timeout: number of seconds to allow for shutdown to
complete. If the shutdown times out, control is
returned with a return value of False. The
shutdown will continue and a subsequent call to
start_shutdown, with or without a timeout value,
may eventually return control with a return value
of True to indicate that the shutdown has
                completed. Note that a *timeout* value of zero or
                less is handled as if a *timeout* of None was
                specified, whether explicitly or by default, in
                which case the shutdown will not time out and
                control will be returned if and when the shutdown
                completes. A very small value, such as 0.001,
can be used to start the shutdown and then get
back control to allow other cleanup activities
to be performed and eventually issue a second
shutdown request to ensure that it is completed.
.. # noqa: DAR101
Returns:
* ``True`` if *timeout* was not specified, or if it was
specified and the ``start_shutdown()`` request completed
within the specified number of seconds.
* ``False`` if *timeout* was specified and the
``start_shutdown()`` request did not complete within the
specified number of seconds, or a soft shutdown was
terminated by a hard shutdown.
Raises:
IllegalSoftShutdownAfterHard: A shutdown with shutdown_type
Throttle.TYPE_SHUTDOWN_SOFT was requested after a
shutdown with shutdown_type Throttle.TYPE_SHUTDOWN_HARD
had already been initiated. Once a hard shutdown has
been initiated, a soft shutdown is not allowed.
IncorrectShutdownTypeSpecified: For start_shutdown,
*shutdownType* must be specified as either
Throttle.TYPE_SHUTDOWN_SOFT or
Throttle.TYPE_SHUTDOWN_HARD
"""
if shutdown_type not in (Throttle.TYPE_SHUTDOWN_SOFT,
Throttle.TYPE_SHUTDOWN_HARD):
raise IncorrectShutdownTypeSpecified(
'For start_shutdown, shutdownType must be specified as '
'either Throttle.TYPE_SHUTDOWN_SOFT or '
'Throttle.TYPE_SHUTDOWN_HARD')
################################################################
# We are good to go for shutdown
################################################################
self._shutdown = True # tell send_request to reject requests
# There is only one shutdown per throttle instantiation, so we
# will capture the shutdown length of time starting with the
# first shutdown request. Any subsequent shutdown requests will
# not affect the total shutdown time.
if self.shutdown_start_time == 0.0:
self.shutdown_start_time = time.time()
# We use the shutdown lock to block us until any in progress
# send_requests are complete
# TODO: use se_lock
with self.shutdown_lock:
# It is OK to start a soft shutdown and follow that with
# a hard shutdown, but not OK to start a hard shutdown
# and then follow that with a soft shutdown. The reason is
# that a soft shutdown finishes the queued requests while
# also doing the throttling, meaning that a soft shutdown
# is done when the queued requests are important and must be
# done. Following a soft shutdown with a hard shutdown
# would indicate that the soft shutdown was taking too long
# and there was a decision to end it with the hard shutdown
# for the more dire need to bring the system down quickly.
# A hard shutdown, on the other hand, is initially
# done when the requests are not required to complete. So,
# following a hard shutdown with a soft shutdown would
# indicate conflict, and in this case it will be impossible
# to retrieve the requests that have already been tossed.
# We tell the caller via the exception that the soft request
# after a hard request is a conflict that may not have been
# intended.
if shutdown_type == Throttle.TYPE_SHUTDOWN_HARD:
self.hard_shutdown_initiated = True
# if soft shutdown in progress
if self.do_shutdown == Throttle.TYPE_SHUTDOWN_SOFT:
self.logger.debug('Hard shutdown request detected soft '
'shutdown in progress - soft shutdown '
'will terminate.')
elif self.hard_shutdown_initiated: # soft after hard
raise IllegalSoftShutdownAfterHard(
'A shutdown with shutdown_type '
'Throttle.TYPE_SHUTDOWN_SOFT was requested after a '
'shutdown with shutdown_type '
'Throttle.TYPE_SHUTDOWN_HARD had already been '
'initiated. Once a hard shutdown has been '
'initiated, a soft shutdown is not allowed.')
# now that we are OK with the shutdown type, set do_shutdown
# with the type of shutdown to tell the schedule_requests
# method how to handle the queued requests (toss for hard
# shutdown, complete normally with throttling for soft
# shutdown)
self.do_shutdown = shutdown_type
# join the schedule_requests thread to wait for the shutdown
if timeout and (timeout > 0):
self.request_scheduler_thread.join(timeout=timeout)
if self.request_scheduler_thread.is_alive():
self.logger.debug('start_shutdown request timed out '
f'with {timeout=:.4f}')
return False # we timed out
else:
self.request_scheduler_thread.join()
with self.shutdown_lock:
if (shutdown_type == Throttle.TYPE_SHUTDOWN_SOFT
and self.hard_shutdown_initiated):
self.logger.debug('Soft shutdown request detected hard '
'shutdown initiated - soft shutdown '
'returning False.')
return False # the soft shutdown was terminated
# indicate shutdown no longer in progress
self.do_shutdown = Throttle.TYPE_SHUTDOWN_NONE
if self.shutdown_elapsed_time == 0.0:
self.shutdown_elapsed_time = (time.time()
- self.shutdown_start_time)
self.logger.debug('start_shutdown request successfully completed '
f'in {self.shutdown_elapsed_time:.4f} seconds')
return True # shutdown was successful
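    # Note: a minimal, hypothetical shutdown sketch (assumed names, kept
    # as comments): try a soft shutdown with a timeout first and escalate
    # to a hard shutdown if the async_q has not drained in time.
    #
    #     a_throttle = ThrottleAsync(requests=1, seconds=1)
    #     ... send requests ...
    #     if not a_throttle.start_shutdown(timeout=5):
    #         # still draining - discard whatever remains
    #         a_throttle.start_shutdown(
    #             shutdown_type=Throttle.TYPE_SHUTDOWN_HARD)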
########################################################################
# Pie Throttle Decorator
########################################################################
F = TypeVar('F', bound=Callable[..., Any])
########################################################################
# FuncWithThrottleSyncAttr class
########################################################################
class FuncWithThrottleSyncAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleSync
__call__: F
def add_throttle_sync_attr(func: F) -> FuncWithThrottleSyncAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleSyncAttr[F], func)
########################################################################
# FuncWithThrottleSyncEcAttr class
########################################################################
class FuncWithThrottleSyncEcAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleSyncEc
__call__: F
def add_throttle_sync_ec_attr(func: F) -> FuncWithThrottleSyncEcAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleSyncEcAttr[F], func)
########################################################################
# FuncWithThrottleSyncLbAttr class
########################################################################
class FuncWithThrottleSyncLbAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleSyncLb
__call__: F
def add_throttle_sync_lb_attr(func: F) -> FuncWithThrottleSyncLbAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleSyncLbAttr[F], func)
########################################################################
# FuncWithThrottleAsyncAttr class
########################################################################
class FuncWithThrottleAsyncAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleAsync
__call__: F
def add_throttle_async_attr(func: F) -> FuncWithThrottleAsyncAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleAsyncAttr[F], func)
########################################################################
# @throttle_sync
########################################################################
@overload
def throttle_sync(wrapped: F, *,
requests: int,
seconds: IntFloat
) -> FuncWithThrottleSyncAttr[F]:
pass
@overload
def throttle_sync(*,
requests: int,
seconds: IntFloat
) -> Callable[[F], FuncWithThrottleSyncAttr[F]]:
pass
def throttle_sync(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any
) -> Union[F, FuncWithThrottleSyncAttr[F]]:
"""Decorator to wrap a function in a sync throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
Returns:
        A callable function that delays each request as needed so
        that the specified limits are not exceeded.
    :Example: wrap a function with a sync throttle for 1 request
per second
    >>> from scottbrian_throttle.throttle import throttle_sync
>>> @throttle_sync(requests=1, seconds=1)
... def f1() -> None:
... print('example 1 request function')
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleSyncAttr[F],
functools.partial(throttle_sync,
requests=requests,
seconds=seconds))
a_throttle_sync = ThrottleSync(requests=requests,
seconds=seconds)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_sync.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_sync_attr(wrapper)
wrapper.throttle = a_throttle_sync
return cast(FuncWithThrottleSyncAttr[F], wrapper)
########################################################################
# @throttle_sync_ec
########################################################################
@overload
def throttle_sync_ec(wrapped: F, *,
requests: int,
seconds: IntFloat,
early_count: int
) -> FuncWithThrottleSyncEcAttr[F]:
pass
@overload
def throttle_sync_ec(*,
requests: int,
seconds: IntFloat,
early_count: int
) -> Callable[[F], FuncWithThrottleSyncEcAttr[F]]:
pass
def throttle_sync_ec(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any, # : IntFloat,
early_count: int
) -> Union[F, FuncWithThrottleSyncEcAttr[F]]:
"""Decorator to wrap a function in a sync ec throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
early_count: Specifies the number of requests that are allowed
to proceed that arrive earlier than the
allowed interval. The count of early requests
is incremented, and when it exceeds the
early_count, the request will be delayed to
align it with its expected arrival time. Any
request that arrives at or beyond the
allowed interval will cause the count to be
                     reset (including the request that was delayed
since it will now be sent at the allowed
interval). A specification of zero for the
*early_count* will effectively cause all requests
that are early to be delayed.
Returns:
        A callable function that delays each request as needed,
        using the early count algorithm, so that the specified
        limits are not exceeded.
    :Example: wrap a function with a throttle for 5 requests per 2
        minutes using the early count algo
    >>> from scottbrian_throttle.throttle import throttle_sync_ec
>>> @throttle_sync_ec(requests=5,
... seconds=120,
... early_count=3)
... def f3(b=3) -> int:
... print(f'example 3 request function with arg {b}')
... return b * 5
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleSyncEcAttr[F],
functools.partial(throttle_sync_ec,
requests=requests,
seconds=seconds,
early_count=early_count))
a_throttle_sync_ec = ThrottleSyncEc(requests=requests,
seconds=seconds,
early_count=early_count)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_sync_ec.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_sync_ec_attr(wrapper)
wrapper.throttle = a_throttle_sync_ec
return cast(FuncWithThrottleSyncEcAttr[F], wrapper)
########################################################################
# @throttle_sync_lb
########################################################################
@overload
def throttle_sync_lb(wrapped: F, *,
requests: int,
seconds: IntFloat,
lb_threshold: float
) -> FuncWithThrottleSyncLbAttr[F]:
pass
@overload
def throttle_sync_lb(*,
requests: int,
seconds: IntFloat,
lb_threshold: float
) -> Callable[[F], FuncWithThrottleSyncLbAttr[F]]:
pass
def throttle_sync_lb(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any, # : IntFloat,
lb_threshold: float
) -> Union[F, FuncWithThrottleSyncLbAttr[F]]:
"""Decorator to wrap a function in a sync lb throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
lb_threshold: Specifies the threshold for the leaky bucket when
Throttle.MODE_SYNC_LB is specified for mode.
This is the number of requests that can be in
the bucket such that the next request is allowed
to proceed without delay. That request is
added to the bucket, and then the bucket leaks
out the requests. When the next request arrives,
it will be delayed by whatever amount of time is
needed for the bucket to have leaked enough to
be at the threshold. A specification of zero for
the *lb_threshold* will effectively cause all
requests that are early to be delayed.
Returns:
        A callable function that delays each request as needed,
        using the leaky bucket algorithm, so that the specified
        limits are not exceeded.
    :Example: wrap a function with a throttle for 5 requests per 2
        minutes using the leaky bucket algo
    >>> from scottbrian_throttle.throttle import throttle_sync_lb
>>> @throttle_sync_lb(requests=5,
... seconds=120,
... lb_threshold=5)
... def f4(a, *, b=4) -> int:
... print(f'example request function with args {a} and {b}')
... return b * 7
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleSyncLbAttr[F],
functools.partial(throttle_sync_lb,
requests=requests,
seconds=seconds,
lb_threshold=lb_threshold))
a_throttle_sync_lb = ThrottleSyncLb(requests=requests,
seconds=seconds,
lb_threshold=lb_threshold)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_sync_lb.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_sync_lb_attr(wrapper)
wrapper.throttle = a_throttle_sync_lb
return cast(FuncWithThrottleSyncLbAttr[F], wrapper)
########################################################################
# @throttle_async
########################################################################
@overload
def throttle_async(wrapped: F, *,
requests: int,
seconds: IntFloat,
async_q_size: Optional[int] = None
) -> FuncWithThrottleAsyncAttr[F]:
pass
@overload
def throttle_async(*,
requests: int,
seconds: IntFloat,
async_q_size: Optional[int] = None
) -> Callable[[F], FuncWithThrottleAsyncAttr[F]]:
pass
def throttle_async(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any, # : IntFloat,
async_q_size: Optional[int] = None
) -> Union[F, FuncWithThrottleAsyncAttr[F]]:
"""Decorator to wrap a function in an async throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
async_q_size: Specifies the size of the request
queue for async requests. When the request
                      queue is totally populated, any additional
calls to send_request will be delayed
until queued requests are removed and
scheduled. The default is 4096 requests.
Returns:
        A callable function that queues each request to be scheduled
        in accordance with the specified limits.
:Example: wrap a function with an async throttle for 1 request
per second
    >>> from scottbrian_throttle.throttle import throttle_async
    >>> from scottbrian_throttle.throttle import shutdown_throttle_funcs
>>> @throttle_async(requests=1, seconds=1)
... def f1() -> None:
... print('example 1 request function')
>>> shutdown_throttle_funcs(f1)
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleAsyncAttr[F],
functools.partial(throttle_async,
requests=requests,
seconds=seconds,
async_q_size=async_q_size))
a_throttle_async = ThrottleAsync(requests=requests,
seconds=seconds,
async_q_size=async_q_size)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_async.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_async_attr(wrapper)
wrapper.throttle = a_throttle_async
return cast(FuncWithThrottleAsyncAttr[F], wrapper)
########################################################################
# shutdown_throttle_funcs
########################################################################
def shutdown_throttle_funcs(
*args: FuncWithThrottleAsyncAttr[Callable[..., Any]],
# *args: FuncWithThrottleAttr[Protocol[F]],
shutdown_type: int = Throttle.TYPE_SHUTDOWN_SOFT,
timeout: OptIntFloat = None
) -> bool:
"""Shutdown the throttle request scheduling for decorated functions.
    The shutdown_throttle_funcs function is used to shut down one or
    more functions that were decorated with the throttle. The arguments
    apply to each of the functions that are specified to be shutdown. If
    timeout is specified, then True is returned iff all functions
    complete shutdown within the specified number of seconds.
Args:
args: one or more functions to be shutdown
shutdown_type: specifies whether to do a soft or a hard
shutdown:
* A soft shutdown
(Throttle.TYPE_SHUTDOWN_SOFT), the default,
stops any additional requests from being
queued and cleans up the request queue by
scheduling any remaining requests at the
normal interval as calculated by the seconds
and requests that were specified during
instantiation.
* A hard shutdown (Throttle.TYPE_SHUTDOWN_HARD)
stops any additional requests from being
queued and cleans up the request queue by
quickly removing any remaining requests
without executing them.
timeout: number of seconds to allow for shutdown to complete for
all functions specified to be shutdown.
Note that a *timeout* of zero or less is equivalent
to a *timeout* of None, meaning start_shutdown will
return when the shutdown is complete without a
timeout.
.. # noqa: DAR101
Returns:
* ``True`` if *timeout* was not specified, or if it was
specified and all of the specified functions completed
shutdown within the specified number of seconds.
* ``False`` if *timeout* was specified and at least one of the
functions specified to shutdown did not complete within the
specified number of seconds.
"""
start_time = time.time() # start the clock
####################################################################
# get all shutdowns started
####################################################################
for func in args:
func.throttle.start_shutdown(
shutdown_type=shutdown_type,
timeout=0.01)
####################################################################
# check each shutdown
# Note that if timeout was not specified, then we simply call
# shutdown for each func and hope that each one eventually
# completes. If timeout was specified, then we will call each
# shutdown with whatever timeout time remains and bail on the first
# timeout we get.
####################################################################
if timeout is None or timeout <= 0:
for func in args:
func.throttle.start_shutdown(shutdown_type=shutdown_type)
else: # timeout specified and is a non-zero positive value
for func in args:
            # use max to ensure a non-zero positive timeout value
if not func.throttle.start_shutdown(
shutdown_type=shutdown_type,
timeout=max(0.01, start_time + timeout - time.time())):
func.throttle.logger.debug('timeout of '
'shutdown_throttle_funcs '
f'with timeout={timeout}')
return False # we timed out
# if we are here then all shutdowns are complete
return True
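# Note: a minimal, hypothetical usage sketch for the pie decorator plus
# shutdown_throttle_funcs (function names assumed, kept as comments):
#
#     @throttle_async(requests=2, seconds=1)
#     def poll_service_a(): ...
#
#     @throttle_async(requests=5, seconds=1)
#     def poll_service_b(): ...
#
#     ... call poll_service_a() / poll_service_b() as needed ...
#
#     # soft shutdown both, waiting up to 10 seconds, then escalate
#     if not shutdown_throttle_funcs(poll_service_a, poll_service_b,
#                                    timeout=10):
#         shutdown_throttle_funcs(
#             poll_service_a, poll_service_b,
#             shutdown_type=Throttle.TYPE_SHUTDOWN_HARD)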
|
[
"threading.Thread",
"functools.partial",
"scottbrian_utils.pauser.Pauser",
"typing.cast",
"time.time",
"threading.Lock",
"time.perf_counter_ns",
"typing.TypeVar",
"queue.Queue",
"logging.getLogger"
] |
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # This is a data-parallelized Neural Network # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
############################################################################################################
########################################### IMPORT PACKAGES ################################################
############################################################################################################
# General
import os
import functools
import time
import numpy as np
import pandas as pd
import random
import math
import warnings
# Parallelization
from mpi4py import MPI
##############################################################################################################
########################################## HELPER FUNCTIONS ##################################################
##############################################################################################################
def sigmoid(x):
return 1/(1+np.exp(-x))
def sigmoid_der(x):
    return sigmoid(x) * (1 - sigmoid(x))
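# Note on the derivative: sigmoid_der uses the identity
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)). When the activation
# a = sigmoid(z) is already available (as in the backpropagation step
# below), the same value could be computed more cheaply as a * (1 - a);
# the explicit form is kept here for clarity.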
def train_network(wh,wo,epochs,train_X,train_Y):
for epoch in range(epochs):
# slice data
        sliced_inputs = np.asarray(np.split(train_X, comm.size))
        sliced_labels = np.asarray(np.split(train_Y, comm.size))
        size = int(len(train_X)/comm.size)
        # receive buffers must match the shape/dtype of one slice
        inputs_buf = np.zeros((size, train_X.shape[1]), dtype=train_X.dtype)
        labels_buf = np.zeros((size, train_Y.shape[1]), dtype=train_Y.dtype)
        # send a slice of the data to each process
        comm.Scatter(sliced_inputs, inputs_buf, root=0)
        comm.Scatter(sliced_labels, labels_buf, root=0)
        # note: the forward/backward pass below still uses the full
        # train_X/train_Y on every rank; the scattered slices are not
        # used in the gradient computation
### neural network iterations ###
## feedforward ##
# hidden layer
zh = np.dot(train_X, wh)
ah = sigmoid(zh)
# output layer
zo = np.dot(ah, wo)
ao = sigmoid(zo)
# error calculation
error_out = ((1 / (2*len(train_X))) * (np.power((ao - train_Y), 2)))
## backpropogation ##
# backpropogation from output layer to hidden layer
dcost_dao = ao - train_Y
dao_dzo = sigmoid_der(zo)
dzo_dwo = ah
dcost_wo = np.dot(dzo_dwo.T, (dcost_dao * dao_dzo))
# backpropogate from hidden layer to input layer
dcost_dzo = dcost_dao * dao_dzo
dzo_dah = wo
dcost_dah = np.dot(dcost_dzo , dzo_dah.T)
dah_dzh = sigmoid_der(zh)
dzh_dwh = train_X
dcost_wh = np.dot(dzh_dwh.T, dah_dzh * dcost_dah)
comm.Barrier()
# average error for all processes
error_buf = [0] * comm.size
try:
error_buf = comm.gather(error_out)
error_out = sum(error_buf) / len(error_buf)
except TypeError as e:
pass
# if comm.rank == 0:
# print(f'error at iteration {epoch}: {error_out.sum()}')
# gather gradients of weights for hidden layer from all processes
dcost_wh_buf = np.asarray([np.zeros_like(dcost_wh)] * comm.size)
comm.Gather(dcost_wh, dcost_wh_buf)
comm.Barrier()
dcost_wh = functools.reduce(np.add, dcost_wh_buf) / comm.size # average gradients across all processes
# gather gradients of weights for output layer
dcost_wo_buf = np.asarray([np.zeros_like(dcost_wo)] * comm.size)
comm.Gather(dcost_wo, dcost_wo_buf)
comm.Barrier()
dcost_wo = functools.reduce(np.add, dcost_wo_buf) / comm.size # average gradients across all processes
# update weights
wh -= lr * dcost_wh
wo -= lr * dcost_wo
# send updated weights to processes
comm.Bcast([wh, MPI.DOUBLE])
comm.Bcast([wo, MPI.DOUBLE])
return wh,wo
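# Note: an alternative, hypothetical reduction sketch (kept as comments).
# The Gather / functools.reduce / Bcast sequence above could be collapsed
# into a single collective that leaves the summed gradients on every rank,
# after which each rank applies an identical weight update and the final
# broadcast of the weights becomes unnecessary:
#
#     grad_sum = np.zeros_like(dcost_wh)
#     comm.Allreduce(dcost_wh, grad_sum, op=MPI.SUM)
#     dcost_wh = grad_sum / comm.size
#     # ...and likewise for dcost_wo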
def predict(theta1,theta2, inputs):
a2 = np.dot(inputs, theta1)
a2 = sigmoid(a2)
a3 = np.dot(a2, theta2)
a3 = pd.Series(sigmoid(a3).reshape(-1))
predictions = np.where(a3 >= 0.5,1,-1)
return pd.Series(predictions)
def accuracy_measures(predictions,actual):
    df = pd.concat([predictions,actual],axis = 1) # concatenate predictions & actual labels into single dataframe
df.columns = ['predictions','actual']
df['correct'] = np.where(df.predictions == df.actual,1,0)
# true positives
positives = df.loc[df.actual == 1]
true_positives = positives.correct.sum()
# false negatives
false_negatives = (positives.predictions == -1).sum()
    # true negatives
    negatives = df.loc[df.actual == -1]
    true_negatives = negatives.correct.sum()
    # false positives: actual negatives that were predicted as positive
    false_positives = (negatives.predictions == 1).sum()
# overall accuracy
accuracy = (true_positives + true_negatives)/(true_positives + true_negatives + false_positives + false_negatives)
# precision
precision = true_positives/(true_positives + false_positives)
# recall (sensitivity)
sensitivity = true_positives/(true_positives+false_negatives)
# specificity
specificity = true_negatives/(true_negatives + false_positives)
return accuracy,precision, sensitivity, specificity
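# Worked example (hypothetical numbers): with predictions [1, 1, -1, -1]
# and actual [1, -1, -1, 1] there is 1 true positive, 1 false positive,
# 1 true negative and 1 false negative, giving accuracy = 2/4 = 0.5 and
# precision = sensitivity = specificity = 0.5.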
############################################################################################################
######################################## EXECUTION & PERFORMANCE ###########################################
############################################################################################################
if __name__ == '__main__':
#suppress warnings
warnings.filterwarnings('ignore')
####################################################
############ DATA IMPORT & FORMATTING ##############
####################################################
model_df = pd.read_csv('blackjack.csv')
X = np.array(model_df[[i for i in model_df.columns if i not in {'correct_action','outcome'}]])
train_X = np.array(model_df[['player_initial_total', 'has_ace', 'dealer_card','count']])
train_Y = np.array(model_df['correct_action']).reshape(-1,1)
####################################################
############### MPI INITIALIZATION #################
####################################################
# Init MPI
comm = MPI.COMM_WORLD
# structure of the 3-layer neural network
hidden_layer_size = 10
output_layer_size = 1
lr = 1 # learning rate
epochs = 50 # iterations
# randomly initialize weights
if comm.rank == 0:
wh = np.random.rand(train_X.shape[1],hidden_layer_size) # weights for hidden layer
wo = np.random.rand(hidden_layer_size, 1) # weights for output layer
else:
wh = np.random.rand(train_X.shape[1],hidden_layer_size)
wo = np.random.rand(hidden_layer_size, 1)
comm.Barrier()
# communicate weight vectors
comm.Bcast([wh, MPI.DOUBLE])
comm.Bcast([wo, MPI.DOUBLE])
#################################################
############ NEURAL NETWORK TRAINING ############
#################################################
if comm.rank == 0:
start = time.time()
wh,wo = train_network(wh,wo,epochs,train_X,train_Y)
if comm.rank == 0:
end = time.time()
train_time = round(end-start,2)
print(f'\nEND OF TRAINING, took {train_time} seconds\n')
# write training time to file for plotting
out_filename = f'nn_train_{comm.size}.txt'
        with open(out_filename, "w") as outfile:
            outfile.write(str(train_time))
################################################
############ PREDICTIONS & RESULTS #############
################################################
# generate predictions
predictions = predict(wh,wo,train_X)
actual = pd.Series(train_Y.reshape(-1))
# compute & display results
accuracy,precision, sensitivity, specificity = accuracy_measures(predictions,actual)
print('PERFORMANCE RESULTS:')
print(f'accuracy: {100*round(accuracy,2)}%')
print(f'precision: {100*round(precision,2)}%')
print(f'sensitivity: {100*round(sensitivity,2)}%')
print(f'specificity: {100*round(specificity,2)}%\n')
|
[
"numpy.zeros_like",
"warnings.filterwarnings",
"pandas.read_csv",
"numpy.power",
"numpy.zeros",
"functools.reduce",
"time.time",
"numpy.split",
"numpy.where",
"numpy.array",
"pandas.Series",
"numpy.exp",
"numpy.random.rand",
"numpy.dot",
"pandas.concat"
] |
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 03:12
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import oauth2_backend.models.user
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('last_hierarchy_id', models.CharField(blank=True, max_length=50, null=True)),
('last_module_id', models.CharField(blank=True, max_length=50, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
],
options={
'verbose_name': 'User',
'permissions': (('list_user', 'Can list user'), ('get_user', 'Can get user')),
'verbose_name_plural': 'Users',
},
managers=[
('objects', oauth2_backend.models.user.UserManager()),
],
),
migrations.CreateModel(
name='Hierarchy',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('logo', models.ImageField(blank=True, default='logo/default.png', null=True, upload_to='logos', verbose_name='Logo')),
('code', models.CharField(blank=True, max_length=60, null=True, verbose_name='Code')),
('name', models.CharField(max_length=60, verbose_name='Name')),
('name_short', models.CharField(blank=True, max_length=40, null=True, verbose_name='Name short')),
('fiscal_creation_date', models.DateField(blank=True, null=True, verbose_name='fiscal creation date')),
('fiscal_address', models.CharField(blank=True, max_length=40, null=True, verbose_name='Fiscal address')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'hierarchy',
'permissions': (('list_hierarchy', 'Can list hierarchy'), ('get_hierarchy', 'Can get hierarchy')),
'verbose_name_plural': 'hierarchys',
},
),
migrations.CreateModel(
name='HierarchyType',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('hierarchy_type', models.CharField(choices=[('INSTITUCION', 'Institucion'), ('FILIAL', 'Filial'), ('FACULTAD', 'Facultad'), ('ESCUELA', 'Escuela'), ('CARRERA', 'Carrera'), ('DEPARTAMENTO_ACAD', 'Departamento acad.'), ('OTHER', 'Other')], max_length=50)),
('name', models.CharField(max_length=60, verbose_name='Name')),
('level', models.BigIntegerField(verbose_name='Level')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'hierarchy type',
'db_table': 'oauth2_backend_hierarchy_type',
'permissions': (('list_hierarchytype', 'Can list hierarchytype'), ('get_hierarchytype', 'Can get hierarchytype')),
'verbose_name_plural': 'hierarchy types',
},
),
migrations.CreateModel(
name='Menu',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('module', models.CharField(choices=[('WEB', 'Web informativa'), ('ADMISION', 'Admisión'), ('BACKEND', 'Backend Manager'), ('OTHER', 'Other')], default='BACKEND', max_length=50, verbose_name='module')),
('state', models.CharField(help_text='state or section (estado o grupo de estados)', max_length=50, verbose_name='State or section')),
('title', models.CharField(max_length=50, verbose_name='Title')),
('url', models.CharField(default='#', max_length=150, verbose_name='Url')),
('template_url', models.CharField(default='#', max_length=250, verbose_name='Template url')),
('pos', models.IntegerField(default=1, verbose_name='position')),
('icon', models.CharField(blank=True, default='', max_length=50, null=True, verbose_name='icon')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
('is_abstract', models.BooleanField(default=False, verbose_name='Is_abstract')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('router_json', models.TextField(blank=True, null=True, verbose_name='router json')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='childrens', to='oauth2_backend.Menu', verbose_name='parent')),
('permission', models.ForeignKey(blank=True, help_text='NULL if is root', null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Permission', verbose_name='permission')),
],
options={
'verbose_name': 'menu',
'permissions': (('list_menu', 'Can list menu'), ('get_menu', 'Can get menu')),
'verbose_name_plural': 'menus',
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('national_id_doc', models.CharField(blank=True, max_length=20, null=True, verbose_name='National identity document')),
('first_name', models.CharField(help_text='primer nombre', max_length=50, verbose_name='First name')),
('other_names', models.CharField(blank=True, help_text='otros nombres', max_length=50, null=True, verbose_name='Other names')),
('last_name', models.CharField(blank=True, help_text='apellido paterno', max_length=50, null=True, verbose_name='Last name')),
('mother_last_name', models.CharField(blank=True, help_text='apellido materno', max_length=50, null=True, verbose_name="Mother's last name")),
('birth_date', models.DateField(blank=True, null=True, verbose_name='birth date')),
('photo', models.ImageField(blank=True, default='persons/default.png', null=True, upload_to='persons', verbose_name='Photo')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'Person',
'verbose_name_plural': 'Persons',
},
),
migrations.CreateModel(
name='UserHierarchyGroup',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('access_info', models.TextField(blank=True, null=True)),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='start date')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='end date')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group', verbose_name='group')),
('hierarchy', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oauth2_backend.Hierarchy', verbose_name='hierarchy')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'user hierarchy group',
'db_table': 'oauth2_backend_user_hierarchy_group',
'verbose_name_plural': 'user hierarchy group',
},
),
migrations.CreateModel(
name='UserHierarchyPermission',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('access_info', models.TextField(blank=True, null=True)),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='start date')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='end date')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('hierarchy', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oauth2_backend.Hierarchy', verbose_name='hierarchy')),
('permission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Permission', verbose_name='permission')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'user hierarchy permission',
'db_table': 'oauth2_backend_user_hierarchy_permission',
'verbose_name_plural': 'user hierarchy permission',
},
),
migrations.AddField(
model_name='hierarchy',
name='hierarchy_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hierarchy_set', to='oauth2_backend.HierarchyType'),
),
migrations.AddField(
model_name='hierarchy',
name='immediate_parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='immediate_childrens', to='oauth2_backend.Hierarchy'),
),
migrations.AddField(
model_name='hierarchy',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='childrens', to='oauth2_backend.Hierarchy'),
),
migrations.AddField(
model_name='user',
name='person',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='oauth2_backend.Person', verbose_name='Person'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
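# Illustrative note (not part of the generated migration): assuming the app is
# registered under the label 'oauth2_backend', this initial migration would
# typically be applied with
#   python manage.py migrate oauth2_backend
# creating the User, Person, Hierarchy, HierarchyType, Menu,
# UserHierarchyGroup and UserHierarchyPermission tables declared above.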
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.UUIDField",
"django.db.models.BigIntegerField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.EmailField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateField",
"django.db.models.DateTimeField"
] |
[((13032, 13164), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""hierarchy_set"""', 'to': '"""oauth2_backend.HierarchyType"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='hierarchy_set', to='oauth2_backend.HierarchyType')\n", (13049, 13164), False, 'from django.db import migrations, models\n'), ((13292, 13454), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""immediate_childrens"""', 'to': '"""oauth2_backend.Hierarchy"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='immediate_childrens', to=\n 'oauth2_backend.Hierarchy')\n", (13309, 13454), False, 'from django.db import migrations, models\n'), ((13567, 13714), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""childrens"""', 'to': '"""oauth2_backend.Hierarchy"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='childrens', to='oauth2_backend.Hierarchy')\n", (13584, 13714), False, 'from django.db import migrations, models\n'), ((13827, 13971), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""oauth2_backend.Person"""', 'verbose_name': '"""Person"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='oauth2_backend.Person', verbose_name='Person')\n", (13847, 13971), False, 'from django.db import migrations, models\n'), ((14094, 14298), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""Specific permissions for this user."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Permission"""', 'verbose_name': '"""user permissions"""'}), "(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions')\n", (14116, 14298), False, 'from django.db import migrations, models\n'), ((607, 664), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""password"""'}), "(max_length=128, verbose_name='password')\n", (623, 664), False, 'from django.db import migrations, models\n'), ((698, 768), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""last login"""'}), "(blank=True, null=True, verbose_name='last login')\n", (718, 768), False, 'from django.db import migrations, models\n'), ((804, 975), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates that this user has all permissions without explicitly assigning them."""', 'verbose_name': '"""superuser status"""'}), "(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')\n", (823, 975), False, 'from django.db import migrations, models\n'), ((1329, 1399), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'verbose_name': '"""first name"""'}), "(blank=True, max_length=30, verbose_name='first name')\n", (1345, 1399), False, 'from django.db import migrations, 
models\n'), ((1432, 1501), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'verbose_name': '"""last name"""'}), "(blank=True, max_length=30, verbose_name='last name')\n", (1448, 1501), False, 'from django.db import migrations, models\n'), ((1530, 1605), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""email address"""'}), "(blank=True, max_length=254, verbose_name='email address')\n", (1547, 1605), False, 'from django.db import migrations, models\n'), ((1637, 1780), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates whether the user can log into this admin site."""', 'verbose_name': '"""staff status"""'}), "(default=False, help_text=\n 'Designates whether the user can log into this admin site.',\n verbose_name='staff status')\n", (1656, 1780), False, 'from django.db import migrations, models\n'), ((1804, 1985), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Designates whether this user should be treated as active. Unselect this instead of deleting accounts."""', 'verbose_name': '"""active"""'}), "(default=True, help_text=\n 'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'\n , verbose_name='active')\n", (1823, 1985), False, 'from django.db import migrations, models\n'), ((2010, 2098), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""date joined"""'}), "(default=django.utils.timezone.now, verbose_name=\n 'date joined')\n", (2030, 2098), False, 'from django.db import migrations, models\n'), ((2119, 2210), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (2135, 2210), False, 'from django.db import migrations, models\n'), ((2247, 2301), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)'}), '(blank=True, max_length=50, null=True)\n', (2263, 2301), False, 'from django.db import migrations, models\n'), ((2339, 2393), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)'}), '(blank=True, max_length=50, null=True)\n', (2355, 2393), False, 'from django.db import migrations, models\n'), ((2427, 2500), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""updated at"""'}), "(auto_now=True, null=True, verbose_name='updated at')\n", (2447, 2500), False, 'from django.db import migrations, models\n'), ((2537, 2576), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2553, 2576), False, 'from django.db import migrations, models\n'), ((2606, 2857), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""The groups this user belongs to. A user will get all permissions granted to each of their groups."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Group"""', 'verbose_name': '"""groups"""'}), "(blank=True, help_text=\n 'The groups this user belongs to. 
A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to='auth.Group',\n verbose_name='groups')\n", (2628, 2857), False, 'from django.db import migrations, models\n'), ((3307, 3398), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (3323, 3398), False, 'from django.db import migrations, models\n'), ((3422, 3534), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'default': '"""logo/default.png"""', 'null': '(True)', 'upload_to': '"""logos"""', 'verbose_name': '"""Logo"""'}), "(blank=True, default='logo/default.png', null=True,\n upload_to='logos', verbose_name='Logo')\n", (3439, 3534), False, 'from django.db import migrations, models\n'), ((3558, 3633), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(60)', 'null': '(True)', 'verbose_name': '"""Code"""'}), "(blank=True, max_length=60, null=True, verbose_name='Code')\n", (3574, 3633), False, 'from django.db import migrations, models\n'), ((3661, 3713), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)', 'verbose_name': '"""Name"""'}), "(max_length=60, verbose_name='Name')\n", (3677, 3713), False, 'from django.db import migrations, models\n'), ((3747, 3833), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)', 'verbose_name': '"""Name short"""'}), "(blank=True, max_length=40, null=True, verbose_name=\n 'Name short')\n", (3763, 3833), False, 'from django.db import migrations, models\n'), ((3872, 3948), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""fiscal creation date"""'}), "(blank=True, null=True, verbose_name='fiscal creation date')\n", (3888, 3948), False, 'from django.db import migrations, models\n'), ((3986, 4076), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)', 'verbose_name': '"""Fiscal address"""'}), "(blank=True, max_length=40, null=True, verbose_name=\n 'Fiscal address')\n", (4002, 4076), False, 'from django.db import migrations, models\n'), ((4104, 4160), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Active"""'}), "(default=True, verbose_name='Active')\n", (4123, 4160), False, 'from django.db import migrations, models\n'), ((4194, 4260), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), "(auto_now_add=True, verbose_name='created at')\n", (4214, 4260), False, 'from django.db import migrations, models\n'), ((4294, 4367), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""updated at"""'}), "(auto_now=True, null=True, verbose_name='updated at')\n", (4314, 4367), False, 'from django.db import migrations, models\n'), ((4404, 4443), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4420, 4443), False, 'from django.db import migrations, models\n'), ((4832, 4923), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), 
'(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (4848, 4923), False, 'from django.db import migrations, models\n'), ((4957, 5203), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('INSTITUCION', 'Institucion'), ('FILIAL', 'Filial'), ('FACULTAD',\n 'Facultad'), ('ESCUELA', 'Escuela'), ('CARRERA', 'Carrera'), (\n 'DEPARTAMENTO_ACAD', 'Departamento acad.'), ('OTHER', 'Other')]", 'max_length': '(50)'}), "(choices=[('INSTITUCION', 'Institucion'), ('FILIAL',\n 'Filial'), ('FACULTAD', 'Facultad'), ('ESCUELA', 'Escuela'), ('CARRERA',\n 'Carrera'), ('DEPARTAMENTO_ACAD', 'Departamento acad.'), ('OTHER',\n 'Other')], max_length=50)\n", (4973, 5203), False, 'from django.db import migrations, models\n'), ((5219, 5271), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)', 'verbose_name': '"""Name"""'}), "(max_length=60, verbose_name='Name')\n", (5235, 5271), False, 'from django.db import migrations, models\n'), ((5300, 5344), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'verbose_name': '"""Level"""'}), "(verbose_name='Level')\n", (5322, 5344), False, 'from django.db import migrations, models\n'), ((5377, 5433), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Active"""'}), "(default=True, verbose_name='Active')\n", (5396, 5433), False, 'from django.db import migrations, models\n'), ((5467, 5533), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), "(auto_now_add=True, verbose_name='created at')\n", (5487, 5533), False, 'from django.db import migrations, models\n'), ((5567, 5640), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""updated at"""'}), "(auto_now=True, null=True, verbose_name='updated at')\n", (5587, 5640), False, 'from django.db import migrations, models\n'), ((5677, 5716), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5693, 5716), False, 'from django.db import migrations, models\n'), ((6183, 6274), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (6199, 6274), False, 'from django.db import migrations, models\n'), ((6300, 6497), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('WEB', 'Web informativa'), ('ADMISION', 'Admisión'), ('BACKEND',\n 'Backend Manager'), ('OTHER', 'Other')]", 'default': '"""BACKEND"""', 'max_length': '(50)', 'verbose_name': '"""module"""'}), "(choices=[('WEB', 'Web informativa'), ('ADMISION',\n 'Admisión'), ('BACKEND', 'Backend Manager'), ('OTHER', 'Other')],\n default='BACKEND', max_length=50, verbose_name='module')\n", (6316, 6497), False, 'from django.db import migrations, models\n'), ((6518, 6644), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""state or section (estado o grupo de estados)"""', 'max_length': '(50)', 'verbose_name': '"""State or section"""'}), "(help_text='state or section (estado o grupo de estados)',\n max_length=50, verbose_name='State or section')\n", (6534, 6644), False, 'from django.db import migrations, models\n'), ((6669, 6722), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""Title"""'}), 
"(max_length=50, verbose_name='Title')\n", (6685, 6722), False, 'from django.db import migrations, models\n'), ((6749, 6814), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""#"""', 'max_length': '(150)', 'verbose_name': '"""Url"""'}), "(default='#', max_length=150, verbose_name='Url')\n", (6765, 6814), False, 'from django.db import migrations, models\n'), ((6850, 6924), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""#"""', 'max_length': '(250)', 'verbose_name': '"""Template url"""'}), "(default='#', max_length=250, verbose_name='Template url')\n", (6866, 6924), False, 'from django.db import migrations, models\n'), ((6951, 7006), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""position"""'}), "(default=1, verbose_name='position')\n", (6970, 7006), False, 'from django.db import migrations, models\n'), ((7034, 7125), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""icon"""'}), "(blank=True, default='', max_length=50, null=True,\n verbose_name='icon')\n", (7050, 7125), False, 'from django.db import migrations, models\n'), ((7154, 7210), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Active"""'}), "(default=True, verbose_name='Active')\n", (7173, 7210), False, 'from django.db import migrations, models\n'), ((7245, 7307), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Is_abstract"""'}), "(default=False, verbose_name='Is_abstract')\n", (7264, 7307), False, 'from django.db import migrations, models\n'), ((7342, 7409), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""description"""'}), "(blank=True, null=True, verbose_name='description')\n", (7358, 7409), False, 'from django.db import migrations, models\n'), ((7444, 7511), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""router json"""'}), "(blank=True, null=True, verbose_name='router json')\n", (7460, 7511), False, 'from django.db import migrations, models\n'), ((7545, 7611), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), "(auto_now_add=True, verbose_name='created at')\n", (7565, 7611), False, 'from django.db import migrations, models\n'), ((7645, 7718), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""updated at"""'}), "(auto_now=True, null=True, verbose_name='updated at')\n", (7665, 7718), False, 'from django.db import migrations, models\n'), ((7755, 7794), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7771, 7794), False, 'from django.db import migrations, models\n'), ((7824, 7993), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""childrens"""', 'to': '"""oauth2_backend.Menu"""', 'verbose_name': '"""parent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='childrens', to='oauth2_backend.Menu',\n verbose_name='parent')\n", (7841, 7993), False, 'from django.db import migrations, models\n'), ((8018, 8189), 
'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""NULL if is root"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""auth.Permission"""', 'verbose_name': '"""permission"""'}), "(blank=True, help_text='NULL if is root', null=True,\n on_delete=django.db.models.deletion.CASCADE, to='auth.Permission',\n verbose_name='permission')\n", (8035, 8189), False, 'from django.db import migrations, models\n'), ((8533, 8624), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (8549, 8624), False, 'from django.db import migrations, models\n'), ((8659, 8761), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)', 'verbose_name': '"""National identity document"""'}), "(blank=True, max_length=20, null=True, verbose_name=\n 'National identity document')\n", (8675, 8761), False, 'from django.db import migrations, models\n'), ((8790, 8880), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""primer nombre"""', 'max_length': '(50)', 'verbose_name': '"""First name"""'}), "(help_text='primer nombre', max_length=50, verbose_name=\n 'First name')\n", (8806, 8880), False, 'from django.db import migrations, models\n'), ((8910, 9024), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""otros nombres"""', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""Other names"""'}), "(blank=True, help_text='otros nombres', max_length=50, null\n =True, verbose_name='Other names')\n", (8926, 9024), False, 'from django.db import migrations, models\n'), ((9052, 9166), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""apellido paterno"""', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""Last name"""'}), "(blank=True, help_text='apellido paterno', max_length=50,\n null=True, verbose_name='Last name')\n", (9068, 9166), False, 'from django.db import migrations, models\n'), ((9202, 9325), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""apellido materno"""', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""Mother\'s last name"""'}), '(blank=True, help_text=\'apellido materno\', max_length=50,\n null=True, verbose_name="Mother\'s last name")\n', (9218, 9325), False, 'from django.db import migrations, models\n'), ((9355, 9421), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""birth date"""'}), "(blank=True, null=True, verbose_name='birth date')\n", (9371, 9421), False, 'from django.db import migrations, models\n'), ((9450, 9568), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'default': '"""persons/default.png"""', 'null': '(True)', 'upload_to': '"""persons"""', 'verbose_name': '"""Photo"""'}), "(blank=True, default='persons/default.png', null=True,\n upload_to='persons', verbose_name='Photo')\n", (9467, 9568), False, 'from django.db import migrations, models\n'), ((9598, 9664), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), "(auto_now_add=True, verbose_name='created at')\n", (9618, 9664), False, 'from django.db import migrations, models\n'), ((9698, 9771), 
'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""updated at"""'}), "(auto_now=True, null=True, verbose_name='updated at')\n", (9718, 9771), False, 'from django.db import migrations, models\n'), ((9808, 9847), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9824, 9847), False, 'from django.db import migrations, models\n'), ((10120, 10211), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (10136, 10211), False, 'from django.db import migrations, models\n'), ((10242, 10281), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10258, 10281), False, 'from django.db import migrations, models\n'), ((10315, 10385), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""start date"""'}), "(blank=True, null=True, verbose_name='start date')\n", (10335, 10385), False, 'from django.db import migrations, models\n'), ((10417, 10485), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""end date"""'}), "(blank=True, null=True, verbose_name='end date')\n", (10437, 10485), False, 'from django.db import migrations, models\n'), ((10519, 10585), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), "(auto_now_add=True, verbose_name='created at')\n", (10539, 10585), False, 'from django.db import migrations, models\n'), ((10619, 10692), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""updated at"""'}), "(auto_now=True, null=True, verbose_name='updated at')\n", (10639, 10692), False, 'from django.db import migrations, models\n'), ((10729, 10768), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10745, 10768), False, 'from django.db import migrations, models\n'), ((10797, 10903), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""auth.Group"""', 'verbose_name': '"""group"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'auth.Group', verbose_name='group')\n", (10814, 10903), False, 'from django.db import migrations, models\n'), ((10931, 11055), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""oauth2_backend.Hierarchy"""', 'verbose_name': '"""hierarchy"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'oauth2_backend.Hierarchy', verbose_name='hierarchy')\n", (10948, 11055), False, 'from django.db import migrations, models\n'), ((11078, 11195), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""user"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='user')\n", (11095, 11195), False, 'from django.db import migrations, models\n'), ((11562, 11653), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 
'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (11578, 11653), False, 'from django.db import migrations, models\n'), ((11684, 11723), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (11700, 11723), False, 'from django.db import migrations, models\n'), ((11757, 11827), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""start date"""'}), "(blank=True, null=True, verbose_name='start date')\n", (11777, 11827), False, 'from django.db import migrations, models\n'), ((11859, 11927), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""end date"""'}), "(blank=True, null=True, verbose_name='end date')\n", (11879, 11927), False, 'from django.db import migrations, models\n'), ((11961, 12027), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), "(auto_now_add=True, verbose_name='created at')\n", (11981, 12027), False, 'from django.db import migrations, models\n'), ((12061, 12134), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""updated at"""'}), "(auto_now=True, null=True, verbose_name='updated at')\n", (12081, 12134), False, 'from django.db import migrations, models\n'), ((12171, 12210), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12187, 12210), False, 'from django.db import migrations, models\n'), ((12243, 12367), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""oauth2_backend.Hierarchy"""', 'verbose_name': '"""hierarchy"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'oauth2_backend.Hierarchy', verbose_name='hierarchy')\n", (12260, 12367), False, 'from django.db import migrations, models\n'), ((12396, 12512), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""auth.Permission"""', 'verbose_name': '"""permission"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'auth.Permission', verbose_name='permission')\n", (12413, 12512), False, 'from django.db import migrations, models\n'), ((12535, 12652), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""user"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='user')\n", (12552, 12652), False, 'from django.db import migrations, models\n')]
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='tensp']/h2",
'price' : "//div[@class='pd-right fr']/p[@class='p-price']",
'category' : "//ul[@class='breadcrumb all']/li/a",
'description' : "//div[@class='p-introduct all']/div[@class='content_tab_all']",
'images' : "//ul[@class='list_small']/li/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'giadungsmart.<EMAIL>'
allowed_domains = ['giadungsmart.com']
start_urls = ['http://giadungsmart.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
    Rule(LinkExtractor(allow=[r'/[a-zA-Z0-9-]+\d.*\.html$']), 'parse_item'),
    Rule(LinkExtractor(allow=[r'/[a-zA-Z-]+\.html($|\?Page=\d+$)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
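# Illustrative reading of the rules above (assumed, not part of the generated
# spider): the first LinkExtractor pattern ('/[a-zA-Z0-9-]+\d.*\.html$') is
# presumably matching product detail URLs and routes them to parse_item, while
# the second ('/[a-zA-Z-]+\.html($|\?Page=\d+$)') follows category listing
# pages, including their ?Page=N pagination, via parse.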
|
[
"scrapy.linkextractors.LinkExtractor"
] |
[((756, 808), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'allow': "['/[a-zA-Z0-9-]+\\\\d.*\\\\.html$']"}), "(allow=['/[a-zA-Z0-9-]+\\\\d.*\\\\.html$'])\n", (769, 808), False, 'from scrapy.linkextractors import LinkExtractor\n'), ((832, 892), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'allow': "['/[a-zA-Z-]+\\\\.html($|\\\\?Page=\\\\d+$)']"}), "(allow=['/[a-zA-Z-]+\\\\.html($|\\\\?Page=\\\\d+$)'])\n", (845, 892), False, 'from scrapy.linkextractors import LinkExtractor\n')]
|
from cgbind.log import logger
from copy import deepcopy
import numpy as np
from cgbind.constants import Constants
from rdkit.Chem import AllChem
from scipy.optimize import minimize, Bounds
from scipy.spatial import distance_matrix
from cgbind import geom
from cgbind.atoms import get_vdw_radii
from cgbind.geom import rotation_matrix
from cgbind.geom import calc_com
from cgbind.utils import copy_func
def cage_subst_repulsion_func(cage, substrate, cage_coords, subst_coords, with_attraction=True):
"""
Determine the energy using two-body atom-atom repulsion derived from noble
gas dimers where
V_rep(r) = exp(- r/b + a)
where a and b are parameters determined by the atom pairs. Parameters are
suitable to generate V_rep in kcal mol-1
:param cage: (Cage object)
:param substrate: (Substrate object)
:param cage_coords: (list(np.ndarray)) Cage coordinates
:param subst_coords: (list(np.ndarray)) Substrate coordinates
:param with_attraction: (bool) do or don't return the energy with a
constant attractive term based on the number of
substrate atoms in the structure
:return: energy: (float) Potential energy (V_rep) in kcal mol-1
"""
dist_mat = distance_matrix(cage_coords, subst_coords)
# Matrix with the pairwise additions of the vdW radii
sum_vdw_radii = np.add.outer(np.array(cage.vdw_radii),
np.array(substrate.vdw_radii))
# Magic numbers derived from fitting potentials to noble gas dimers and
# plotting against the sum of vdw radii
b_mat = 0.083214 * sum_vdw_radii - 0.003768
a_mat = 11.576415 * (0.175541 * sum_vdw_radii + 0.316642)
exponent_mat = -(dist_mat / b_mat) + a_mat
energy_mat = np.exp(exponent_mat)
energy = np.sum(energy_mat)
# E is negative for favourable binding but this is a purely repulsive
# function so subtract a number.. which is determined from the best
# classifier for 102 binding affinities (see cgbind paper) 0.4 kcal mol-1
if with_attraction:
return energy - 0.4 * substrate.n_atoms
return energy
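# Worked illustration (sketch, not part of the original module): for one atom
# pair with an assumed summed vdW radius of 3.0 Angstrom the fit above gives
#   b = 0.083214 * 3.0 - 0.003768               ~ 0.246
#   a = 11.576415 * (0.175541 * 3.0 + 0.316642) ~ 9.76
# so V_rep(r) = exp(-r / b + a) decays exponentially with the atom-atom
# distance r; the matrix expressions above evaluate this for every
# cage-substrate atom pair simultaneously.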
def cage_subst_repulsion_and_electrostatic_func(cage, substrate, cage_coords, subst_coords):
"""
Determine the energy of adding a substrate to a cage based on V_rep + V_att
where the attractive term is electrostatic and uses the sum of
q_i q_j / r_ij interaction energies where q_i is the partial atomic charge
on atom i.
:param cage: (Cage object)
:param substrate: (Substrate object)
:param cage_coords: (list(np.ndarray)) Cage coordinates
:param subst_coords: (list(np.ndarray)) Substrate coordinates
:return:
"""
# Calculate the distance matrix in Bohr (a0) so the energies are in au
dist_mat = Constants.ang2a0 * distance_matrix(cage_coords, subst_coords)
# Charges are already in units of e
prod_charge_mat = np.outer(cage.charges, substrate.charges)
# Compute the pairwise iteration energies as V = q1 q2 / r in atomic units
energy_mat = prod_charge_mat / dist_mat
electrostatic_energy = Constants.ha2kcalmol * np.sum(energy_mat)
repulsive_energy = cage_subst_repulsion_func(cage, substrate, cage_coords, subst_coords)
return electrostatic_energy + repulsive_energy
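# Unit-handling sketch (not part of the original module): the attractive term
# is the bare Coulomb sum V_att = sum_ij q_i * q_j / r_ij evaluated in atomic
# units (charges already in e, distances converted Angstrom -> Bohr with
# Constants.ang2a0), then converted to kcal mol-1 via Constants.ha2kcalmol
# before being added to the repulsive contribution.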
def add_substrate_com(cagesubt):
"""
    Add a substrate to the centre of a cage, defined by its centre of mass
    (com), and minimise the energy with respect to rotation of the substrate
    and the choice of substrate conformer using cagesubt.energy_func. Will try
    cagesubt.n_init_geom initial rotations for each of cagesubt.n_subst_confs
    substrate conformers
:param cagesubt: (CageSubstrateComplex object)
:return: xyzs: (list(list))
"""
logger.info(f'Adding substrate to the cage COM and minimising the energy '
f'with {cagesubt.energy_func.__name__}')
# Minimum energy initialisation and the x parameter array (angles to
# rotate about the x, y, z axes)
min_energy, curr_x = 9999999999.9, np.zeros(3)
# Optimum (minimum energy) conformer
best_coords = None
c, s = cagesubt.cage, cagesubt.substrate
cage_coords = get_centered_cage_coords(c)
c.vdw_radii = [get_vdw_radii(atom) for atom in c.atoms]
if cagesubt.n_subst_confs > 1:
try:
s.gen_confs(n_confs=cagesubt.n_subst_confs)
except (ValueError, RuntimeError):
logger.error('Could not generate substrate conformers')
return None
for i, substrate in enumerate(s.conformers):
subst_coords = get_centered_substrate_coords(substrate)
s.vdw_radii = [get_vdw_radii(atom) for atom in s.atoms]
if s.mol_obj is not None:
s.volume = AllChem.ComputeMolVolume(s.mol_obj, confId=i)
for _ in range(cagesubt.n_init_geom):
            rot_angles = 2.0 * np.pi * np.random.rand(3)  # rand generates in [0, 1) so multiply by 2*pi to cover all rotations
# Minimise the energy with a BFGS minimiser supporting bounds on
# the values (rotation is periodic)
result = minimize(get_energy, x0=np.array(rot_angles),
args=(c, s, cagesubt.energy_func, cage_coords, subst_coords),
method='L-BFGS-B',
bounds=Bounds(lb=0.0, ub=2*np.pi), tol=0.01)
energy = result.fun
logger.info(f'Energy = {energy:.4f}')
if energy < min_energy:
min_energy = energy
best_coords = get_rotated_subst_coords(result.x, subst_coords)
logger.info(f'Min energy = {min_energy:.4f} kcal mol-1')
cagesubt.binding_energy_kcal = min_energy
if best_coords is not None:
s.set_atoms(coords=best_coords)
c.set_atoms(coords=cage_coords)
return c.atoms + s.atoms
else:
return None
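# Usage sketch (assumed attributes, not part of the original module): a
# CageSubstrateComplex built elsewhere in cgbind provides .cage, .substrate,
# .energy_func, .n_init_geom and .n_subst_confs, so a typical call is
#   atoms = add_substrate_com(cagesubt)
# which returns the combined cage + substrate atoms at the lowest-energy
# orientation found, or None if no substrate conformers could be generated.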
def get_centered_cage_coords(cage):
"""Get the cage coordinates that had been translated to the cage centroid"""
cage_coords = cage.get_coords()
centroid = cage.get_centroid()
return np.array([coord - centroid for coord in cage_coords])
def get_centered_substrate_coords(substrate):
"""Get the substrate coordinates that have been translated to its center of mass"""
substrate.centre()
return substrate.get_coords()
def cat_cage_subst_coords(cage, substrate, cage_coords, substrate_coords):
"""
Concatenate some coordinates into a set of xyzs by adding back the atom
labels from the original xyzs
:param cage:
:param substrate:
:param cage_coords:
:param substrate_coords:
:return:
"""
logger.info('Appending substrate coordinates to cage coordinates')
xyzs = [[cage.xyzs[n][0]] + cage_coords[n].tolist() for n in range(len(cage.xyzs))]
cage.substrate_atom_ids = list(range(len(xyzs), len(xyzs) + len(substrate.xyzs)))
xyzs += [[substrate.xyzs[n][0]] + substrate_coords[n].tolist() for n in range(len(substrate.xyzs))]
return xyzs
def get_rotated_subst_coords(x, subst_coords):
"""Get substrate coordinates that have been rotated by x[0] radians in the
x axis etc."""
x_rot, y_rot, z_rot = x
rot_matrix = np.identity(3)
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.i, theta=x_rot))
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.j, theta=y_rot))
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.k, theta=z_rot))
return np.array([np.matmul(rot_matrix, coord) for coord in deepcopy(subst_coords)])
def get_energy(x, cage, substrate, energy_func, cage_coords, subst_coords):
"""
Calculate the energy in kcal mol-1 for a particular x, which contains the
rotations in x, y, z cartesian directions
"""
rot_substrate_coords = get_rotated_subst_coords(x, subst_coords)
energy = energy_func(cage, substrate, cage_coords, rot_substrate_coords)
return energy
cage_subst_repulsion_func.__name__ = 'repulsion'
cage_subst_repulsion_and_electrostatic_func.__name__ = 'electrostatic'
cage_subst_repulsion_and_electrostatic_func_est = copy_func(cage_subst_repulsion_and_electrostatic_func)
cage_subst_repulsion_and_electrostatic_func_est.__name__ = 'electrostatic_fast'
energy_funcs = [cage_subst_repulsion_func,
cage_subst_repulsion_and_electrostatic_func,
cage_subst_repulsion_and_electrostatic_func_est]
|
[
"copy.deepcopy",
"numpy.outer",
"numpy.sum",
"cgbind.log.logger.info",
"cgbind.geom.rotation_matrix",
"rdkit.Chem.AllChem.ComputeMolVolume",
"cgbind.utils.copy_func",
"numpy.identity",
"numpy.zeros",
"scipy.spatial.distance_matrix",
"scipy.optimize.Bounds",
"numpy.array",
"numpy.exp",
"numpy.matmul",
"numpy.random.rand",
"cgbind.atoms.get_vdw_radii",
"cgbind.log.logger.error"
] |
[((8114, 8168), 'cgbind.utils.copy_func', 'copy_func', (['cage_subst_repulsion_and_electrostatic_func'], {}), '(cage_subst_repulsion_and_electrostatic_func)\n', (8123, 8168), False, 'from cgbind.utils import copy_func\n'), ((1265, 1307), 'scipy.spatial.distance_matrix', 'distance_matrix', (['cage_coords', 'subst_coords'], {}), '(cage_coords, subst_coords)\n', (1280, 1307), False, 'from scipy.spatial import distance_matrix\n'), ((1787, 1807), 'numpy.exp', 'np.exp', (['exponent_mat'], {}), '(exponent_mat)\n', (1793, 1807), True, 'import numpy as np\n'), ((1821, 1839), 'numpy.sum', 'np.sum', (['energy_mat'], {}), '(energy_mat)\n', (1827, 1839), True, 'import numpy as np\n'), ((2936, 2977), 'numpy.outer', 'np.outer', (['cage.charges', 'substrate.charges'], {}), '(cage.charges, substrate.charges)\n', (2944, 2977), True, 'import numpy as np\n'), ((3770, 3891), 'cgbind.log.logger.info', 'logger.info', (['f"""Adding substrate to the cage COM and minimising the energy with {cagesubt.energy_func.__name__}"""'], {}), "(\n f'Adding substrate to the cage COM and minimising the energy with {cagesubt.energy_func.__name__}'\n )\n", (3781, 3891), False, 'from cgbind.log import logger\n'), ((5606, 5662), 'cgbind.log.logger.info', 'logger.info', (['f"""Min energy = {min_energy:.4f} kcal mol-1"""'], {}), "(f'Min energy = {min_energy:.4f} kcal mol-1')\n", (5617, 5662), False, 'from cgbind.log import logger\n'), ((6090, 6145), 'numpy.array', 'np.array', (['[(coord - centroid) for coord in cage_coords]'], {}), '([(coord - centroid) for coord in cage_coords])\n', (6098, 6145), True, 'import numpy as np\n'), ((6651, 6717), 'cgbind.log.logger.info', 'logger.info', (['"""Appending substrate coordinates to cage coordinates"""'], {}), "('Appending substrate coordinates to cage coordinates')\n", (6662, 6717), False, 'from cgbind.log import logger\n'), ((7209, 7223), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (7220, 7223), True, 'import numpy as np\n'), ((1400, 1424), 'numpy.array', 'np.array', (['cage.vdw_radii'], {}), '(cage.vdw_radii)\n', (1408, 1424), True, 'import numpy as np\n'), ((1459, 1488), 'numpy.array', 'np.array', (['substrate.vdw_radii'], {}), '(substrate.vdw_radii)\n', (1467, 1488), True, 'import numpy as np\n'), ((2830, 2872), 'scipy.spatial.distance_matrix', 'distance_matrix', (['cage_coords', 'subst_coords'], {}), '(cage_coords, subst_coords)\n', (2845, 2872), False, 'from scipy.spatial import distance_matrix\n'), ((3152, 3170), 'numpy.sum', 'np.sum', (['energy_mat'], {}), '(energy_mat)\n', (3158, 3170), True, 'import numpy as np\n'), ((4052, 4063), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4060, 4063), True, 'import numpy as np\n'), ((4240, 4259), 'cgbind.atoms.get_vdw_radii', 'get_vdw_radii', (['atom'], {}), '(atom)\n', (4253, 4259), False, 'from cgbind.atoms import get_vdw_radii\n'), ((7263, 7304), 'cgbind.geom.rotation_matrix', 'rotation_matrix', ([], {'axis': 'geom.i', 'theta': 'x_rot'}), '(axis=geom.i, theta=x_rot)\n', (7278, 7304), False, 'from cgbind.geom import rotation_matrix\n'), ((7345, 7386), 'cgbind.geom.rotation_matrix', 'rotation_matrix', ([], {'axis': 'geom.j', 'theta': 'y_rot'}), '(axis=geom.j, theta=y_rot)\n', (7360, 7386), False, 'from cgbind.geom import rotation_matrix\n'), ((7427, 7468), 'cgbind.geom.rotation_matrix', 'rotation_matrix', ([], {'axis': 'geom.k', 'theta': 'z_rot'}), '(axis=geom.k, theta=z_rot)\n', (7442, 7468), False, 'from cgbind.geom import rotation_matrix\n'), ((4658, 4677), 'cgbind.atoms.get_vdw_radii', 'get_vdw_radii', (['atom'], {}), 
'(atom)\n', (4671, 4677), False, 'from cgbind.atoms import get_vdw_radii\n'), ((4756, 4801), 'rdkit.Chem.AllChem.ComputeMolVolume', 'AllChem.ComputeMolVolume', (['s.mol_obj'], {'confId': 'i'}), '(s.mol_obj, confId=i)\n', (4780, 4801), False, 'from rdkit.Chem import AllChem\n'), ((5411, 5448), 'cgbind.log.logger.info', 'logger.info', (['f"""Energy = {energy:.4f}"""'], {}), "(f'Energy = {energy:.4f}')\n", (5422, 5448), False, 'from cgbind.log import logger\n'), ((7492, 7520), 'numpy.matmul', 'np.matmul', (['rot_matrix', 'coord'], {}), '(rot_matrix, coord)\n', (7501, 7520), True, 'import numpy as np\n'), ((4441, 4496), 'cgbind.log.logger.error', 'logger.error', (['"""Could not generate substrate conformers"""'], {}), "('Could not generate substrate conformers')\n", (4453, 4496), False, 'from cgbind.log import logger\n'), ((4888, 4905), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (4902, 4905), True, 'import numpy as np\n'), ((7534, 7556), 'copy.deepcopy', 'deepcopy', (['subst_coords'], {}), '(subst_coords)\n', (7542, 7556), False, 'from copy import deepcopy\n'), ((5128, 5148), 'numpy.array', 'np.array', (['rot_angles'], {}), '(rot_angles)\n', (5136, 5148), True, 'import numpy as np\n'), ((5328, 5356), 'scipy.optimize.Bounds', 'Bounds', ([], {'lb': '(0.0)', 'ub': '(2 * np.pi)'}), '(lb=0.0, ub=2 * np.pi)\n', (5334, 5356), False, 'from scipy.optimize import minimize, Bounds\n')]
|
import tensorflow as tf
def fixed(global_step, params):
    assert 'base_lr' in params, 'base_lr must be in params'
lr = tf.constant(params['base_lr'])
tf.summary.scalar('learining_rate', lr)
return lr
def exponential_decay(global_step, params):
    assert 'base_lr' in params, 'base_lr must be in params'
    assert 'decay_steps' in params, 'decay_steps must be in params'
    assert 'decay_rate' in params, 'decay_rate must be in params'
lr = tf.train.exponential_decay(
learning_rate=params['base_lr'],
global_step=global_step,
decay_steps=params['decay_steps'],
decay_rate=params['decay_rate'],
staircase=params.get('staircase', True),
name='learning_rate')
tf.summary.scalar('learining_rate', lr)
return lr
def polynomial_decay(global_step, params):
    assert 'base_lr' in params, 'base_lr must be in params'
    assert 'decay_steps' in params, 'decay_steps must be in params'
    assert 'end_learning_rate' in params, 'end_learning_rate must be in params'
    assert 'power' in params, 'power must be in params'
    lr = tf.train.polynomial_decay(
learning_rate=params['base_lr'],
global_step=global_step,
decay_steps=params['decay_steps'],
end_learning_rate=params['end_learning_rate'],
power=params['power'],
name='learning_rate')
tf.summary.scalar('learining_rate', lr)
return lr
LR_POLICY_MAP = {
'fixed': fixed,
'exponential_decay': exponential_decay,
'polynomial_decay': polynomial_decay,
}
def get_lr_policy_fn(config):
if config.lr_policy not in LR_POLICY_MAP:
raise ValueError('{} is not a valid lr policy type'.format(config.lr_policy))
return LR_POLICY_MAP[config.lr_policy]
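# Usage sketch (assumed config/params, not part of the original module):
#   lr_policy_fn = get_lr_policy_fn(config)   # e.g. config.lr_policy == 'exponential_decay'
#   lr = lr_policy_fn(tf.train.get_or_create_global_step(),
#                     {'base_lr': 0.1, 'decay_steps': 10000, 'decay_rate': 0.96})
# The returned tensor is then handed to an optimizer; each policy also writes a
# learning-rate scalar summary for TensorBoard.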
|
[
"tensorflow.summary.scalar",
"tensorflow.train.exponential_decay",
"tensorflow.constant"
] |
[((124, 154), 'tensorflow.constant', 'tf.constant', (["params['base_lr']"], {}), "(params['base_lr'])\n", (135, 154), True, 'import tensorflow as tf\n'), ((159, 198), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learining_rate"""', 'lr'], {}), "('learining_rate', lr)\n", (176, 198), True, 'import tensorflow as tf\n'), ((724, 763), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learining_rate"""', 'lr'], {}), "('learining_rate', lr)\n", (741, 763), True, 'import tensorflow as tf\n'), ((1085, 1306), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', ([], {'learning_rate': "params['base_lr']", 'global_step': 'global_step', 'decay_steps': "params['decay_steps']", 'end_learning_rate': "params['end_learning_rate']", 'power': "params['power']", 'name': '"""learning_rate"""'}), "(learning_rate=params['base_lr'], global_step=\n global_step, decay_steps=params['decay_steps'], end_learning_rate=\n params['end_learning_rate'], power=params['power'], name='learning_rate')\n", (1111, 1306), True, 'import tensorflow as tf\n'), ((1351, 1390), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learining_rate"""', 'lr'], {}), "('learining_rate', lr)\n", (1368, 1390), True, 'import tensorflow as tf\n')]
|
import os
from pythongettext import msgfmt
LOCALE_PATH = os.path.join('..', 'discord_birthday_bot', 'locale')
for subdir, dirs, files in os.walk(LOCALE_PATH):
for filename in files:
if filename.endswith('.po'):
path = os.path.join(subdir, filename)
mo_str = msgfmt.Msgfmt(path).get()
mo = open(os.path.splitext(path)[0] + '.mo', 'wb')
mo.write(mo_str)
mo.flush()
mo.close()
print('Translated', path)
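# Illustrative outcome (hypothetical paths, not part of the original script):
# a catalogue at ../discord_birthday_bot/locale/de/LC_MESSAGES/messages.po
# would be compiled in place to
# ../discord_birthday_bot/locale/de/LC_MESSAGES/messages.mo, the binary format
# that gettext-based translation loaders read at runtime.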
|
[
"pythongettext.msgfmt.Msgfmt",
"os.walk",
"os.path.join",
"os.path.splitext"
] |
[((59, 111), 'os.path.join', 'os.path.join', (['""".."""', '"""discord_birthday_bot"""', '"""locale"""'], {}), "('..', 'discord_birthday_bot', 'locale')\n", (71, 111), False, 'import os\n'), ((140, 160), 'os.walk', 'os.walk', (['LOCALE_PATH'], {}), '(LOCALE_PATH)\n', (147, 160), False, 'import os\n'), ((245, 275), 'os.path.join', 'os.path.join', (['subdir', 'filename'], {}), '(subdir, filename)\n', (257, 275), False, 'import os\n'), ((297, 316), 'pythongettext.msgfmt.Msgfmt', 'msgfmt.Msgfmt', (['path'], {}), '(path)\n', (310, 316), False, 'from pythongettext import msgfmt\n'), ((345, 367), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (361, 367), False, 'import os\n')]
|
from rdkit import Chem
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
tqdm.pandas()
GLOBAL_SCALE = ['partial_charge', 'fukui_neu', 'fukui_elec']
ATOM_SCALE = ['NMR']
def check_chemprop_out(df):
invalid = []
for _,r in df.iterrows():
for c in ['partial_charge', 'fukui_neu', 'fukui_elec', 'NMR', 'bond_order', 'bond_length']:
if np.any(pd.isna(r[c])):
invalid.append(r['smiles'])
break
return invalid
def modify_scaled_df(df, scalers):
for index in df.index:
if "H-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(27.7189 - scalers['NMR']["H"].data_min_[0]) / (scalers['NMR']["H"].data_max_[0] - scalers['NMR']["H"].data_min_[0])])
elif "F-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(481.6514 - scalers['NMR']["F"].data_min_[0]) / (scalers['NMR']["F"].data_max_[0] - scalers['NMR']["F"].data_min_[0])])
elif "Cl-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(1150.4265 - scalers['NMR']["Cl"].data_min_[0]) / (scalers['NMR']["Cl"].data_max_[0] - scalers['NMR']["Cl"].data_min_[0])])
elif "Br-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(3126.8978 - scalers['NMR']["Br"].data_min_[0]) / (scalers['NMR']["Br"].data_max_[0] - scalers['NMR']["Br"].data_min_[0])])
return df
def min_max_normalize(df, scalers=None, train_smiles=None):
if train_smiles is not None:
ref_df = df[df.smiles.isin(train_smiles)]
else:
ref_df = df.copy()
if scalers is None:
scalers = get_scaler(ref_df)
for column in GLOBAL_SCALE:
scaler = scalers[column]
df[column] = df[column].apply(lambda x: scaler.transform(x.reshape(-1, 1)).reshape(-1))
def min_max_by_atom(atoms, data, scaler):
data = [scaler[a].transform(np.array([[d]]))[0][0] for a, d in zip(atoms, data)]
return np.array(data)
if ATOM_SCALE:
print('postprocessing atom-wise scaling')
df['atoms'] = df.smiles.apply(lambda x: get_atoms(x))
for column in ATOM_SCALE:
df[column] = df.progress_apply(lambda x: min_max_by_atom(x['atoms'], x[column], scalers[column]), axis=1)
df['bond_order_matrix'] = df.apply(lambda x: bond_to_matrix(x['smiles'], x['bond_order']), axis=1)
df['distance_matrix'] = df.apply(lambda x: bond_to_matrix(x['smiles'], x['bond_length']), axis=1)
df = modify_scaled_df(df, scalers)
df = df[['smiles', 'partial_charge', 'fukui_neu', 'fukui_elec', 'NMR', 'bond_order_matrix', 'distance_matrix']]
df = df.set_index('smiles')
return df, scalers
def get_scaler(df):
scalers = {}
for column in GLOBAL_SCALE:
scaler = MinMaxScaler()
data = np.concatenate(df[column].tolist()).reshape(-1, 1)
scaler.fit(data)
scalers[column] = scaler
if ATOM_SCALE:
atoms = df.smiles.apply(lambda x: get_atoms(x))
atoms = np.concatenate(atoms.tolist())
for column in ATOM_SCALE:
data = np.concatenate(df[column].tolist())
data = pd.DataFrame({'atoms': atoms, 'data': data})
data = data.groupby('atoms').agg({'data': lambda x: list(x)})['data'].apply(lambda x: np.array(x)).to_dict()
scalers[column] = {}
for k, d in data.items():
scaler = MinMaxScaler()
scalers[column][k] = scaler.fit(d.reshape(-1, 1))
return scalers
def bond_to_matrix(smiles, bond_vector):
m = Chem.MolFromSmiles(smiles)
m = Chem.AddHs(m)
bond_matrix = np.zeros([len(m.GetAtoms()), len(m.GetAtoms())])
for i, bp in enumerate(bond_vector):
b = m.GetBondWithIdx(i)
bond_matrix[b.GetBeginAtomIdx(), b.GetEndAtomIdx()] = bond_matrix[b.GetEndAtomIdx(), b.GetBeginAtomIdx()] = bp
return bond_matrix
def get_atoms(smiles):
m = Chem.MolFromSmiles(smiles)
m = Chem.AddHs(m)
atoms = [x.GetSymbol() for x in m.GetAtoms()]
return atoms
def minmax_by_element(r, minmax, target):
target = r[target]
elements = r['atoms']
for i, a in enumerate(elements):
target[i] = (target[i] - minmax[a][0]) / (minmax[a][1] - minmax[a][0] + np.finfo(float).eps)
return target
|
[
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScaler",
"tqdm.tqdm.pandas",
"numpy.finfo",
"numpy.array",
"rdkit.Chem.AddHs",
"pandas.isna",
"rdkit.Chem.MolFromSmiles"
] |
[((179, 192), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (190, 192), False, 'from tqdm import tqdm\n'), ((4407, 4433), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (4425, 4433), False, 'from rdkit import Chem\n'), ((4443, 4456), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['m'], {}), '(m)\n', (4453, 4456), False, 'from rdkit import Chem\n'), ((4774, 4800), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (4792, 4800), False, 'from rdkit import Chem\n'), ((4810, 4823), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['m'], {}), '(m)\n', (4820, 4823), False, 'from rdkit import Chem\n'), ((2815, 2829), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2823, 2829), True, 'import numpy as np\n'), ((3620, 3634), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3632, 3634), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((730, 876), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (738, 876), True, 'import numpy as np\n'), ((907, 1040), 'numpy.array', 'np.array', (["[(27.7189 - scalers['NMR']['H'].data_min_[0]) / (scalers['NMR']['H'].\n data_max_[0] - scalers['NMR']['H'].data_min_[0])]"], {}), "([(27.7189 - scalers['NMR']['H'].data_min_[0]) / (scalers['NMR'][\n 'H'].data_max_[0] - scalers['NMR']['H'].data_min_[0])])\n", (915, 1040), True, 'import numpy as np\n'), ((3992, 4036), 'pandas.DataFrame', 'pd.DataFrame', (["{'atoms': atoms, 'data': data}"], {}), "({'atoms': atoms, 'data': data})\n", (4004, 4036), True, 'import pandas as pd\n'), ((475, 488), 'pandas.isna', 'pd.isna', (['r[c]'], {}), '(r[c])\n', (482, 488), True, 'import pandas as pd\n'), ((1128, 1274), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (1136, 1274), True, 'import numpy as np\n'), ((1305, 1439), 'numpy.array', 'np.array', (["[(481.6514 - scalers['NMR']['F'].data_min_[0]) / (scalers['NMR']['F'].\n data_max_[0] - scalers['NMR']['F'].data_min_[0])]"], {}), "([(481.6514 - scalers['NMR']['F'].data_min_[0]) / (scalers['NMR'][\n 'F'].data_max_[0] - scalers['NMR']['F'].data_min_[0])])\n", (1313, 1439), True, 'import numpy as np\n'), ((4255, 4269), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4267, 4269), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1528, 1674), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (1536, 1674), True, 'import numpy as np\n'), ((1705, 1843), 'numpy.array', 'np.array', (["[(1150.4265 - scalers['NMR']['Cl'].data_min_[0]) / (scalers['NMR']['Cl'].\n data_max_[0] - scalers['NMR']['Cl'].data_min_[0])]"], {}), "([(1150.4265 - scalers['NMR']['Cl'].data_min_[0]) / (scalers['NMR']\n ['Cl'].data_max_[0] - scalers['NMR']['Cl'].data_min_[0])])\n", (1713, 1843), True, 'import numpy as np\n'), ((5103, 5118), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5111, 5118), True, 'import numpy as np\n'), ((1932, 2078), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (1940, 2078), True, 'import numpy as np\n'), ((2109, 2247), 'numpy.array', 'np.array', (["[(3126.8978 - scalers['NMR']['Br'].data_min_[0]) / (scalers['NMR']['Br'].\n data_max_[0] - scalers['NMR']['Br'].data_min_[0])]"], {}), "([(3126.8978 - scalers['NMR']['Br'].data_min_[0]) / (scalers['NMR']\n ['Br'].data_max_[0] - scalers['NMR']['Br'].data_min_[0])])\n", (2117, 2247), True, 'import numpy as np\n'), ((2747, 2762), 'numpy.array', 'np.array', (['[[d]]'], {}), '([[d]])\n', (2755, 2762), True, 'import numpy as np\n'), ((4135, 4146), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4143, 4146), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The trainer program for Adelaide_EA."""
import logging
import vega
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.core.metrics import calc_model_flops_params
from vega.core.trainer.callbacks import Callback
if vega.is_torch_backend():
import torch
elif vega.is_tf_backend():
import tensorflow as tf
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AdelaideEATrainerCallback(Callback):
"""Construct the trainer of Adelaide-EA."""
def before_train(self, logs=None):
"""Be called before the training process."""
self.config = self.trainer.config
if vega.is_torch_backend():
count_input = torch.FloatTensor(1, 3, 192, 192).cuda()
elif vega.is_tf_backend():
tf.reset_default_graph()
count_input = tf.random_uniform([1, 192, 192, 3], dtype=tf.float32)
flops_count, params_count = calc_model_flops_params(self.trainer.model, count_input)
self.flops_count, self.params_count = flops_count * 1e-9, params_count * 1e-3
logger.info("Flops: {:.2f} G, Params: {:.1f} K".format(self.flops_count, self.params_count))
if self.flops_count > self.config.flops_limit:
logger.info("Flop too large!")
self.trainer.skip_train = True
def after_epoch(self, epoch, logs=None):
"""Update gflops and kparams."""
summary_perfs = logs.get('summary_perfs', {})
summary_perfs.update({'gflops': self.flops_count, 'kparams': self.params_count})
logs.update({'summary_perfs': summary_perfs})
def make_batch(self, batch):
"""Make batch for each training step."""
input = batch["data"]
target = batch["mask"]
if self.config.cuda:
input = input.cuda()
target = target.cuda()
return input, target
|
[
"tensorflow.random_uniform",
"vega.is_torch_backend",
"vega.core.common.class_factory.ClassFactory.register",
"tensorflow.reset_default_graph",
"torch.FloatTensor",
"vega.core.metrics.calc_model_flops_params",
"vega.is_tf_backend",
"logging.getLogger"
] |
[((681, 704), 'vega.is_torch_backend', 'vega.is_torch_backend', ([], {}), '()\n', (702, 704), False, 'import vega\n'), ((788, 815), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (805, 815), False, 'import logging\n'), ((819, 860), 'vega.core.common.class_factory.ClassFactory.register', 'ClassFactory.register', (['ClassType.CALLBACK'], {}), '(ClassType.CALLBACK)\n', (840, 860), False, 'from vega.core.common.class_factory import ClassFactory, ClassType\n'), ((728, 748), 'vega.is_tf_backend', 'vega.is_tf_backend', ([], {}), '()\n', (746, 748), False, 'import vega\n'), ((1098, 1121), 'vega.is_torch_backend', 'vega.is_torch_backend', ([], {}), '()\n', (1119, 1121), False, 'import vega\n'), ((1378, 1434), 'vega.core.metrics.calc_model_flops_params', 'calc_model_flops_params', (['self.trainer.model', 'count_input'], {}), '(self.trainer.model, count_input)\n', (1401, 1434), False, 'from vega.core.metrics import calc_model_flops_params\n'), ((1203, 1223), 'vega.is_tf_backend', 'vega.is_tf_backend', ([], {}), '()\n', (1221, 1223), False, 'import vega\n'), ((1237, 1261), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1259, 1261), True, 'import tensorflow as tf\n'), ((1288, 1341), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, 192, 192, 3]'], {'dtype': 'tf.float32'}), '([1, 192, 192, 3], dtype=tf.float32)\n', (1305, 1341), True, 'import tensorflow as tf\n'), ((1149, 1182), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(3)', '(192)', '(192)'], {}), '(1, 3, 192, 192)\n', (1166, 1182), False, 'import torch\n')]
|
import os
import re
from flask import g, jsonify, request
from flask_httpauth import HTTPTokenAuth # HTTPBasicAuth
from app.models import User
from app.v1 import api
from app.v1.errors import forbidden, unauthorized
from config import config
auth = HTTPTokenAuth()
@auth.verify_token
def verify_token(token):
g.current_user = None
url = request.path
for i in config[os.getenv('Flask_config') or 'default'].White_list:
if re.match(i, url):
return True
user = User.verify_auth_token(token)
if not user:
return False
g.current_user = user
return True
@api.route('/login', methods=['POST'])
def login():
print('ok')
print(request.form)
username = request.form.get('username')
password = request.form.get('password')
print(username)
print('username:' + username if username else '')
print('password:'+ password if username else '')
user = User.query.filter_by(username=username).first()
if not user or not user.verify_password(password):
return jsonify({'error': 'Unauthorized Access'})
g.user = user
token = user.generate_auth_token(3600)
return jsonify({'token': token.decode('ascii'), 'code': 20000})
@api.before_request
@auth.login_required
def before_request():
url = request.path
for i in config[os.getenv('Flask_config') or 'default'].White_list:
if re.match(i, url):
return
if not g.current_user:
return forbidden('Unconfirmed account')
# if not g.current_user or not g.current_user.confirmed:
# return forbidden('Unconfirmed account')
@auth.error_handler
def auth_error():
return unauthorized('Invalid credentials')
@api.route('/hello')
@auth.login_required
def hello():
return jsonify({'k': 'hello'})
# auth = HTTPBasicAuth()
#
#
# @auth.verify_password
# def verify_password(username_or_token, password):
# # first try to authenticate by token
# user = User.verify_auth_token(username_or_token)
# if not user:
# # try to authenticate with username/password
# user = User.query.filter_by(username=username_or_token).first()
# if not user or not user.verify_password(password):
# return False
# g.user = user
# return True
#
#
# @api.route('/token')
# @auth.login_required
# def get_auth_token():
# token = g.user.generate_auth_token()
# return jsonify({'token': token.decode('ascii')})
#
#
|
[
"flask.request.form.get",
"re.match",
"app.v1.errors.unauthorized",
"flask.jsonify",
"app.v1.api.route",
"app.models.User.query.filter_by",
"app.v1.errors.forbidden",
"flask_httpauth.HTTPTokenAuth",
"app.models.User.verify_auth_token",
"os.getenv"
] |
[((253, 268), 'flask_httpauth.HTTPTokenAuth', 'HTTPTokenAuth', ([], {}), '()\n', (266, 268), False, 'from flask_httpauth import HTTPTokenAuth\n'), ((613, 650), 'app.v1.api.route', 'api.route', (['"""/login"""'], {'methods': "['POST']"}), "('/login', methods=['POST'])\n", (622, 650), False, 'from app.v1 import api\n'), ((1703, 1722), 'app.v1.api.route', 'api.route', (['"""/hello"""'], {}), "('/hello')\n", (1712, 1722), False, 'from app.v1 import api\n'), ((500, 529), 'app.models.User.verify_auth_token', 'User.verify_auth_token', (['token'], {}), '(token)\n', (522, 529), False, 'from app.models import User\n'), ((719, 747), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (735, 747), False, 'from flask import g, jsonify, request\n'), ((763, 791), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (779, 791), False, 'from flask import g, jsonify, request\n'), ((1664, 1699), 'app.v1.errors.unauthorized', 'unauthorized', (['"""Invalid credentials"""'], {}), "('Invalid credentials')\n", (1676, 1699), False, 'from app.v1.errors import forbidden, unauthorized\n'), ((1768, 1791), 'flask.jsonify', 'jsonify', (["{'k': 'hello'}"], {}), "({'k': 'hello'})\n", (1775, 1791), False, 'from flask import g, jsonify, request\n'), ((447, 463), 're.match', 're.match', (['i', 'url'], {}), '(i, url)\n', (455, 463), False, 'import re\n'), ((1048, 1089), 'flask.jsonify', 'jsonify', (["{'error': 'Unauthorized Access'}"], {}), "({'error': 'Unauthorized Access'})\n", (1055, 1089), False, 'from flask import g, jsonify, request\n'), ((1390, 1406), 're.match', 're.match', (['i', 'url'], {}), '(i, url)\n', (1398, 1406), False, 'import re\n'), ((1469, 1501), 'app.v1.errors.forbidden', 'forbidden', (['"""Unconfirmed account"""'], {}), "('Unconfirmed account')\n", (1478, 1501), False, 'from app.v1.errors import forbidden, unauthorized\n'), ((930, 969), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (950, 969), False, 'from app.models import User\n'), ((384, 409), 'os.getenv', 'os.getenv', (['"""Flask_config"""'], {}), "('Flask_config')\n", (393, 409), False, 'import os\n'), ((1327, 1352), 'os.getenv', 'os.getenv', (['"""Flask_config"""'], {}), "('Flask_config')\n", (1336, 1352), False, 'import os\n')]
|
"""to be run from root directory
"""
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
django.setup()
from django.db.models.functions import Cast
from django.db.models.fields import DateField
from zoo_checks.models import AnimalCount, Enclosure, GroupCount, SpeciesCount
from django.db.models.functions import TruncDate
from django.utils import timezone
from zoo_checks.helpers import today_time
enclosures = Enclosure.objects.filter(name__in=["Australia", "Barramundi (ARG 1A)"])
tzinfo = "America/New York"
num_days = 1
animal_counts = (
AnimalCount.objects.filter(
enclosure__in=enclosures,
datetimecounted__lte=timezone.localtime(),
datetimecounted__gt=today_time() - timezone.timedelta(num_days),
)
.annotate(dateonlycounted=TruncDate("datetimecounted", tzinfo=tzinfo))
.order_by("dateonlycounted", "animal_id")
.distinct("dateonlycounted", "animal_id")
)
group_counts = (
GroupCount.objects.filter(
enclosure__in=enclosures,
datetimecounted__lte=timezone.localtime(),
datetimecounted__gt=today_time() - timezone.timedelta(num_days),
)
.annotate(dateonlycounted=TruncDate("datetimecounted", tzinfo=tzinfo))
.order_by("dateonlycounted", "group_id")
.distinct("dateonlycounted", "group_id")
)
species_counts = (
SpeciesCount.objects.filter(
enclosure__in=enclosures,
datetimecounted__lte=timezone.localtime(),
datetimecounted__gt=today_time() - timezone.timedelta(num_days),
)
.annotate(dateonlycounted=TruncDate("datetimecounted", tzinfo=tzinfo))
.order_by("dateonlycounted", "species_id")
.distinct("dateonlycounted", "species_id")
)
animal_dict = animal_counts.values()[0]
group_dict = group_counts.values()[0]
species_dict = species_counts.values()[0]
print(animal_dict)
print(animal_dict.keys())
print(group_dict)
print(group_dict.keys())
print(species_dict)
print(species_dict.keys())
|
[
"os.environ.setdefault",
"django.setup",
"django.utils.timezone.localtime",
"django.db.models.functions.TruncDate",
"zoo_checks.models.Enclosure.objects.filter",
"django.utils.timezone.timedelta",
"zoo_checks.helpers.today_time"
] |
[((64, 130), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""mysite.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'mysite.settings')\n", (85, 130), False, 'import os\n'), ((131, 145), 'django.setup', 'django.setup', ([], {}), '()\n', (143, 145), False, 'import django\n'), ((455, 526), 'zoo_checks.models.Enclosure.objects.filter', 'Enclosure.objects.filter', ([], {'name__in': "['Australia', 'Barramundi (ARG 1A)']"}), "(name__in=['Australia', 'Barramundi (ARG 1A)'])\n", (479, 526), False, 'from zoo_checks.models import AnimalCount, Enclosure, GroupCount, SpeciesCount\n'), ((813, 856), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""datetimecounted"""'], {'tzinfo': 'tzinfo'}), "('datetimecounted', tzinfo=tzinfo)\n", (822, 856), False, 'from django.db.models.functions import TruncDate\n'), ((1194, 1237), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""datetimecounted"""'], {'tzinfo': 'tzinfo'}), "('datetimecounted', tzinfo=tzinfo)\n", (1203, 1237), False, 'from django.db.models.functions import TruncDate\n'), ((1577, 1620), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""datetimecounted"""'], {'tzinfo': 'tzinfo'}), "('datetimecounted', tzinfo=tzinfo)\n", (1586, 1620), False, 'from django.db.models.functions import TruncDate\n'), ((682, 702), 'django.utils.timezone.localtime', 'timezone.localtime', ([], {}), '()\n', (700, 702), False, 'from django.utils import timezone\n'), ((1063, 1083), 'django.utils.timezone.localtime', 'timezone.localtime', ([], {}), '()\n', (1081, 1083), False, 'from django.utils import timezone\n'), ((1446, 1466), 'django.utils.timezone.localtime', 'timezone.localtime', ([], {}), '()\n', (1464, 1466), False, 'from django.utils import timezone\n'), ((732, 744), 'zoo_checks.helpers.today_time', 'today_time', ([], {}), '()\n', (742, 744), False, 'from zoo_checks.helpers import today_time\n'), ((747, 775), 'django.utils.timezone.timedelta', 'timezone.timedelta', (['num_days'], {}), '(num_days)\n', (765, 775), False, 'from django.utils import timezone\n'), ((1113, 1125), 'zoo_checks.helpers.today_time', 'today_time', ([], {}), '()\n', (1123, 1125), False, 'from zoo_checks.helpers import today_time\n'), ((1128, 1156), 'django.utils.timezone.timedelta', 'timezone.timedelta', (['num_days'], {}), '(num_days)\n', (1146, 1156), False, 'from django.utils import timezone\n'), ((1496, 1508), 'zoo_checks.helpers.today_time', 'today_time', ([], {}), '()\n', (1506, 1508), False, 'from zoo_checks.helpers import today_time\n'), ((1511, 1539), 'django.utils.timezone.timedelta', 'timezone.timedelta', (['num_days'], {}), '(num_days)\n', (1529, 1539), False, 'from django.utils import timezone\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_model.ipynb (unless otherwise specified).
__all__ = ['DownSampler', 'TemporalEncoder', 'condition_time', 'ConditionTime', 'feat2image', 'MetNet',
'metnet_splitter']
# Cell
from .layers import *
from fastai.vision.all import *
# Cell
def DownSampler(in_channels):
return nn.Sequential(nn.Conv2d(in_channels, 160, 3, padding=1),
nn.MaxPool2d((2,2), stride=2),
nn.BatchNorm2d(160),
nn.Conv2d(160, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.Conv2d(256, 256, 3, padding=1),
nn.MaxPool2d((2,2), stride=2)
)
# Cell
class TemporalEncoder(Module):
def __init__(self, in_channels, out_channels=384, ks=3, n_layers=1):
self.rnn = ConvGRU(in_channels, out_channels, (ks, ks), n_layers, batch_first=True)
def forward(self, x):
x, h = self.rnn(x)
return (x, h[-1])
# Cell
def condition_time(x, i=0, size=(12, 16), seq_len=15):
"create one hot encoded time image-layers, i in [1, seq_len]"
assert i<seq_len
times = (torch.eye(seq_len, dtype=x.dtype, device=x.device)[i]).unsqueeze(-1).unsqueeze(-1)
ones = torch.ones(1,*size, dtype=x.dtype, device=x.device)
return times * ones
# Cell
class ConditionTime(Module):
"Condition Time on a stack of images, adds `horizon` channels to image"
def __init__(self, horizon, ch_dim=2):
self.horizon = horizon
self.ch_dim = ch_dim
def forward(self, x, fstep=0):
"x stack of images, fsteps"
bs, seq_len, ch, h, w = x.shape
ct = condition_time(x, fstep, (h,w), seq_len=self.horizon).repeat(bs, seq_len, 1,1,1)
x = torch.cat([x,ct], dim=self.ch_dim)
assert x.shape[self.ch_dim] == (ch + self.horizon) #check if it makes sense
return x
# Cell
def feat2image(x, target_size=(128,128)):
"This idea comes from MetNet"
x = x.transpose(1,2)
return x.unsqueeze(-1).unsqueeze(-1) * x.new_ones(1,1,1,*target_size)
# Cell
from axial_attention import AxialAttention
# Cell
class MetNet(Module):
def __init__(self, image_encoder, hidden_dim, ks=3, n_layers=1, n_att_layers=1,
head=None, horizon=3, n_feats=0, p=0.2, debug=False):
self.horizon = horizon
self.n_feats = n_feats
self.drop = nn.Dropout(p)
nf = 256 #from the simple image encoder
self.image_encoder = TimeDistributed(image_encoder)
self.ct = ConditionTime(horizon)
self.temporal_enc = TemporalEncoder(nf, hidden_dim, ks=ks, n_layers=n_layers)
self.temporal_agg = nn.Sequential(*[AxialAttention(dim=hidden_dim, dim_index=1, heads=8, num_dimensions=2) for _ in range(n_att_layers)])
if head is None:
self.head = Noop()
else:
self.head = head
self.debug = debug
def encode_timestep(self, x, fstep=1):
if self.debug: print(f'Encode Timestep:(i={fstep})')
if self.debug: print(f' input shape: {x.shape}')
#Condition Time
x = self.ct(x, fstep)
if self.debug: print(f' CondTime->x.shape: {x.shape}')
##CNN
x = self.image_encoder(x)
if self.debug: print(f' encoded images shape: {x.shape}')
#Temporal Encoder
_, state = self.temporal_enc(self.drop(x))
if self.debug: print(f' temp_enc out shape: {state.shape}')
return self.temporal_agg(state)
def forward(self, imgs, feats):
"""It takes a rank 5 tensor
- imgs [bs, seq_len, channels, h, w]
- feats [bs, n_feats, seq_len]"""
if self.debug: print(f' Input -> (imgs: {imgs.shape}, feats: {feats.shape})')
#stack feature as images
if self.n_feats>0:
feats = feat2image(feats, target_size=imgs.shape[-2:])
imgs = torch.cat([imgs, feats], dim=2)
if self.debug: print(f' augmented imgs: {imgs.shape}')
#Compute all timesteps, probably can be parallelized
res = []
for i in range(self.horizon):
x_i = self.encode_timestep(imgs, i)
out = self.head(x_i)
res.append(out)
res = torch.stack(res, dim=1).squeeze()
if self.debug: print(f'{res.shape=}')
return res
# Cell
def metnet_splitter(m):
"A simple param splitter for MetNet"
return [params(m.image_encoder), params(m.te)+params(m.head)]
|
[
"axial_attention.AxialAttention"
] |
[((2831, 2901), 'axial_attention.AxialAttention', 'AxialAttention', ([], {'dim': 'hidden_dim', 'dim_index': '(1)', 'heads': '(8)', 'num_dimensions': '(2)'}), '(dim=hidden_dim, dim_index=1, heads=8, num_dimensions=2)\n', (2845, 2901), False, 'from axial_attention import AxialAttention\n')]
|
import logging
logger = logging.getLogger("__name__")
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"{asctime} - {name} - {levelname} - {message}", datefmt="%H:%M:%S", style="{"
)
console.setFormatter(formatter)
logger.addHandler(console)
## messages
logger.debug("Сообщение уровня debug: %s", "SOS")
logger.info("Сообщение уровня info")
logger.warning("Сообщение уровня warning")
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.getLogger"
] |
[((25, 54), 'logging.getLogger', 'logging.getLogger', (['"""__name__"""'], {}), "('__name__')\n", (42, 54), False, 'import logging\n'), ((97, 120), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (118, 120), False, 'import logging\n'), ((165, 266), 'logging.Formatter', 'logging.Formatter', (['"""{asctime} - {name} - {levelname} - {message}"""'], {'datefmt': '"""%H:%M:%S"""', 'style': '"""{"""'}), "('{asctime} - {name} - {levelname} - {message}', datefmt=\n '%H:%M:%S', style='{')\n", (182, 266), False, 'import logging\n')]
|