code stringlengths 22–1.05M | apis listlengths 1–3.31k | extract_api stringlengths 75–3.25M |
---|---|---|
import argparse
import json
import papermill as pm
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input Jupyter notebook")
parser.add_argument("output", help="output Jupyter notebook")
parser.add_argument("parameters", help="parameter file in JSON")
args = parser.parse_args()
parameters = json.load(open(args.parameters), parse_float=float)
pm.execute_notebook(args.input, args.output, parameters)
|
[
"papermill.execute_notebook",
"argparse.ArgumentParser"
] |
[((61, 86), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (84, 86), False, 'import argparse\n'), ((367, 423), 'papermill.execute_notebook', 'pm.execute_notebook', (['args.input', 'args.output', 'parameters'], {}), '(args.input, args.output, parameters)\n', (386, 423), True, 'import papermill as pm\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from time import sleep
import datetime
import time
class ExampleEvent:
def __init__(self, train):
print("ExampleEvent init");
self.train = train
def sensorDetected(self, rId):
pass
def stopAndTurn(self, *args):
print("[{}] Detected ExampleEvent".format(datetime.datetime.now()));
speed = self.train.getSpeed()
self.train.onStop()
sleep(5)
self.train.toggleDirection()
self.train.setSpeed(speed, True)
def directionToggled(self, direction):
pass
|
[
"datetime.datetime.now",
"time.sleep"
] |
[((1024, 1032), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (1029, 1032), False, 'from time import sleep\n'), ((912, 935), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (933, 935), False, 'import datetime\n')]
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility for Kubeflow-based orchestrator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from typing import Text
from kfp import dsl
from tfx.orchestration.experimental.runtime_parameter import runtime_string_parameter
def replace_placeholder(serialized_component: Text) -> Text:
"""Replaces the RuntimeParameter placeholders with kfp.dsl.PipelineParam."""
placeholders = re.findall(runtime_string_parameter.PARAMETER_PATTERN,
serialized_component)
for placeholder in placeholders:
parameter = runtime_string_parameter.RuntimeStringParameter.parse(
placeholder)
dsl_parameter = dsl.PipelineParam(name=parameter.name)
serialized_component = serialized_component.replace(placeholder,
str(dsl_parameter))
return serialized_component
|
[
"tfx.orchestration.experimental.runtime_parameter.runtime_string_parameter.RuntimeStringParameter.parse",
"re.findall",
"kfp.dsl.PipelineParam"
] |
[((1089, 1165), 're.findall', 're.findall', (['runtime_string_parameter.PARAMETER_PATTERN', 'serialized_component'], {}), '(runtime_string_parameter.PARAMETER_PATTERN, serialized_component)\n', (1099, 1165), False, 'import re\n'), ((1246, 1312), 'tfx.orchestration.experimental.runtime_parameter.runtime_string_parameter.RuntimeStringParameter.parse', 'runtime_string_parameter.RuntimeStringParameter.parse', (['placeholder'], {}), '(placeholder)\n', (1299, 1312), False, 'from tfx.orchestration.experimental.runtime_parameter import runtime_string_parameter\n'), ((1342, 1380), 'kfp.dsl.PipelineParam', 'dsl.PipelineParam', ([], {'name': 'parameter.name'}), '(name=parameter.name)\n', (1359, 1380), False, 'from kfp import dsl\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-04-25 23:12:05
# @Author : Bluethon (<EMAIL>)
# @Link : http://github.com/bluethon
from flask import Blueprint
auth = Blueprint('auth', __name__)
# noinspection PyUnresolvedReferences
from . import views
|
[
"flask.Blueprint"
] |
[((190, 217), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {}), "('auth', __name__)\n", (199, 217), False, 'from flask import Blueprint\n')]
|
# -*- coding: utf-8 -*-
"""
description:
"""
import unittest
import os
from bs4 import BeautifulSoup
from app.crawler import visible, extract_images_links, extract_text
from app.settings import BASE_DIR
class TestCrawler(unittest.TestCase):
def test_visible(self):
"""
in test_crawler.html all visible text contains 't' and non-visible 'f'
"""
with open(os.path.join(BASE_DIR, 'app/tests/test_html/test_crawler.html')) as html:
soup = BeautifulSoup(html, 'html.parser')
data = soup.findAll(text=True)
result = {text.strip() for text in filter(visible, data) if text.strip()}
self.assertEqual({'t'}, result)
result = [elem for elem in data if visible(elem)]
self.assertTrue(all(result))
class TestCrawlerAsync(unittest.IsolatedAsyncioTestCase):
async def test_extract_text(self):
with open(os.path.join(BASE_DIR, 'app/tests/test_html/example.html')) as html:
crawled = await extract_text(html)
expected = '''Example Domain
This domain is for use in illustrative examples in documents. You may use this
domain in literature without prior coordination or asking for permission.
More information...'''
self.assertEqual(expected, crawled)
async def test_extract_images_links(self):
with open(os.path.join(BASE_DIR, 'app/tests/test_html/test_crawler.html')) as html:
crawled = await extract_images_links(html)
expected = {'test1', 'test2', 'test3'}
self.assertEqual(expected, crawled)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"app.crawler.visible",
"app.crawler.extract_images_links",
"bs4.BeautifulSoup",
"os.path.join",
"app.crawler.extract_text"
] |
[((1597, 1612), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1610, 1612), False, 'import unittest\n'), ((487, 521), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (500, 521), False, 'from bs4 import BeautifulSoup\n'), ((394, 457), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""app/tests/test_html/test_crawler.html"""'], {}), "(BASE_DIR, 'app/tests/test_html/test_crawler.html')\n", (406, 457), False, 'import os\n'), ((728, 741), 'app.crawler.visible', 'visible', (['elem'], {}), '(elem)\n', (735, 741), False, 'from app.crawler import visible, extract_images_links, extract_text\n'), ((897, 955), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""app/tests/test_html/example.html"""'], {}), "(BASE_DIR, 'app/tests/test_html/example.html')\n", (909, 955), False, 'import os\n'), ((994, 1012), 'app.crawler.extract_text', 'extract_text', (['html'], {}), '(html)\n', (1006, 1012), False, 'from app.crawler import visible, extract_images_links, extract_text\n'), ((1344, 1407), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""app/tests/test_html/test_crawler.html"""'], {}), "(BASE_DIR, 'app/tests/test_html/test_crawler.html')\n", (1356, 1407), False, 'import os\n'), ((1446, 1472), 'app.crawler.extract_images_links', 'extract_images_links', (['html'], {}), '(html)\n', (1466, 1472), False, 'from app.crawler import visible, extract_images_links, extract_text\n')]
|
#! /usr/bin/env python3
import random
import threading
import time
class TestThread(threading.Thread):
def run(self):
for loop_number in range(10):
print("{0} Loop: {1}".format(self.name, loop_number))
time.sleep(random.randint(1, 5))
# Construct threads.
threads = []
for thread_number in range(5):
thread = TestThread()
thread.name = "thread-{0}".format(thread_number)
threads.append(thread)
# Start threads.
for thread in threads:
thread.start()
# Wait for threads to stop.
for thread in threads:
thread.join()
|
[
"random.randint"
] |
[((254, 274), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (268, 274), False, 'import random\n')]
|
from carotte import Carotte
from app.connector import *
my_app = Carotte()
@my_app.task
def refreshYoutube(author=None):
youtube = youtubeConnector(username="")
log = youtube.check()
return log
|
[
"carotte.Carotte"
] |
[((66, 75), 'carotte.Carotte', 'Carotte', ([], {}), '()\n', (73, 75), False, 'from carotte import Carotte\n')]
|
"""Word stemming utilities for Sphinx."""
import warnings
import snowballstemmer
from sphinx.deprecation import RemovedInSphinx70Warning
class PorterStemmer:
def __init__(self):
warnings.warn(f"{self.__class__.__name__} is deprecated, use "
"snowballstemmer.stemmer('porter') instead.",
RemovedInSphinx70Warning, stacklevel=2)
self.stemmer = snowballstemmer.stemmer('porter')
def stem(self, p: str, i: int, j: int) -> str:
warnings.warn(f"{self.__class__.__name__}.stem() is deprecated, use "
"snowballstemmer.stemmer('porter').stemWord() instead.",
RemovedInSphinx70Warning, stacklevel=2)
return self.stemmer.stemWord(p)
class BaseStemmer:
def __init__(self):
warnings.warn(f"{self.__class__.__name__} is deprecated, use "
"snowballstemmer.stemmer('porter') instead.",
RemovedInSphinx70Warning, stacklevel=3)
def stem(self, word: str) -> str:
raise NotImplementedError
class PyStemmer(BaseStemmer):
def __init__(self): # NoQA
super().__init__()
self.stemmer = snowballstemmer.stemmer('porter')
def stem(self, word: str) -> str:
warnings.warn(f"{self.__class__.__name__}.stem() is deprecated, use "
"snowballstemmer.stemmer('porter').stemWord() instead.",
RemovedInSphinx70Warning, stacklevel=2)
return self.stemmer.stemWord(word)
class StandardStemmer(BaseStemmer):
def __init__(self): # NoQA
super().__init__()
self.stemmer = snowballstemmer.stemmer('porter')
def stem(self, word: str) -> str:
warnings.warn(f"{self.__class__.__name__}.stem() is deprecated, use "
"snowballstemmer.stemmer('porter').stemWord() instead.",
RemovedInSphinx70Warning, stacklevel=2)
return self.stemmer.stemWord(word)
def get_stemmer() -> BaseStemmer:
warnings.warn("get_stemmer() is deprecated, use "
"snowballstemmer.stemmer('porter') instead.",
RemovedInSphinx70Warning, stacklevel=2)
return PyStemmer()
|
[
"warnings.warn",
"snowballstemmer.stemmer"
] |
[((2019, 2161), 'warnings.warn', 'warnings.warn', (['"""get_stemmer() is deprecated, use snowballstemmer.stemmer(\'porter\') instead."""', 'RemovedInSphinx70Warning'], {'stacklevel': '(2)'}), '(\n "get_stemmer() is deprecated, use snowballstemmer.stemmer(\'porter\') instead."\n , RemovedInSphinx70Warning, stacklevel=2)\n', (2032, 2161), False, 'import warnings\n'), ((195, 350), 'warnings.warn', 'warnings.warn', (['f"""{self.__class__.__name__} is deprecated, use snowballstemmer.stemmer(\'porter\') instead."""', 'RemovedInSphinx70Warning'], {'stacklevel': '(2)'}), '(\n f"{self.__class__.__name__} is deprecated, use snowballstemmer.stemmer(\'porter\') instead."\n , RemovedInSphinx70Warning, stacklevel=2)\n', (208, 350), False, 'import warnings\n'), ((411, 444), 'snowballstemmer.stemmer', 'snowballstemmer.stemmer', (['"""porter"""'], {}), "('porter')\n", (434, 444), False, 'import snowballstemmer\n'), ((505, 678), 'warnings.warn', 'warnings.warn', (['f"""{self.__class__.__name__}.stem() is deprecated, use snowballstemmer.stemmer(\'porter\').stemWord() instead."""', 'RemovedInSphinx70Warning'], {'stacklevel': '(2)'}), '(\n f"{self.__class__.__name__}.stem() is deprecated, use snowballstemmer.stemmer(\'porter\').stemWord() instead."\n , RemovedInSphinx70Warning, stacklevel=2)\n', (518, 678), False, 'import warnings\n'), ((809, 964), 'warnings.warn', 'warnings.warn', (['f"""{self.__class__.__name__} is deprecated, use snowballstemmer.stemmer(\'porter\') instead."""', 'RemovedInSphinx70Warning'], {'stacklevel': '(3)'}), '(\n f"{self.__class__.__name__} is deprecated, use snowballstemmer.stemmer(\'porter\') instead."\n , RemovedInSphinx70Warning, stacklevel=3)\n', (822, 964), False, 'import warnings\n'), ((1189, 1222), 'snowballstemmer.stemmer', 'snowballstemmer.stemmer', (['"""porter"""'], {}), "('porter')\n", (1212, 1222), False, 'import snowballstemmer\n'), ((1270, 1443), 'warnings.warn', 'warnings.warn', (['f"""{self.__class__.__name__}.stem() is deprecated, use snowballstemmer.stemmer(\'porter\').stemWord() instead."""', 'RemovedInSphinx70Warning'], {'stacklevel': '(2)'}), '(\n f"{self.__class__.__name__}.stem() is deprecated, use snowballstemmer.stemmer(\'porter\').stemWord() instead."\n , RemovedInSphinx70Warning, stacklevel=2)\n', (1283, 1443), False, 'import warnings\n'), ((1644, 1677), 'snowballstemmer.stemmer', 'snowballstemmer.stemmer', (['"""porter"""'], {}), "('porter')\n", (1667, 1677), False, 'import snowballstemmer\n'), ((1725, 1898), 'warnings.warn', 'warnings.warn', (['f"""{self.__class__.__name__}.stem() is deprecated, use snowballstemmer.stemmer(\'porter\').stemWord() instead."""', 'RemovedInSphinx70Warning'], {'stacklevel': '(2)'}), '(\n f"{self.__class__.__name__}.stem() is deprecated, use snowballstemmer.stemmer(\'porter\').stemWord() instead."\n , RemovedInSphinx70Warning, stacklevel=2)\n', (1738, 1898), False, 'import warnings\n')]
|
#! /usr/bin/env python3
"""
Copyright (c) 2020 Deutsches Elektronen-Synchrotron DESY
See LICENSE.txt for license details.
"""
import enum
import sys
import unittest
from systemrdl.rdltypes import AccessType
from hectare._hectare_types import Field, Register
from hectare._HectareVhdlGen import HectareVhdlGen
class TestHectareVhdlGen(unittest.TestCase):
DATA_W_BYTES = 4
def test_single_addr(self):
reg = Register("myreg", 8)
s = HectareVhdlGen._gen_single_addr(reg, self.DATA_W_BYTES)
self.assertEqual(s, "constant C_ADDR_MYREG : integer := 2;")
def test_single_field_range(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_field_range("myreg", field)
self.assertEqual(l[0], "constant C_FIELD_MYREG_MYFIELD_MSB : integer := 15;")
self.assertEqual(l[1], "constant C_FIELD_MYREG_MYFIELD_LSB : integer := 8;")
def test_gen_single_reg(self):
reg = Register("myreg", 8)
s = HectareVhdlGen._gen_single_reg(reg, self.DATA_W_BYTES)
self.assertEqual(s, "signal reg_myreg : std_logic_vector(32-1 downto 0);")
def test_gen_single_port(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_port("myreg", field)
self.assertEqual(l[0], "myreg_myfield_o : out std_logic_vector(7 downto 0);")
self.assertEqual(l[1], "myreg_myfield_i : in std_logic_vector(7 downto 0);")
def test_gen_single_port_onebit(self):
field = Field("myfield", 8, 8, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_port("myreg", field)
self.assertEqual(l[0], "myreg_myfield_o : out std_logic;")
self.assertEqual(l[1], "myreg_myfield_i : in std_logic;")
def test_gen_single_port_swmod(self):
field = Field("myfield", 8, 15, AccessType.r, AccessType.rw, swmod=True)
l = HectareVhdlGen._gen_single_port("myreg", field)
self.assertEqual(len(l), 2, "expect to generate _o and _swmod ports")
self.assertEqual(l[0], "myreg_myfield_o : out std_logic_vector(7 downto 0);")
self.assertEqual(l[1], "myreg_myfield_swmod : out std_logic;")
def test_gen_single_hw_access_reg(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(l[0], "myreg_myfield_o <= reg_myreg(15 downto 8);")
self.assertEqual(
l[1], "reg_myreg(15 downto 8) <= myreg_myfield_i when rising_edge(clk);"
)
def test_gen_single_hw_access_no_reg(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=False)
self.assertEqual(l[0], "myreg_myfield_o <= reg_myreg(15 downto 8);")
self.assertEqual(l[1], "reg_myreg(15 downto 8) <= myreg_myfield_i;")
def test_gen_single_hw_access_no_reg_onebit(self):
field = Field("myfield", 8, 8, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(l[0], "myreg_myfield_o <= reg_myreg(8);")
self.assertEqual(l[1], "reg_myreg(8) <= myreg_myfield_i when rising_edge(clk);")
def test_gen_single_hw_access_enum_out(self):
class ColorSel(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
field = Field(
"myfield", 0, 2, AccessType.r, AccessType.rw, swmod=False, encode=ColorSel
)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(
l[0],
"myreg_myfield_o <= ColorSel_t'val(to_integer(unsigned(reg_myreg(2 downto 0))));",
)
def test_gen_single_hw_access_enum_in(self):
class ColorSel(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
field = Field(
"myfield", 0, 2, AccessType.w, AccessType.rw, swmod=False, encode=ColorSel
)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(
l[0],
"reg_myreg(2 downto 0) <= std_logic_vector(to_unsigned(ColorSel_t'pos(myreg_myfield_i), 3)) when rising_edge(clk);",
)
def test_gen_single_reg_swmod_no_swmod(self):
reg = Register("myreg", 0)
reg.fields.append(
Field("myfield1", 0, 7, AccessType.rw, AccessType.rw, swmod=False)
)
reg.fields.append(
Field("myfield2", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
)
swmod_reg = HectareVhdlGen._gen_single_reg_swmod(reg, self.DATA_W_BYTES)
self.assertIsNone(
swmod_reg, "if none of the fields has swmod, no swmod reg is generated"
)
def test_gen_single_reg_swmod_with_swmod(self):
reg = Register("myreg", 0)
reg.fields.append(
Field("myfield1", 0, 7, AccessType.rw, AccessType.rw, swmod=False)
)
reg.fields.append(
Field("myfield2", 8, 15, AccessType.rw, AccessType.rw, swmod=True)
)
swmod_reg = HectareVhdlGen._gen_single_reg_swmod(reg, self.DATA_W_BYTES)
self.assertEqual(
swmod_reg,
"signal reg_myreg_swmod : std_logic;",
"if at least one reg has swmod attribute set, reg is generated",
)
def test_gen_single_enum_type(self):
class ColorSel(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
field = Field(
"myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False, encode=ColorSel
)
lines = HectareVhdlGen._gen_single_enum_type(field)
self.assertEqual(
len(lines),
1 + 3 + 1,
"one line per each item, declaration and closing bracket",
)
self.assertTrue("RED" in lines[1])
self.assertTrue("GREEN" in lines[2])
self.assertTrue("BLUE" in lines[3])
def test_gen_single_enum_type_invalid(self):
""" generates un-supported encoding (inc != 1) and checks if generator raises expection """
class ColorSelInvalid(enum.Enum):
RED = 0
GREEN = 1
BLUE = 10 # <- this is not supported
field = Field(
"myfield",
8,
15,
AccessType.rw,
AccessType.rw,
swmod=False,
encode=ColorSelInvalid,
)
self.assertRaises(AssertionError, HectareVhdlGen._gen_single_enum_type, field)
def test_gen_single_reset_assignment(self):
RESET_VAL = 0x12
field = Field(
"myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False, reset=RESET_VAL
)
line = HectareVhdlGen._gen_single_reset_assignment("myreg", field)
assign_val = line.split("<=")[1].strip().replace(";", "")
self.assertEqual(assign_val, '"{0:08b}"'.format(RESET_VAL), "reset value")
self.assertEqual(len(assign_val), 8+2, "assign value must be of same size as the field")
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"hectare._hectare_types.Register",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_reset_assignment",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_port",
"hectare._hectare_types.Field",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_field_range",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_addr",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_reg",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_hw_access",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_enum_type",
"hectare._HectareVhdlGen.HectareVhdlGen._gen_single_reg_swmod"
] |
[((7276, 7291), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7289, 7291), False, 'import unittest\n'), ((429, 449), 'hectare._hectare_types.Register', 'Register', (['"""myreg"""', '(8)'], {}), "('myreg', 8)\n", (437, 449), False, 'from hectare._hectare_types import Field, Register\n'), ((462, 517), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_addr', 'HectareVhdlGen._gen_single_addr', (['reg', 'self.DATA_W_BYTES'], {}), '(reg, self.DATA_W_BYTES)\n', (493, 517), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((643, 709), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield', 8, 15, AccessType.rw, AccessType.rw, swmod=False)\n", (648, 709), False, 'from hectare._hectare_types import Field, Register\n'), ((722, 776), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_field_range', 'HectareVhdlGen._gen_single_field_range', (['"""myreg"""', 'field'], {}), "('myreg', field)\n", (760, 776), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((998, 1018), 'hectare._hectare_types.Register', 'Register', (['"""myreg"""', '(8)'], {}), "('myreg', 8)\n", (1006, 1018), False, 'from hectare._hectare_types import Field, Register\n'), ((1031, 1085), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_reg', 'HectareVhdlGen._gen_single_reg', (['reg', 'self.DATA_W_BYTES'], {}), '(reg, self.DATA_W_BYTES)\n', (1061, 1085), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((1222, 1288), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield', 8, 15, AccessType.rw, AccessType.rw, swmod=False)\n", (1227, 1288), False, 'from hectare._hectare_types import Field, Register\n'), ((1301, 1348), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_port', 'HectareVhdlGen._gen_single_port', (['"""myreg"""', 'field'], {}), "('myreg', field)\n", (1332, 1348), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((1580, 1645), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(8)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield', 8, 8, AccessType.rw, AccessType.rw, swmod=False)\n", (1585, 1645), False, 'from hectare._hectare_types import Field, Register\n'), ((1658, 1705), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_port', 'HectareVhdlGen._gen_single_port', (['"""myreg"""', 'field'], {}), "('myreg', field)\n", (1689, 1705), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((1898, 1962), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.r', 'AccessType.rw'], {'swmod': '(True)'}), "('myfield', 8, 15, AccessType.r, AccessType.rw, swmod=True)\n", (1903, 1962), False, 'from hectare._hectare_types import Field, Register\n'), ((1975, 2022), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_port', 'HectareVhdlGen._gen_single_port', (['"""myreg"""', 'field'], {}), "('myreg', field)\n", (2006, 2022), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((2320, 2386), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield', 8, 15, AccessType.rw, AccessType.rw, swmod=False)\n", (2325, 2386), False, 'from hectare._hectare_types import Field, Register\n'), ((2399, 2464), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_hw_access', 'HectareVhdlGen._gen_single_hw_access', (['"""myreg"""', 
'field'], {'in_reg': '(True)'}), "('myreg', field, in_reg=True)\n", (2435, 2464), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((2728, 2794), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield', 8, 15, AccessType.rw, AccessType.rw, swmod=False)\n", (2733, 2794), False, 'from hectare._hectare_types import Field, Register\n'), ((2807, 2873), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_hw_access', 'HectareVhdlGen._gen_single_hw_access', (['"""myreg"""', 'field'], {'in_reg': '(False)'}), "('myreg', field, in_reg=False)\n", (2843, 2873), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((3100, 3165), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(8)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield', 8, 8, AccessType.rw, AccessType.rw, swmod=False)\n", (3105, 3165), False, 'from hectare._hectare_types import Field, Register\n'), ((3178, 3243), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_hw_access', 'HectareVhdlGen._gen_single_hw_access', (['"""myreg"""', 'field'], {'in_reg': '(True)'}), "('myreg', field, in_reg=True)\n", (3214, 3243), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((3566, 3652), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(0)', '(2)', 'AccessType.r', 'AccessType.rw'], {'swmod': '(False)', 'encode': 'ColorSel'}), "('myfield', 0, 2, AccessType.r, AccessType.rw, swmod=False, encode=\n ColorSel)\n", (3571, 3652), False, 'from hectare._hectare_types import Field, Register\n'), ((3682, 3747), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_hw_access', 'HectareVhdlGen._gen_single_hw_access', (['"""myreg"""', 'field'], {'in_reg': '(True)'}), "('myreg', field, in_reg=True)\n", (3718, 3747), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((4062, 4148), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(0)', '(2)', 'AccessType.w', 'AccessType.rw'], {'swmod': '(False)', 'encode': 'ColorSel'}), "('myfield', 0, 2, AccessType.w, AccessType.rw, swmod=False, encode=\n ColorSel)\n", (4067, 4148), False, 'from hectare._hectare_types import Field, Register\n'), ((4178, 4243), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_hw_access', 'HectareVhdlGen._gen_single_hw_access', (['"""myreg"""', 'field'], {'in_reg': '(True)'}), "('myreg', field, in_reg=True)\n", (4214, 4243), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((4492, 4512), 'hectare._hectare_types.Register', 'Register', (['"""myreg"""', '(0)'], {}), "('myreg', 0)\n", (4500, 4512), False, 'from hectare._hectare_types import Field, Register\n'), ((4766, 4826), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_reg_swmod', 'HectareVhdlGen._gen_single_reg_swmod', (['reg', 'self.DATA_W_BYTES'], {}), '(reg, self.DATA_W_BYTES)\n', (4802, 4826), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((5015, 5035), 'hectare._hectare_types.Register', 'Register', (['"""myreg"""', '(0)'], {}), "('myreg', 0)\n", (5023, 5035), False, 'from hectare._hectare_types import Field, Register\n'), ((5288, 5348), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_reg_swmod', 'HectareVhdlGen._gen_single_reg_swmod', (['reg', 'self.DATA_W_BYTES'], {}), '(reg, self.DATA_W_BYTES)\n', (5324, 5348), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((5693, 5781), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], 
{'swmod': '(False)', 'encode': 'ColorSel'}), "('myfield', 8, 15, AccessType.rw, AccessType.rw, swmod=False, encode=\n ColorSel)\n", (5698, 5781), False, 'from hectare._hectare_types import Field, Register\n'), ((5816, 5859), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_enum_type', 'HectareVhdlGen._gen_single_enum_type', (['field'], {}), '(field)\n', (5852, 5859), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((6448, 6543), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)', 'encode': 'ColorSelInvalid'}), "('myfield', 8, 15, AccessType.rw, AccessType.rw, swmod=False, encode=\n ColorSelInvalid)\n", (6453, 6543), False, 'from hectare._hectare_types import Field, Register\n'), ((6814, 6902), 'hectare._hectare_types.Field', 'Field', (['"""myfield"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)', 'reset': 'RESET_VAL'}), "('myfield', 8, 15, AccessType.rw, AccessType.rw, swmod=False, reset=\n RESET_VAL)\n", (6819, 6902), False, 'from hectare._hectare_types import Field, Register\n'), ((6936, 6995), 'hectare._HectareVhdlGen.HectareVhdlGen._gen_single_reset_assignment', 'HectareVhdlGen._gen_single_reset_assignment', (['"""myreg"""', 'field'], {}), "('myreg', field)\n", (6979, 6995), False, 'from hectare._HectareVhdlGen import HectareVhdlGen\n'), ((4552, 4618), 'hectare._hectare_types.Field', 'Field', (['"""myfield1"""', '(0)', '(7)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield1', 0, 7, AccessType.rw, AccessType.rw, swmod=False)\n", (4557, 4618), False, 'from hectare._hectare_types import Field, Register\n'), ((4668, 4735), 'hectare._hectare_types.Field', 'Field', (['"""myfield2"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield2', 8, 15, AccessType.rw, AccessType.rw, swmod=False)\n", (4673, 4735), False, 'from hectare._hectare_types import Field, Register\n'), ((5075, 5141), 'hectare._hectare_types.Field', 'Field', (['"""myfield1"""', '(0)', '(7)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(False)'}), "('myfield1', 0, 7, AccessType.rw, AccessType.rw, swmod=False)\n", (5080, 5141), False, 'from hectare._hectare_types import Field, Register\n'), ((5191, 5257), 'hectare._hectare_types.Field', 'Field', (['"""myfield2"""', '(8)', '(15)', 'AccessType.rw', 'AccessType.rw'], {'swmod': '(True)'}), "('myfield2', 8, 15, AccessType.rw, AccessType.rw, swmod=True)\n", (5196, 5257), False, 'from hectare._hectare_types import Field, Register\n')]
|
#
# This file is part of pyasn1-alt-modules software.
#
# Created by <NAME>
# Copyright (c) 2020-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_alt_modules import pem
from pyasn1_alt_modules import rfc5652
from pyasn1_alt_modules import rfc9092
class GeofeedCSVTestCase(unittest.TestCase):
pem_text = """\
<KEY>
"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
assert asn1Object['contentType'] == rfc5652.id_signedData
sd, rest = der_decoder(asn1Object['content'],
asn1Spec=rfc5652.SignedData())
self.assertFalse(rest)
self.assertTrue(sd.prettyPrint())
self.assertEqual(asn1Object['content'], der_encoder(sd))
found = False
for sa in sd['signerInfos'][0]['signedAttrs']:
if sa['attrType'] == rfc5652.id_contentType:
ct, rest = der_decoder(sa['attrValues'][0],
asn1Spec=rfc5652.ContentType())
self.assertFalse(rest)
self.assertTrue(ct.prettyPrint())
self.assertEqual(sa['attrValues'][0], der_encoder(ct))
self.assertEqual(rfc9092.id_ct_geofeedCSVwithCRLF, ct)
found = True
assert found
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"unittest.TextTestRunner",
"pyasn1_alt_modules.rfc5652.ContentType",
"pyasn1.codec.der.decoder.decode",
"unittest.TestLoader",
"pyasn1_alt_modules.rfc5652.ContentInfo",
"pyasn1_alt_modules.pem.readBase64fromText",
"pyasn1.codec.der.encoder.encode",
"pyasn1_alt_modules.rfc5652.SignedData"
] |
[((599, 620), 'pyasn1_alt_modules.rfc5652.ContentInfo', 'rfc5652.ContentInfo', ([], {}), '()\n', (618, 620), False, 'from pyasn1_alt_modules import rfc5652\n'), ((670, 707), 'pyasn1_alt_modules.pem.readBase64fromText', 'pem.readBase64fromText', (['self.pem_text'], {}), '(self.pem_text)\n', (692, 707), False, 'from pyasn1_alt_modules import pem\n'), ((735, 781), 'pyasn1.codec.der.decoder.decode', 'der_decoder', (['substrate'], {'asn1Spec': 'self.asn1Spec'}), '(substrate, asn1Spec=self.asn1Spec)\n', (746, 781), True, 'from pyasn1.codec.der.decoder import decode as der_decoder\n'), ((1772, 1793), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1791, 1793), False, 'import unittest\n'), ((899, 922), 'pyasn1.codec.der.encoder.encode', 'der_encoder', (['asn1Object'], {}), '(asn1Object)\n', (910, 922), True, 'from pyasn1.codec.der.encoder import encode as der_encoder\n'), ((1209, 1224), 'pyasn1.codec.der.encoder.encode', 'der_encoder', (['sd'], {}), '(sd)\n', (1220, 1224), True, 'from pyasn1.codec.der.encoder import encode as der_encoder\n'), ((1869, 1905), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1892, 1905), False, 'import unittest\n'), ((1066, 1086), 'pyasn1_alt_modules.rfc5652.SignedData', 'rfc5652.SignedData', ([], {}), '()\n', (1084, 1086), False, 'from pyasn1_alt_modules import rfc5652\n'), ((1621, 1636), 'pyasn1.codec.der.encoder.encode', 'der_encoder', (['ct'], {}), '(ct)\n', (1632, 1636), True, 'from pyasn1.codec.der.encoder import encode as der_encoder\n'), ((1452, 1473), 'pyasn1_alt_modules.rfc5652.ContentType', 'rfc5652.ContentType', ([], {}), '()\n', (1471, 1473), False, 'from pyasn1_alt_modules import rfc5652\n')]
|
"""
MIT License
Copyright (c) 2019 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from jishaku.paginators import PaginatorInterface, PaginatorEmbedInterface, WrappedPaginator
from fire.converters import Member
from discord.ext import commands
from aiotrello import Trello
from typing import Union
import discord
import datetime
import os
import platform
import json
import time
import psutil
import asyncio
import traceback
import humanfriendly
import inspect
import textwrap
import io
import copy
import aiohttp
import subprocess
import random
launchtime = datetime.datetime.utcnow()
process = psutil.Process(os.getpid())
print("fire.py has been loaded")
def config(path: str = None):
with open('config.json', 'r') as cfg:
config = json.load(cfg)
if path != None:
return config[path]
else:
return config
config = config()
def isadmin(ctx):
"""Checks if the author is an admin"""
if str(ctx.author.id) not in config('admins'):
admin = False
else:
admin = True
return admin
class firecog(commands.Cog, name="Main Commands"):
def __init__(self, bot):
self.bot = bot
self.trello = Trello(key=config['trellokey'], token=config['trellotoken'])
self.launchtime = launchtime
self._last_result = None
def cleanup_code(self, content):
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
return content.strip('` \n')
@commands.command(name="invite")
async def inviteme(self, ctx):
return await ctx.send("https://gaminggeek.dev/fire")
@commands.command(description="Shows you my ping to discord's servers")
async def ping(self, ctx):
"""PFXping"""
latency = round(self.bot.latency * 1000)
start = round(time.time()*1000)
msg = await ctx.send(content="Pinging...")
end = round(time.time()*1000)
elapsed = round(end - start)
color = ctx.author.color
embed = discord.Embed(title=f":ping_pong: {elapsed}ms.\n:heartpulse: {latency}ms.", colour=color, timestamp=datetime.datetime.utcnow())
await msg.edit(content="`Pong!`", embed=embed)
@commands.command(description="Suggest a feature")
@commands.cooldown(1, 300, commands.BucketType.user)
async def suggest(self, ctx, *, suggestion: str):
"""PFXsuggest <suggestion>"""
if suggestion == None:
await ctx.send("You can't suggest nothing!")
else:
board = await self.trello.get_board(lambda b: b.name == "Fire")
suggestions = await board.get_list(lambda l: l.name == "Suggestions")
card = await suggestions.create_card(suggestion, f"Suggested by {ctx.author.name} ({ctx.author.id})")
now = datetime.datetime.utcnow().strftime('%d/%m/%Y @ %I:%M:%S %p')
await card.add_comment(f"Suggested in channel {ctx.channel.name} ({ctx.channel.id}) in guild {ctx.guild.name} ({ctx.guild.id}) at {now} UTC")
await ctx.send(f"Thanks! Your suggestion was added to the Trello @ <{card.url}>. Any abuse will lead to being blacklisted from Fire!")
@commands.command(description="Shows you some stats about me.", aliases=['about'])
async def stats(self, ctx):
"""PFXstats"""
msg = await ctx.send('Gathering info...')
delta_uptime = datetime.datetime.utcnow() - launchtime
hours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
uptime = f"{days}d, {hours}h, {minutes}m, {seconds}s"
cpustats = psutil.cpu_percent()
ramuse = (process.memory_info().rss / 1024) / 1000
online = 0
idle = 0
dnd = 0
offline = 0
streaming = 0
members = self.bot.get_all_members()
for member in members:
if str(member.status) == 'online':
online = online + 1
if str(member.status) == 'idle':
idle = idle + 1
if str(member.status) == 'dnd':
dnd = dnd + 1
if str(member.status) == 'offline':
offline = offline + 1
try:
activity = member.activities[0]
if isinstance(member.activities[0], discord.activity.Streaming):
streaming = streaming + 1
except Exception:
pass
users = online + idle + dnd + offline
embed = discord.Embed(colour=ctx.author.color, timestamp=datetime.datetime.utcnow())
ownerboi = self.bot.get_user(287698408855044097)
embed.set_author(name=f"Bot made by {ownerboi}", url="https://gaminggeek.dev", icon_url=str(ownerboi.avatar_url))
embed.add_field(name="Runtime", value=f"{uptime}", inline=False)
embed.add_field(name="CPU", value=f"{round(cpustats)}%", inline=False)
embed.add_field(name="RAM", value=f"{ramuse} MB", inline=False)
embed.add_field(name="Version Info", value=f"discord.py {discord.__version__} | Python: 3.7.4", inline=False)
embed.add_field(name="Guilds", value=f"{len(self.bot.guilds)}", inline=True)
embed.add_field(name="Prefix", value=f"{ctx.prefix}", inline=True)
embed.add_field(name="Commands", value=len(self.bot.commands), inline=True)
embed.add_field(name="Members", value=f"{self.bot.get_emoji(313956277808005120)} {online:,d}\n{self.bot.get_emoji(313956277220802560)} {idle:,d}\n{self.bot.get_emoji(313956276893646850)} {dnd:,d}\n{self.bot.get_emoji(313956277132853248)} {streaming:,d}\n{self.bot.get_emoji(313956277237710868)} {offline:,d}\nTotal: {users:,d}\n ", inline=False)
await msg.edit(content=None, embed=embed)
@commands.command(description="Shows you all the guilds I'm in.")
async def listguilds(self, ctx):
"""PFXlistguilds"""
if not isadmin(ctx):
return
paginator = WrappedPaginator(prefix='```vbs', suffix='```', max_size=1500)
gcount = 1
for guild in self.bot.guilds:
if guild == ctx.guild:
current = ' (HERE)'
else:
current = ''
#paginator.add_line(f'[{gcount}] {guild.name}{current} || {guild.owner} || {guild.member_count} Members')
paginator.add_line(f'[{gcount}] {guild.name}{current} || {guild.owner} || {guild.member_count} Members')
gcount = gcount + 1
interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
await interface.send_to(ctx)
@commands.command(name='rpc', description='View someone\'s rich presence')
async def rpc(self, ctx, *, member: Member = None, MSG: discord.Message = None, ACT: int = 0):
"""PFXrpc [<member>]"""
if not member:
member = ctx.author
if ACT == -1:
return
try:
activity = member.activities[ACT]
except IndexError:
if ACT != 0:
return
activity = None
embed = None
if activity != None:
if activity.name == 'Spotify':
adict = activity.to_dict()
embed = discord.Embed(color=activity.color, timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'{member}\'s Spotify Info', icon_url='https://cdn.discordapp.com/emojis/471412444716072960.png')
embed.add_field(name='Song', value=activity.title, inline=False)
embed.add_field(name='Artists', value=', '.join(activity.artists), inline=False)
embed.add_field(name='Album', value=activity.album, inline=False)
duration = humanfriendly.format_timespan(activity.duration)
now = datetime.datetime.utcnow()
elapsed = humanfriendly.format_timespan(now - activity.start)
left = humanfriendly.format_timespan(activity.end - now)
if 'day' in left:
left = '0:00:00'
embed.add_field(name='Times', value=f'Duration: {duration}\nElapsed: {elapsed}\nLeft: {left}', inline=False)
embed.add_field(name='Listen to this track', value=f'[{activity.title}](https://open.spotify.com/track/{activity.track_id})', inline=False)
embed.set_thumbnail(url=activity.album_cover_url)
elif type(activity) == discord.Streaming:
embed = discord.Embed(color=discord.Color.purple(), timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'{member}\'s Stream Info', icon_url='https://cdn.discordapp.com/emojis/603188557242433539.png')
if member.bot:
embed.add_field(name='Title', value=activity.name, inline=False)
else:
embed.add_field(name='Title', value=activity.name, inline=False)
embed.add_field(name='Twitch Name', value=activity.twitch_name, inline=False)
if activity.details != None:
embed.add_field(name='Game', value=activity.details, inline=False)
embed.add_field(name='URL', value=f'[{activity.twitch_name}]({activity.url})', inline=False)
elif type(activity) == discord.Activity:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow())
if activity.small_image_url != None:
embed.set_author(name=f'{member}\'s Game Info', icon_url=activity.small_image_url)
else:
embed.set_author(name=f'{member}\'s Game Info')
embed.add_field(name='Game', value=activity.name, inline=False)
now = datetime.datetime.utcnow()
elapsed = None
if activity.start:
elapsed = humanfriendly.format_timespan(now - activity.start)
if activity.details != None and activity.state != None and elapsed != None:
embed.add_field(name='Details', value=f'{activity.details}\n{activity.state}\n{elapsed} elapsed', inline=False)
elif activity.state != None and elapsed != None:
embed.add_field(name='Details', value=f'{activity.state}\n{elapsed} elapsed', inline=False)
elif activity.details != None and elapsed != None:
embed.add_field(name='Details', value=f'{activity.details}\n{elapsed} elapsed', inline=False)
elif activity.details != None and activity.state !=None and elapsed == None:
embed.add_field(name='Details', value=f'{activity.details}\n{activity.state}', inline=False)
elif activity.state != None and elapsed == None:
embed.add_field(name='Details', value=f'{activity.state}', inline=False)
elif activity.details != None and elapsed == None:
embed.add_field(name='Details', value=f'{activity.details}', inline=False)
if activity.large_image_url != None:
embed.set_thumbnail(url=activity.large_image_url)
else:
pass
if embed:
if MSG:
await MSG.edit(embed=embed)
def react_check(reaction, user):
return user.id == ctx.author.id
try:
reaction, user = await self.bot.wait_for('reaction_add', check=react_check, timeout=120)
except asyncio.TimeoutError:
return
if reaction.emoji == '⏹':
await MSG.delete()
elif reaction.emoji == '◀':
await MSG.remove_reaction('◀', ctx.author)
await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT-1)
elif reaction.emoji == '▶':
await MSG.remove_reaction('▶', ctx.author)
await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT+1)
else:
MSG = await ctx.send(embed=embed)
await MSG.add_reaction('⏹')
await MSG.add_reaction('◀')
await MSG.add_reaction('▶')
def react_check(reaction, user):
return user.id == ctx.author.id
try:
reaction, user = await self.bot.wait_for('reaction_add', check=react_check, timeout=120)
except asyncio.TimeoutError:
return
if reaction.emoji == '⏹':
await MSG.delete()
elif reaction.emoji == '◀':
await MSG.remove_reaction('◀', ctx.author)
await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT-1)
elif reaction.emoji == '▶':
await MSG.remove_reaction('▶', ctx.author)
await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT+1)
else:
await ctx.send(f'{discord.utils.escape_mentions(discord.utils.escape_markdown(str(member)))} doesn\'t seem to be playing something with rich presence integration...')
else:
await ctx.send(f'{discord.utils.escape_mentions(discord.utils.escape_markdown(str(member)))} doesn\'t seem to be playing something with rich presence integration...')
@commands.command(description="dab")
async def dab(self, ctx):
"""PFXdab"""
await ctx.send(f"{ctx.message.author.mention}, <o/")
@commands.command(description="idk")
async def warm(self, ctx, *, warm: str):
"""PFXwarm <item>"""
await ctx.send(f'🔥 Warming up {discord.utils.escape_mentions(discord.utils.escape_markdown(warm))}')
@commands.command(description='Cow goes moo')
async def cowsay(self, ctx, *, cow: str):
"""PFXcowsay <text>"""
async with aiohttp.ClientSession() as session:
async with session.get(f'http://cowsay.morecode.org/say?message={cow}&format=json') as resp:
body = await resp.json()
cow = body['cow']
cow = discord.utils.escape_mentions(cow).replace('`', '')
await ctx.send(f'```{cow}```')
@commands.command(description='ascii text')
async def ascii(self, ctx, *, text: str):
"""PFXascii <text>"""
textsplit = text.split(' ')
text = '+'.join(textsplit)
async with aiohttp.ClientSession() as session:
async with session.get(f'http://artii.herokuapp.com/make?text={text}') as resp:
body = await resp.text()
try:
asciimsg = discord.utils.escape_mentions(body).replace('`', '')
await ctx.send(f'```{asciimsg}```')
except discord.HTTPException as e:
e = str(e)
if 'Must be 2000 or fewer in length.' in e:
return await ctx.send('That message is too long. Try a shorter one!')
@commands.command(name='👏', aliases=['clap'], description='Emphasize your message with claps')
async def clap(self, ctx, *, clappyboi: str = 'You need to provide a message for me to emphasize'):
'''PFXclap <message>'''
message = discord.utils.escape_mentions(clappyboi)
message = message.split(' ')
message = ' 👏 '.join(message)
await ctx.send(message + ' 👏')
@commands.command(name="8ball")
async def eightball(self, ctx, *, q: str = None):
if not q:
return await ctx.send(f'<a:fireFailed:603214400748257302> You need to ask a question!')
possible = ["It is certain.", "It is decidedly so.", "Without a doubt.", "Yes - definitely.", "You may rely on it.", "As I see it, yes.", "Most likely.", "Outlook good.", "Yes.", "Signs point to yes.",
"Reply hazy, try again.", "Ask again later.", "Better not tell you now.", "Cannot predict now.", "Concentrate and ask again.",
"Don't count on it.", "My reply is no.", "My sources say no.", "Outlook not so good.", "Very doubtful."]
answer = random.choice(possible)
await ctx.send(answer)
def setup(bot):
bot.add_cog(firecog(bot))
|
[
"humanfriendly.format_timespan",
"json.load",
"os.getpid",
"discord.ext.commands.command",
"discord.utils.escape_markdown",
"jishaku.paginators.WrappedPaginator",
"aiotrello.Trello",
"random.choice",
"time.time",
"aiohttp.ClientSession",
"datetime.datetime.utcnow",
"discord.ext.commands.cooldown",
"discord.Color.purple",
"discord.utils.escape_mentions",
"jishaku.paginators.PaginatorInterface",
"psutil.cpu_percent"
] |
[((1597, 1623), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1621, 1623), False, 'import datetime\n'), ((1650, 1661), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1659, 1661), False, 'import os\n'), ((2476, 2507), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""invite"""'}), "(name='invite')\n", (2492, 2507), False, 'from discord.ext import commands\n'), ((2602, 2672), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Shows you my ping to discord\'s servers"""'}), '(description="Shows you my ping to discord\'s servers")\n', (2618, 2672), False, 'from discord.ext import commands\n'), ((3131, 3180), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Suggest a feature"""'}), "(description='Suggest a feature')\n", (3147, 3180), False, 'from discord.ext import commands\n'), ((3184, 3235), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(300)', 'commands.BucketType.user'], {}), '(1, 300, commands.BucketType.user)\n', (3201, 3235), False, 'from discord.ext import commands\n'), ((4015, 4101), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Shows you some stats about me."""', 'aliases': "['about']"}), "(description='Shows you some stats about me.', aliases=[\n 'about'])\n", (4031, 4101), False, 'from discord.ext import commands\n'), ((6350, 6414), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Shows you all the guilds I\'m in."""'}), '(description="Shows you all the guilds I\'m in.")\n', (6366, 6414), False, 'from discord.ext import commands\n'), ((7065, 7137), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""rpc"""', 'description': '"""View someone\'s rich presence"""'}), '(name=\'rpc\', description="View someone\'s rich presence")\n', (7081, 7137), False, 'from discord.ext import commands\n'), ((12805, 12840), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""dab"""'}), "(description='dab')\n", (12821, 12840), False, 'from discord.ext import commands\n'), ((12946, 12981), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""idk"""'}), "(description='idk')\n", (12962, 12981), False, 'from discord.ext import commands\n'), ((13158, 13202), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Cow goes moo"""'}), "(description='Cow goes moo')\n", (13174, 13202), False, 'from discord.ext import commands\n'), ((13571, 13613), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""ascii text"""'}), "(description='ascii text')\n", (13587, 13613), False, 'from discord.ext import commands\n'), ((14205, 14303), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""👏"""', 'aliases': "['clap']", 'description': '"""Emphasize your message with claps"""'}), "(name='👏', aliases=['clap'], description=\n 'Emphasize your message with claps')\n", (14221, 14303), False, 'from discord.ext import commands\n'), ((14586, 14616), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""8ball"""'}), "(name='8ball')\n", (14602, 14616), False, 'from discord.ext import commands\n'), ((1784, 1798), 'json.load', 'json.load', (['cfg'], {}), '(cfg)\n', (1793, 1798), False, 'import json\n'), ((2170, 2230), 'aiotrello.Trello', 'Trello', ([], {'key': "config['trellokey']", 'token': "config['trellotoken']"}), "(key=config['trellokey'], token=config['trellotoken'])\n", (2176, 2230), False, 'from aiotrello import 
Trello\n'), ((4468, 4488), 'psutil.cpu_percent', 'psutil.cpu_percent', ([], {}), '()\n', (4486, 4488), False, 'import psutil\n'), ((6523, 6585), 'jishaku.paginators.WrappedPaginator', 'WrappedPaginator', ([], {'prefix': '"""```vbs"""', 'suffix': '"""```"""', 'max_size': '(1500)'}), "(prefix='```vbs', suffix='```', max_size=1500)\n", (6539, 6585), False, 'from jishaku.paginators import PaginatorInterface, PaginatorEmbedInterface, WrappedPaginator\n'), ((6971, 7027), 'jishaku.paginators.PaginatorInterface', 'PaginatorInterface', (['ctx.bot', 'paginator'], {'owner': 'ctx.author'}), '(ctx.bot, paginator, owner=ctx.author)\n', (6989, 7027), False, 'from jishaku.paginators import PaginatorInterface, PaginatorEmbedInterface, WrappedPaginator\n'), ((14441, 14481), 'discord.utils.escape_mentions', 'discord.utils.escape_mentions', (['clappyboi'], {}), '(clappyboi)\n', (14470, 14481), False, 'import discord\n'), ((15232, 15255), 'random.choice', 'random.choice', (['possible'], {}), '(possible)\n', (15245, 15255), False, 'import random\n'), ((4208, 4234), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4232, 4234), False, 'import datetime\n'), ((13287, 13310), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (13308, 13310), False, 'import aiohttp\n'), ((13758, 13781), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (13779, 13781), False, 'import aiohttp\n'), ((2780, 2791), 'time.time', 'time.time', ([], {}), '()\n', (2789, 2791), False, 'import time\n'), ((2859, 2870), 'time.time', 'time.time', ([], {}), '()\n', (2868, 2870), False, 'import time\n'), ((3048, 3074), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3072, 3074), False, 'import datetime\n'), ((5203, 5229), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5227, 5229), False, 'import datetime\n'), ((8012, 8060), 'humanfriendly.format_timespan', 'humanfriendly.format_timespan', (['activity.duration'], {}), '(activity.duration)\n', (8041, 8060), False, 'import humanfriendly\n'), ((8072, 8098), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8096, 8098), False, 'import datetime\n'), ((8114, 8165), 'humanfriendly.format_timespan', 'humanfriendly.format_timespan', (['(now - activity.start)'], {}), '(now - activity.start)\n', (8143, 8165), False, 'import humanfriendly\n'), ((8178, 8227), 'humanfriendly.format_timespan', 'humanfriendly.format_timespan', (['(activity.end - now)'], {}), '(activity.end - now)\n', (8207, 8227), False, 'import humanfriendly\n'), ((13480, 13514), 'discord.utils.escape_mentions', 'discord.utils.escape_mentions', (['cow'], {}), '(cow)\n', (13509, 13514), False, 'import discord\n'), ((3663, 3689), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3687, 3689), False, 'import datetime\n'), ((13931, 13966), 'discord.utils.escape_mentions', 'discord.utils.escape_mentions', (['body'], {}), '(body)\n', (13960, 13966), False, 'import discord\n'), ((7616, 7642), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7640, 7642), False, 'import datetime\n'), ((9725, 9751), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9749, 9751), False, 'import datetime\n'), ((8667, 8689), 'discord.Color.purple', 'discord.Color.purple', ([], {}), '()\n', (8687, 8689), False, 'import discord\n'), ((8701, 8727), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8725, 8727), False, 'import datetime\n'), ((9812, 
9863), 'humanfriendly.format_timespan', 'humanfriendly.format_timespan', (['(now - activity.start)'], {}), '(now - activity.start)\n', (9841, 9863), False, 'import humanfriendly\n'), ((13116, 13151), 'discord.utils.escape_markdown', 'discord.utils.escape_markdown', (['warm'], {}), '(warm)\n', (13145, 13151), False, 'import discord\n'), ((9421, 9447), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9445, 9447), False, 'import datetime\n')]
|
#!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2020-07-31 11:03:17 +0100 (Fri, 31 Jul 2020)
#
# https://github.com/harisekhon/pytools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Finds missing files by numeric sequence, assuming a uniformly numbered file naming convention across files
Files / directories are given as arguments or via standard input
Directories are recursed and their files examined for missing numbers before each one
Only supply files / directories that should be sharing a contiguously numbered file naming convention in each
single run of this tool
Accounts for zero padding in numbered files
Caveats:
- This is more complicated than you'd first think as there are so many file naming variations that no code could ever
be universally bulletproof and will likely require advanced regex tuning to match your use case and naming convention
- Won't detect missing files higher than the highest numbered file as there is no way to know how many there should be.
If you are looking for missing MP3 files, then you might be able to check the mp3 tag metadata using programs like
'mediainfo' to get the total number of tracks and see if the files go that high
- Returns globs by default instead of explicit missing filenames since suffixes can vary after numbers. If you have a
simple enough use case with a single fixed filename convention such as 'blah_01.txt' then you can find code to print
the missing files more explicitly, but in the general case you cannot account for suffix naming that isn't consistent,
such as chapters of audiobooks eg.
'blah 01 - chapter about X.mp3'
'blah 02 - chapter about Y.mp3'
so in the general case you cannot always infer suffixes, hence why it is left as globs. If you are sure that the
suffixes don't change then you can specify --fixed-suffix and it will infer each file's suffix as the basis for any
numerically missing files in the sequence, but if used where this is not the case, it'll generate a lot of false
positives that the default globbing mode would have handled
- Doesn't currently find entire missing CD / disks in the naming format, but you should be able to see those cases
easily by eye
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
#import logging
import os
import re
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.3.2'
# pylint: disable=too-many-instance-attributes
class FindMissingFiles(CLI):
def __init__(self):
# Python 2.x
super(FindMissingFiles, self).__init__()
# Python 3.x
# super().__init__()
self.paths = []
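        # The default pattern below captures a run of digits as the sequence number; its negative
        # lookbehinds/lookaheads skip disc/CD numbers, dash-joined ranges, decimal fragments
        # and the trailing digit of extensions such as '.mp3'.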
self.regex_default = r'(?<!dis[ck]\s)' + \
r'(?<!CD\s)' + \
r'(?<!-)' + \
r'(?<!-\d)' + \
r'(?<!-\d\d)' + \
r'(?<!0)' + \
r'(?<!\d\.)' + \
r'(?<!\.mp)' + \
r'(\d+)' + \
r'(?![\w,@-])' + \
r'(?!\.\d)'
self.exclude_default = r'^\d+\s'
self.regex = None
self.include = None
self.exclude = None
self.fixed_suffix = False
self.missing_files = []
def add_options(self):
super(FindMissingFiles, self).add_options()
self.add_opt('-r', '--regex', metavar='REGEX', default=self.regex_default,
help='Regex capture of the portion of the filename to compare ' + \
'- must have capture brackets capturing an integer ' + \
'(default: "{}" )'\
.format(self.regex_default))
self.add_opt('-i', '--include', metavar='REGEX',
help=r"Include only paths that match the given case-insensitive regex (eg. '\.mp3$')")
self.add_opt('-e', '--exclude', metavar='REGEX', default=self.exclude_default,
help='Exclude paths that match the given case-insensitive regex (default: "{}" )'\
.format(self.exclude_default))
self.add_opt('-s', '--fixed-suffix', action='store_true',
help='Assume fixed suffixes and infer explicit filenames rather than globs. The reason this ' + \
'is not the default is that if this is not the case and there is some variation in ' + \
'suffixes, such as with audiobook chapters, then you will hit a lot of false positives ' + \
'that would have been caught by globbing')
def process_options(self):
super(FindMissingFiles, self).process_options()
self.regex = self.get_opt('regex')
self.include = self.get_opt('include')
self.exclude = self.get_opt('exclude')
self.fixed_suffix = self.get_opt('fixed_suffix')
validate_regex(self.regex)
self.regex = re.compile('(.*?)' + self.regex + '(.*)', re.I)
if self.include is not None:
validate_regex(self.include)
self.include = re.compile(self.include, re.I)
if self.exclude is not None:
validate_regex(self.exclude)
self.exclude = re.compile(self.exclude, re.I)
if self.args:
self.paths = self.args
else:
self.paths = sys.stdin.readlines()
log_option('paths', self.paths)
def is_included(self, path):
if not self.include:
return True
if self.include.search(path):
log.debug("including path: %s", path)
return True
return False
def is_excluded(self, path):
if not self.exclude:
return False
if self.exclude.search(path):
log.debug("excluding path: %s", path)
return True
return False
def run(self):
for path in self.paths:
if self.is_excluded(path):
continue
if not self.is_included(path):
continue
if not os.path.exists(path):
raise UnknownError('path not found: {}'.format(path))
if os.path.isdir(path):
self.process_directory(directory=path)
elif os.path.isfile(path):
self.check_file(filename=path)
def process_directory(self, directory):
for root, dirs, files in os.walk(directory, topdown=True):
for filename in files:
file_path = os.path.join(root, filename)
if not self.is_included(file_path):
continue
if self.is_excluded(file_path):
continue
self.check_file(filename=file_path)
for dirname in dirs:
dir_path = os.path.join(root, dirname)
if not self.is_included(dir_path):
continue
if self.is_excluded(dir_path):
continue
# massive depth directories will hit a recursion limit here but this is very rare in the real world
# and probably a sign the filesystem should be better structured
self.process_directory(directory=dir_path)
def check_file(self, filename):
log.debug('checking file \'%s\'', filename)
match = self.regex.search(os.path.basename(filename))
if not match:
log.debug('no numeric regex match for file, probably not a sequential file' + \
', skipping \'%s\'', filename)
return
# will error out here if you've supplied your own regex without capture brackets
# or if you've got pre-captures - let this bubble to user to fix their regex
file_prefix = os.path.join(os.path.dirname(filename), match.group(1))
file_number = match.group(2)
file_suffix = match.group(3)
if not isInt(file_number):
raise UnknownError('regex captured non-float for filename: {}'.format(filename))
if file_prefix is None:
file_prefix = ''
if file_suffix is None:
file_suffix = ''
padding = len(file_number)
file_number = int(file_number)
while file_number > 1:
file_number = self.determine_missing_file_backfill(file_prefix, file_number, padding, file_suffix)
if self.missing_files:
print('\n'.join(reversed(self.missing_files)))
self.missing_files = []
def determine_missing_file_backfill(self, file_prefix, file_number, padding, file_suffix):
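        # Check whether the next-lower numbered file exists: in --fixed-suffix mode an explicit
        # filename is tested, otherwise a glob is used because suffixes may vary. Once an existing
        # file is found the counter is set to -1, which ends the caller's backfill loop.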
file_number -= 1
if self.fixed_suffix:
explicit_last_filename = '{}{:0>%(padding)s}{}' % {'padding': padding}
explicit_last_filename = explicit_last_filename.format(file_prefix, file_number, file_suffix)
if not os.path.isfile(explicit_last_filename):
self.missing_files.append(explicit_last_filename)
else:
file_number = -1
else:
expected_last_filename_glob = '{}{:0>%(padding)s}*' % locals()
expected_last_filename_glob = expected_last_filename_glob.format(file_prefix, file_number)
if not glob.glob(expected_last_filename_glob):
self.missing_files.append(expected_last_filename_glob)
else:
file_number = -1
return file_number
if __name__ == '__main__':
FindMissingFiles().main()
|
[
"sys.path.append",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"os.walk",
"os.path.exists",
"os.path.isfile",
"harisekhon.utils.log_option",
"traceback.format_exc",
"sys.stdin.readlines",
"harisekhon.utils.validate_regex",
"harisekhon.utils.log.debug",
"glob.glob",
"harisekhon.utils.isInt",
"os.path.join",
"sys.exit",
"re.compile"
] |
[((2764, 2793), 'os.path.join', 'os.path.join', (['srcdir', '"""pylib"""'], {}), "(srcdir, 'pylib')\n", (2776, 2793), False, 'import os\n'), ((2794, 2817), 'sys.path.append', 'sys.path.append', (['libdir'], {}), '(libdir)\n', (2809, 2817), False, 'import sys\n'), ((2728, 2753), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2743, 2753), False, 'import os\n'), ((3011, 3022), 'sys.exit', 'sys.exit', (['(4)'], {}), '(4)\n', (3019, 3022), False, 'import sys\n'), ((5630, 5656), 'harisekhon.utils.validate_regex', 'validate_regex', (['self.regex'], {}), '(self.regex)\n', (5644, 5656), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((5678, 5725), 're.compile', 're.compile', (["('(.*?)' + self.regex + '(.*)')", 're.I'], {}), "('(.*?)' + self.regex + '(.*)', re.I)\n", (5688, 5725), False, 'import re\n'), ((6124, 6155), 'harisekhon.utils.log_option', 'log_option', (['"""paths"""', 'self.paths'], {}), "('paths', self.paths)\n", (6134, 6155), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((7147, 7179), 'os.walk', 'os.walk', (['directory'], {'topdown': '(True)'}), '(directory, topdown=True)\n', (7154, 7179), False, 'import os\n'), ((8028, 8069), 'harisekhon.utils.log.debug', 'log.debug', (['"""checking file \'%s\'"""', 'filename'], {}), '("checking file \'%s\'", filename)\n', (8037, 8069), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((2975, 2997), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2995, 2997), False, 'import traceback\n'), ((5775, 5803), 'harisekhon.utils.validate_regex', 'validate_regex', (['self.include'], {}), '(self.include)\n', (5789, 5803), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((5831, 5861), 're.compile', 're.compile', (['self.include', 're.I'], {}), '(self.include, re.I)\n', (5841, 5861), False, 'import re\n'), ((5911, 5939), 'harisekhon.utils.validate_regex', 'validate_regex', (['self.exclude'], {}), '(self.exclude)\n', (5925, 5939), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((5967, 5997), 're.compile', 're.compile', (['self.exclude', 're.I'], {}), '(self.exclude, re.I)\n', (5977, 5997), False, 'import re\n'), ((6094, 6115), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (6113, 6115), False, 'import sys\n'), ((6293, 6330), 'harisekhon.utils.log.debug', 'log.debug', (['"""including path: %s"""', 'path'], {}), "('including path: %s', path)\n", (6302, 6330), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((6514, 6551), 'harisekhon.utils.log.debug', 'log.debug', (['"""excluding path: %s"""', 'path'], {}), "('excluding path: %s', path)\n", (6523, 6551), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((6907, 6926), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6920, 6926), False, 'import os\n'), ((8106, 8132), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (8122, 8132), False, 'import os\n'), ((8168, 8278), 'harisekhon.utils.log.debug', 'log.debug', (['(\'no numeric regex match for file, probably not a sequential file\' +\n ", skipping \'%s\'")', 'filename'], {}), '(\'no numeric regex match for file, probably not a sequential file\' +\n ", skipping \'%s\'", filename)\n', (8177, 8278), False, 'from harisekhon.utils import log, 
log_option, validate_regex, isInt, UnknownError\n'), ((8529, 8554), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (8544, 8554), False, 'import os\n'), ((8661, 8679), 'harisekhon.utils.isInt', 'isInt', (['file_number'], {}), '(file_number)\n', (8666, 8679), False, 'from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError\n'), ((6800, 6820), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6814, 6820), False, 'import os\n'), ((7000, 7020), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (7014, 7020), False, 'import os\n'), ((7244, 7272), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (7256, 7272), False, 'import os\n'), ((7543, 7570), 'os.path.join', 'os.path.join', (['root', 'dirname'], {}), '(root, dirname)\n', (7555, 7570), False, 'import os\n'), ((9593, 9631), 'os.path.isfile', 'os.path.isfile', (['explicit_last_filename'], {}), '(explicit_last_filename)\n', (9607, 9631), False, 'import os\n'), ((9961, 9999), 'glob.glob', 'glob.glob', (['expected_last_filename_glob'], {}), '(expected_last_filename_glob)\n', (9970, 9999), False, 'import glob\n')]
|
#!/usr/bin/env python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
from partner import PartnerService
def main():
"""
Entry point.
"""
DBusGMainLoop(set_as_default=True)
PartnerService()
GObject.MainLoop().run()
if __name__ == '__main__':
main()
|
[
"dbus.mainloop.glib.DBusGMainLoop",
"partner.PartnerService",
"gi.repository.GObject.MainLoop"
] |
[((249, 283), 'dbus.mainloop.glib.DBusGMainLoop', 'DBusGMainLoop', ([], {'set_as_default': '(True)'}), '(set_as_default=True)\n', (262, 283), False, 'from dbus.mainloop.glib import DBusGMainLoop\n'), ((288, 304), 'partner.PartnerService', 'PartnerService', ([], {}), '()\n', (302, 304), False, 'from partner import PartnerService\n'), ((309, 327), 'gi.repository.GObject.MainLoop', 'GObject.MainLoop', ([], {}), '()\n', (325, 327), False, 'from gi.repository import GObject\n')]
|
from django.conf.urls import url
from tworaven_apps.api_docs import views, views_swagger
urlpatterns = (
url(r'^grpc-test-form$',
views.view_test_form,
name='view_test_form'),
#url(r'^v1/swagger.yml$',
# views_swagger.view_swagger_doc_v1,
# name='view_swagger_doc_v1'),
)
|
[
"django.conf.urls.url"
] |
[((111, 179), 'django.conf.urls.url', 'url', (['"""^grpc-test-form$"""', 'views.view_test_form'], {'name': '"""view_test_form"""'}), "('^grpc-test-form$', views.view_test_form, name='view_test_form')\n", (114, 179), False, 'from django.conf.urls import url\n')]
|
"""
Author: <NAME>
Date: 12/05/2018
"""
import random
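# CPF layout recap: nine random base digits plus two check digits. Each check digit weights the
# digits so far (10..2 for the first, 11..2 for the second), sums them, and is 0 when the sum
# mod 11 is below 2, otherwise 11 minus that remainder - exactly what the two loops below do.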
def gera_cpf():  # Generates a random, valid CPF
    cpf = list(random.choices([0,1,2,3,4,5,6,7,8,9], k=9))  # Draw the nine base digits at random
    # Compute the first check digit
pesos = [10, 9, 8, 7, 6, 5, 4, 3, 2]
primeiro_digito = []
for idx,i in enumerate(cpf):
primeiro_digito.append(i * pesos[idx])
primeiro_digito = sum(primeiro_digito)
if (primeiro_digito % 11) < 2:
cpf.append(0)
else:
cpf.append(11 - (primeiro_digito % 11))
    # Compute the second check digit
pesos = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
segundo_digito = []
for idx,i in enumerate(cpf):
segundo_digito.append(i * pesos[idx])
segundo_digito = sum(segundo_digito)
if (segundo_digito % 11) < 2:
cpf.append(0)
else:
cpf.append(11 - (segundo_digito % 11))
return '{}{}{}.{}{}{}.{}{}{}-{}{}'.format(*cpf)
def verifica_cpf(cpf):  # Checks whether a CPF is valid
cpf = cpf.replace('.','')
cpf = cpf.replace('-', '')
cpf = list(map(int, cpf))
cpf_temp = cpf[0:9]
pesos = [10, 9, 8, 7, 6, 5, 4, 3, 2]
primeiro_digito = []
for idx, i in enumerate(cpf_temp):
primeiro_digito.append(i * pesos[idx])
primeiro_digito = sum(primeiro_digito)
if (primeiro_digito % 11) < 2:
cpf_temp.append(0)
else:
cpf_temp.append(11 - (primeiro_digito % 11))
pesos = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
segundo_digito = []
for idx,i in enumerate(cpf_temp):
segundo_digito.append(i * pesos[idx])
segundo_digito = sum(segundo_digito)
if (segundo_digito % 11) < 2:
cpf_temp.append(0)
else:
cpf_temp.append(11 - (segundo_digito % 11))
if cpf == cpf_temp:
return 'CPF valido!'
else:
return 'CPF invalido'
for x in range(50):
print(gera_cpf())
|
[
"random.choices"
] |
[((110, 161), 'random.choices', 'random.choices', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {'k': '(9)'}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], k=9)\n', (124, 161), False, 'import random\n')]
|
import pygame
from pygame.math import Vector2
from pygame.rect import Rect
from UI.Button import Button
class Container:
""" Position in screen space
menuSize in screen space"""
def __init__(self, position: Vector2, menuSize: Vector2):
self.size = menuSize
self.position = position
self.buttons = []
"""Buttons in the list have to be placed in relation to the container and not the screen"""
def addButton(self, button: Button):
self.buttons.append(button);
def drawContainer(self, surface, fontRenderer):
pygame.draw.rect(surface, (0, 255, 0), Rect(self.position.x, self.position.y, self.size.x, self.size.y))
for i in range(len(self.buttons)):
self.buttons[i].draw(surface, self.position, fontRenderer)
def getButtonPressed(self, clickPos):
relativePos = clickPos - self.position
for button in self.buttons:
if button.rect.x < relativePos.x < button.rect.topright[0] and button.rect.y < relativePos.y < button.rect.bottomright[1]:
button.click()
return True
return False
|
[
"pygame.rect.Rect"
] |
[((618, 682), 'pygame.rect.Rect', 'Rect', (['self.position.x', 'self.position.y', 'self.size.x', 'self.size.y'], {}), '(self.position.x, self.position.y, self.size.x, self.size.y)\n', (622, 682), False, 'from pygame.rect import Rect\n')]
|
# Generated by Django 3.0.6 on 2020-05-30 23:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0005_formfield_multiple_choices'),
]
operations = [
migrations.AlterModelOptions(
name='formfield',
options={'ordering': ['order']},
),
migrations.AddField(
model_name='formfield',
name='order',
field=models.PositiveIntegerField(default=0),
),
]
|
[
"django.db.models.PositiveIntegerField",
"django.db.migrations.AlterModelOptions"
] |
[((239, 318), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""formfield"""', 'options': "{'ordering': ['order']}"}), "(name='formfield', options={'ordering': ['order']})\n", (267, 318), False, 'from django.db import migrations, models\n'), ((464, 502), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (491, 502), False, 'from django.db import migrations, models\n')]
|
from django.conf.urls import patterns, include, url
from db_storage.views import ImageView
urlpatterns = patterns('',
url(r'^(?P<file_name>[^/]+)$', ImageView.as_view(), name='db_storage_image'),
)
|
[
"db_storage.views.ImageView.as_view"
] |
[((154, 173), 'db_storage.views.ImageView.as_view', 'ImageView.as_view', ([], {}), '()\n', (171, 173), False, 'from db_storage.views import ImageView\n')]
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
# -- Path setup --------------------------------------------------------------
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'ICONService API References'
copyright = '2019, ICON Foundation'
author = 'ICON Foundation'
about = {}
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../iconservice/__version__.py')
with open(path, 'r', encoding='utf-8') as f:
exec(f.read(), about)
version = about["__version__"]
release = ''
# -- General configuration ---------------------------------------------------
needs_sphinx = '1.8'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'recommonmark'
]
source_suffix = {'.md': 'markdown'}
master_doc = 'index'
add_module_names = False
autodoc_mock_imports = [
"setproctitle",
"plyvel",
"earlgrey",
"iconcommons",
"coincurve",
]
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_show_sourcelink = False
# -- Options for HTMLHelp output ---------------------------------------------
htmlhelp_basename = 'ICONServicedoc'
# -- Options for manual page output ------------------------------------------
man_pages = [
(master_doc, 'iconservice', 'ICONService Documentation',
[author], 1)
]
# -- recommenmark configuration -------------------------------------------------
github_doc_root = 'https://github.com/rtfd/recommonmark/tree/master/doc/'
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
|
[
"os.path.abspath",
"os.path.dirname"
] |
[((414, 438), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (429, 438), False, 'import os\n'), ((672, 697), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (687, 697), False, 'import os\n')]
|
"""Processor to create histograms and generate plots from fed comparator code."""
import awkward as ak
from coffea import hist, processor
from coffea.nanoevents.methods import candidate
ak.behavior.update(candidate.behavior)
class ComparatorCodeProcessor(processor.ProcessorABC):
"""Runs the analysis."""
def __init__(self):
dataset_axis = hist.Cat("pcc", "Pattern-Comparator Code Combination")
"""Initialize."""
"""First, you need to define a multi-dimensional histogram to hold
the data. Follow the form.
"tree": hist.Hist(
"Thing we're counting",
hist.Bin("leaf", "$units$", #number of bins, #min value, #max value),
),"""
self._accumulator = processor.dict_accumulator(
{
"allevents": processor.defaultdict_accumulator(float),
"events": hist.Hist(
"Events",
dataset_axis,
hist.Bin("nMuons", "Number of muons", 6, 0, 6),
),
"LUT": hist.Hist(
"LUT",
dataset_axis,
hist.Bin("position", "$position$", 20, -1, 0),
hist.Bin("slope", "$slope$", 20, -0.5, 0.5),
hist.Bin("pt", "$pt$", 50, 0, 50),
hist.Bin("multiplicity", "$multiplicity$", 3, 1, 4),
),
}
)
@property
def accumulator(self):
"""Return pieces added together for each parallel processor."""
return self._accumulator
def process(self, events):
"""Operation done for each event."""
output = self.accumulator.identity()
dataset = events.metadata["dataset"]
output["allevents"][dataset] += len(events)
"""Now, you'll need to unzip the variable, this stores the data into
the histograms we defined earlier.
variable = ak.zip(
{
"leaf": location_in_root_file,
},
)"""
"""Finally, we must assign the histograms to the output to return
to template_executor.py for plotting.
output["variable"].fill(
leaf=ak.flatten(variable.leaf),
)"""
lut = ak.zip(
{
"position": events.position,
"slope": events.slope,
"pt": events.pt,
"multiplicity": events.multiplicity,
},
)
output["LUT"].fill(
pcc=dataset,
position=lut.position,
slope=lut.slope,
pt=lut.pt,
multiplicity=lut.multiplicity,
)
return output
def postprocess(self, accumulator):
"""Return our total."""
return accumulator
|
[
"awkward.zip",
"coffea.hist.Cat",
"awkward.behavior.update",
"coffea.hist.Bin",
"coffea.processor.defaultdict_accumulator"
] |
[((188, 226), 'awkward.behavior.update', 'ak.behavior.update', (['candidate.behavior'], {}), '(candidate.behavior)\n', (206, 226), True, 'import awkward as ak\n'), ((361, 415), 'coffea.hist.Cat', 'hist.Cat', (['"""pcc"""', '"""Pattern-Comparator Code Combination"""'], {}), "('pcc', 'Pattern-Comparator Code Combination')\n", (369, 415), False, 'from coffea import hist, processor\n'), ((2316, 2434), 'awkward.zip', 'ak.zip', (["{'position': events.position, 'slope': events.slope, 'pt': events.pt,\n 'multiplicity': events.multiplicity}"], {}), "({'position': events.position, 'slope': events.slope, 'pt': events.pt,\n 'multiplicity': events.multiplicity})\n", (2322, 2434), True, 'import awkward as ak\n'), ((855, 895), 'coffea.processor.defaultdict_accumulator', 'processor.defaultdict_accumulator', (['float'], {}), '(float)\n', (888, 895), False, 'from coffea import hist, processor\n'), ((1018, 1064), 'coffea.hist.Bin', 'hist.Bin', (['"""nMuons"""', '"""Number of muons"""', '(6)', '(0)', '(6)'], {}), "('nMuons', 'Number of muons', 6, 0, 6)\n", (1026, 1064), False, 'from coffea import hist, processor\n'), ((1200, 1245), 'coffea.hist.Bin', 'hist.Bin', (['"""position"""', '"""$position$"""', '(20)', '(-1)', '(0)'], {}), "('position', '$position$', 20, -1, 0)\n", (1208, 1245), False, 'from coffea import hist, processor\n'), ((1267, 1310), 'coffea.hist.Bin', 'hist.Bin', (['"""slope"""', '"""$slope$"""', '(20)', '(-0.5)', '(0.5)'], {}), "('slope', '$slope$', 20, -0.5, 0.5)\n", (1275, 1310), False, 'from coffea import hist, processor\n'), ((1332, 1365), 'coffea.hist.Bin', 'hist.Bin', (['"""pt"""', '"""$pt$"""', '(50)', '(0)', '(50)'], {}), "('pt', '$pt$', 50, 0, 50)\n", (1340, 1365), False, 'from coffea import hist, processor\n'), ((1387, 1438), 'coffea.hist.Bin', 'hist.Bin', (['"""multiplicity"""', '"""$multiplicity$"""', '(3)', '(1)', '(4)'], {}), "('multiplicity', '$multiplicity$', 3, 1, 4)\n", (1395, 1438), False, 'from coffea import hist, processor\n')]
|
import click
from network_analyzer.analyzer import Analyzer
@click.group()
def main():
pass
@main.command(short_help="Analyze networks")
@click.option(
"--jsonrpc",
help="JsonRPC URL of the ethereum client",
default="https://tlbc.rpc.anyblock.tools",
show_default=True,
metavar="URL",
)
@click.option(
"--relay",
"relay_api_url",
help="Relay API URL",
default="http://localhost:5000/api/v1",
show_default=True,
metavar="URL",
)
@click.option(
"--output",
"output_path",
help="Path of the directory to output the csv to",
default=None,
type=click.Path(dir_okay=True, writable=True),
)
def analyze(jsonrpc: str, relay_api_url: str, output_path: str):
analyzer = Analyzer(jsonrpc, output_path, relay_api_url)
analyzer.analyze_bridge_transfers()
analyzer.analyze_networks()
analyzer.analyze_dead_identities()
|
[
"click.group",
"click.option",
"network_analyzer.analyzer.Analyzer",
"click.Path"
] |
[((64, 77), 'click.group', 'click.group', ([], {}), '()\n', (75, 77), False, 'import click\n'), ((147, 301), 'click.option', 'click.option', (['"""--jsonrpc"""'], {'help': '"""JsonRPC URL of the ethereum client"""', 'default': '"""https://tlbc.rpc.anyblock.tools"""', 'show_default': '(True)', 'metavar': '"""URL"""'}), "('--jsonrpc', help='JsonRPC URL of the ethereum client',\n default='https://tlbc.rpc.anyblock.tools', show_default=True, metavar='URL'\n )\n", (159, 301), False, 'import click\n'), ((317, 458), 'click.option', 'click.option', (['"""--relay"""', '"""relay_api_url"""'], {'help': '"""Relay API URL"""', 'default': '"""http://localhost:5000/api/v1"""', 'show_default': '(True)', 'metavar': '"""URL"""'}), "('--relay', 'relay_api_url', help='Relay API URL', default=\n 'http://localhost:5000/api/v1', show_default=True, metavar='URL')\n", (329, 458), False, 'import click\n'), ((738, 783), 'network_analyzer.analyzer.Analyzer', 'Analyzer', (['jsonrpc', 'output_path', 'relay_api_url'], {}), '(jsonrpc, output_path, relay_api_url)\n', (746, 783), False, 'from network_analyzer.analyzer import Analyzer\n'), ((613, 653), 'click.Path', 'click.Path', ([], {'dir_okay': '(True)', 'writable': '(True)'}), '(dir_okay=True, writable=True)\n', (623, 653), False, 'import click\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# created: 2015-04-22
"""ZooKeeper 客户端。
和 asyncio 联用时请注意:
所有 watch observer 必须使用 reactor.callFromThread() 将 watch 结果返回给 twisted 线程。
为调用方便,请使用 twisted_kazoo.twisted_callback 对回调进行封装。
"""
import logging
import threading
from queue import Queue
from kazoo.client import KazooClient
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.protocol.states import KazooState, KeeperState
from gcommon.aio import gasync
from gcommon.utils.gnet import ConnectionStatus
logger = logging.getLogger('kazoo')
class ZookeeperObserver(object):
ZK_Conn_Connecting = 'CONNECTING'
def __init__(self):
self._client_manager = None
self._kazoo_client = None
self._conn_status = ConnectionStatus.Initialized
self._zk_conn_status = self.ZK_Conn_Connecting
def set_client_manager(self, client_manager):
self._client_manager = client_manager
self._kazoo_client = self._client_manager.kazoo_client
def on_connection_failed(self, reason=None):
"""Client Manager 回调"""
logger.error('cannot connect to zookeeper, reason: %s', reason)
self._conn_status = ConnectionStatus.Closed
self._zk_conn_status = KazooState.LOST
gasync.run_in_main_thread(self._on_conn_failed)
def _on_conn_opened(self):
"""连接打开或者恢复"""
pass
def _on_conn_lost(self):
"""会话断开"""
pass
def _on_conn_suspended(self):
"""连接断开,会话挂起,尝试恢复中"""
pass
def _on_conn_failed(self):
"""第一次连接失败,无法建立会话"""
pass
def on_connection_status_changed(self, state):
"""在 ZK 的独立线程中调用(禁止在主线程调用)"""
logger.debug('connection status changed from %s to %s', self._zk_conn_status, state)
self._zk_conn_status = state
if state == KazooState.CONNECTED:
if self._kazoo_client.client_state == KeeperState.CONNECTED_RO:
logger.debug("Read only mode!")
else:
logger.debug("Read/Write mode!")
self._conn_status = ConnectionStatus.Connected
gasync.run_in_main_thread(self._on_conn_opened)
elif state == KazooState.LOST:
logger.debug('kazoo connection lost (client closed)')
self._conn_status = ConnectionStatus.Closed
gasync.run_in_main_thread(self._on_conn_lost)
elif state == KazooState.SUSPENDED:
logger.debug('kazoo connection suspended (maybe the server is gone)')
self._conn_status = ConnectionStatus.Suspended
gasync.run_in_main_thread(self._on_conn_suspended)
class _ZookeeperClientThread(threading.Thread):
"""运行 kazoo 客户端的专用线程。"""
def __init__(self, client):
threading.Thread.__init__(self, daemon=True)
self._client = client
def run(self):
logger.info('enter kazoo thread')
self._client.thread_main()
logger.info('leave kazoo thread')
class ZookeeperClient(object):
"""Kazoo 客户端管理器,用于管理 zk connection 和跨线程通信。
不处理任何实际业务。处理业务的是 ZookeeperService.
"""
def __init__(self, observer, server_addr):
self._observer = observer
self._kazoo_client = KazooClient(hosts=server_addr)
self._q_service_control = Queue()
self._is_running = True
self._thread = _ZookeeperClientThread(self)
@property
def kazoo_client(self):
return self._kazoo_client
def is_running(self):
return self._is_running
def send_control_message(self, message):
"""发送控制消息,控制消息必须在客户端的启动线程中处理"""
self._q_service_control.put(message)
def _process_service_control_message(self):
"""处理控制消息"""
message = self._q_service_control.get()
logger.debug('process control message: %s', message)
if message == "stop":
self._is_running = False
self._kazoo_client.stop()
def start(self):
"""启动独立线程运行 zookeeper 客户端 - 主线程调用"""
assert gasync.AsyncThreads.is_main_loop()
logger.info('start kazoo client')
self._kazoo_client.add_listener(self._observer.on_connection_status_changed)
self._thread.start()
def stop(self):
logger.info('stop kazoo client')
self.send_control_message('stop')
def wait(self):
logger.info('wait kazoo client exiting')
self._thread.join()
logger.info('kazoo client stopped')
def thread_main(self):
"""尝试连接服务器,如果多次连接失败则抛出超时错"""
try:
self._kazoo_client.start()
except KazooTimeoutError as e:
self._observer.on_connection_failed(e)
return
except Exception as e:
self._observer.on_connection_failed(e)
return
while self.is_running():
self._process_service_control_message()
def create_lock(self, node_root, node_name):
return KazooLock(self._kazoo_client, node_root, node_name)
class KazooLock(object):
def __init__(self, client: KazooClient, node_root, node_name):
self._kazoo_client = client
self._node_root = node_root
self._node_name = node_name
self._node_path = f"{node_root}/{node_name}."
self._full_path = ""
self._locked = False
async def acquire(self):
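        # Classic ZooKeeper lock recipe: create an ephemeral sequential node under the lock root,
        # watch the children, and treat the lock as held once our node carries the lowest sequence
        # number; gasync.AsyncEvent hands the result from the kazoo watch thread back to asyncio.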
result = self._kazoo_client.create(
self._node_path, b"", makepath=True, ephemeral=True, sequence=True
)
event = gasync.AsyncEvent()
@gasync.callback_run_in_main_thread
def _on_lock_nodes_changed(nodes):
if not nodes:
return
nodes.sort(key=lambda x: x.split(".")[1], reverse=False)
name, _sequence = nodes[0].split(".")
if name == self._node_name:
self._full_path = f"{self._node_root}/{nodes[0]}"
event.notify(True)
self._kazoo_client.ChildrenWatch(self._node_root, _on_lock_nodes_changed)
await event.wait()
return self
def release(self):
try:
self._kazoo_client.delete(self._full_path)
except:
logger.fatal("kazoo lock release error, %s", self._node_path)
raise
async def __aenter__(self):
await self.acquire()
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.release()
|
[
"threading.Thread.__init__",
"gcommon.aio.gasync.AsyncThreads.is_main_loop",
"kazoo.client.KazooClient",
"gcommon.aio.gasync.AsyncEvent",
"gcommon.aio.gasync.run_in_main_thread",
"queue.Queue",
"logging.getLogger"
] |
[((529, 555), 'logging.getLogger', 'logging.getLogger', (['"""kazoo"""'], {}), "('kazoo')\n", (546, 555), False, 'import logging\n'), ((1260, 1307), 'gcommon.aio.gasync.run_in_main_thread', 'gasync.run_in_main_thread', (['self._on_conn_failed'], {}), '(self._on_conn_failed)\n', (1285, 1307), False, 'from gcommon.aio import gasync\n'), ((2752, 2796), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {'daemon': '(True)'}), '(self, daemon=True)\n', (2777, 2796), False, 'import threading\n'), ((3205, 3235), 'kazoo.client.KazooClient', 'KazooClient', ([], {'hosts': 'server_addr'}), '(hosts=server_addr)\n', (3216, 3235), False, 'from kazoo.client import KazooClient\n'), ((3270, 3277), 'queue.Queue', 'Queue', ([], {}), '()\n', (3275, 3277), False, 'from queue import Queue\n'), ((3997, 4031), 'gcommon.aio.gasync.AsyncThreads.is_main_loop', 'gasync.AsyncThreads.is_main_loop', ([], {}), '()\n', (4029, 4031), False, 'from gcommon.aio import gasync\n'), ((5459, 5478), 'gcommon.aio.gasync.AsyncEvent', 'gasync.AsyncEvent', ([], {}), '()\n', (5476, 5478), False, 'from gcommon.aio import gasync\n'), ((2116, 2163), 'gcommon.aio.gasync.run_in_main_thread', 'gasync.run_in_main_thread', (['self._on_conn_opened'], {}), '(self._on_conn_opened)\n', (2141, 2163), False, 'from gcommon.aio import gasync\n'), ((2338, 2383), 'gcommon.aio.gasync.run_in_main_thread', 'gasync.run_in_main_thread', (['self._on_conn_lost'], {}), '(self._on_conn_lost)\n', (2363, 2383), False, 'from gcommon.aio import gasync\n'), ((2582, 2632), 'gcommon.aio.gasync.run_in_main_thread', 'gasync.run_in_main_thread', (['self._on_conn_suspended'], {}), '(self._on_conn_suspended)\n', (2607, 2632), False, 'from gcommon.aio import gasync\n')]
|
import pandas as pd
import sklearn.datasets as datasets
import pandas_ml as pdml
import numpy as np
# READ DATASETS AND CLEAR NAN COLUMNS AND ROWS
search_data = pd.read_csv(
"c:/Users/<NAME>/Desktop/Arya/comp551/2020_US_weekly_symptoms_dataset.csv", sep=',',
header=0, engine='python')
search_data.dropna(axis="columns", how="all", inplace=True)
search_data.dropna(axis="rows", how="all", thresh=12, inplace=True)
search_data.dropna(axis="columns", how="all", inplace=True)
search_data['date'] = pd.to_datetime(search_data['date'])
search_data['week'] = search_data['date'].dt.isocalendar().week
search_data.sort_values(['open_covid_region_code', 'week'],
ascending=[True, True], inplace=True)
print(search_data.shape)
search_data.to_csv(
'c:/Users/<NAME>/Desktop/Arya/comp551/cleared_search.csv', index=0)
agg_data = pd.read_csv(
"c:/Users/<NAME>/Desktop/Arya/comp551/aggregated_cc_by.csv", sep=',',
header=0, engine='python')
# agg_data.dropna(axis="columns", how="all", inplace=True)
# EXTRACT USA DATA FROM DATASET 2
indexNames = agg_data[agg_data['open_covid_region_code'].str.find(
'US-') < 0].index
agg_data.drop(indexNames, inplace=True)
agg_data['date'] = pd.to_datetime(agg_data['date'])
indexNames2 = agg_data[agg_data['date'] < '2020-01-06'].index
agg_data.drop(indexNames2, inplace=True)
agg_data.dropna(axis="columns", how="all", inplace=True)
# # CONVERT DAILY DATA TO WEEKLY
agg_data['week'] = agg_data['date'].dt.isocalendar().week
agg_data.fillna(0, inplace=True)
print(agg_data.shape)
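# Aggregation rules used to collapse the daily rows into one row per (region, week); only the
# hospitalization columns are active here, the remaining candidates are kept commented out.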
logic = {
# 'open_covid_region_code': 'first',
# 'region_name': 'first',
# 'cases_cumulative': 'last',
# 'cases_new': 'sum',
# 'cases_cumulative_per_million': 'last',
# 'cases_new_per_million': 'sum',
# 'deaths_cumulative': 'last',
# 'deaths_new': 'sum',
# 'deaths_cumulative_per_million': 'last',
# 'deaths_new_per_million': 'sum',
# 'tests_new': 'sum',
# 'tests_cumulative': 'last',
# 'tests_cumulative_per_thousand': 'last',
# 'tests_new_per_thousand': 'sum',
# 'test_units': 'last',
# 'hospitalized_current': 'mean',
'hospitalized_new': 'sum',
'hospitalized_cumulative': 'last',
# 'discharged_new': 'sum',
# 'discharged_cumulative': 'last',
# 'icu_current': 'mean',
# 'icu_cumulative': 'last',
# 'ventilator_current': 'mean',
# 'school_closing': 'max',
# 'school_closing_flag': 'max',
# 'workplace_closing': 'max',
# 'workplace_closing_flag': 'max',
# 'cancel_public_events_flag': 'max',
# 'restrictions_on_gatherings': 'max',
# 'restrictions_on_gatherings_flag': 'max',
# 'close_public_transit': 'max',
# 'close_public_transit_flag': 'max',
# 'stay_at_home_requirements': 'max',
# 'stay_at_home_requirements_flag': 'max',
# 'restrictions_on_internal_movement': 'max',
# 'restrictions_on_internal_movement_flag': 'max',
# 'international_travel_controls': 'max',
# 'income_support': 'max',
# 'income_support_flag': 'max',
# 'debt_contract_relief': 'max',
# 'fiscal_measures': 'max',
# 'international_support': 'max',
# 'public_information_campaigns': 'max',
# 'public_information_campaigns_flag': 'max',
# 'testing_policy': 'max',
# 'contact_tracing': 'max',
# 'emergency_investment_in_healthcare': 'max',
# 'investment_in_vaccines': 'max',
# 'wildcard': 'max',
# 'confirmed_cases': 'last',
# 'confirmed_deaths': 'last',
# 'stringency_index': 'max',
# 'stringency_index_for_display': 'max',
# 'stringency_legacy_index': 'max',
# 'stringency_legacy_index_for_display': 'max',
# 'government_response_index': 'max',
# 'government_response_index_for_display': 'max',
# 'containment_health_index': 'max',
# 'containment_health_index_for_display': 'max',
# 'economic_support_index': 'max',
# 'economic_support_index_for_display': 'max'
}
df1 = agg_data.groupby(
['open_covid_region_code', 'week'], as_index=False).agg(logic)
print(df1.shape)
df1.to_csv('c:/Users/<NAME>/Desktop/Arya/comp551/cleared_agg.csv')
df2 = pd.merge(left=search_data, right=df1,
on=['open_covid_region_code', 'week'])
df2.to_csv('c:/Users/<NAME>/Desktop/Arya/comp551/merged_data.csv', index=0)
print(df2.shape)
# SET TARGET AND NORMALIZE DATA
# dataframe = pdml.ModelFrame(df2.to_dict(orient='list'))
|
[
"pandas.read_csv",
"pandas.merge",
"pandas.to_datetime"
] |
[((166, 298), 'pandas.read_csv', 'pd.read_csv', (['"""c:/Users/<NAME>/Desktop/Arya/comp551/2020_US_weekly_symptoms_dataset.csv"""'], {'sep': '""","""', 'header': '(0)', 'engine': '"""python"""'}), "(\n 'c:/Users/<NAME>/Desktop/Arya/comp551/2020_US_weekly_symptoms_dataset.csv',\n sep=',', header=0, engine='python')\n", (177, 298), True, 'import pandas as pd\n'), ((517, 552), 'pandas.to_datetime', 'pd.to_datetime', (["search_data['date']"], {}), "(search_data['date'])\n", (531, 552), True, 'import pandas as pd\n'), ((880, 992), 'pandas.read_csv', 'pd.read_csv', (['"""c:/Users/<NAME>/Desktop/Arya/comp551/aggregated_cc_by.csv"""'], {'sep': '""","""', 'header': '(0)', 'engine': '"""python"""'}), "('c:/Users/<NAME>/Desktop/Arya/comp551/aggregated_cc_by.csv',\n sep=',', header=0, engine='python')\n", (891, 992), True, 'import pandas as pd\n'), ((1251, 1283), 'pandas.to_datetime', 'pd.to_datetime', (["agg_data['date']"], {}), "(agg_data['date'])\n", (1265, 1283), True, 'import pandas as pd\n'), ((4233, 4309), 'pandas.merge', 'pd.merge', ([], {'left': 'search_data', 'right': 'df1', 'on': "['open_covid_region_code', 'week']"}), "(left=search_data, right=df1, on=['open_covid_region_code', 'week'])\n", (4241, 4309), True, 'import pandas as pd\n')]
|
from infi.execute import execute_assert_success
from .scsi import AixModelMixin, AixSCSIBlockDevice
from .native_multipath import AixMultipathBlockDevice
from infi.storagemodel.errors import DeviceError
class AixRescan(AixModelMixin):
def _add_new_devices(self):
execute_assert_success(["cfgmgr"])
def _get_all_devices(self, multipath):
klass = AixSCSIBlockDevice if not multipath else AixMultipathBlockDevice
devices = [klass(dev) for dev in self._get_dev_by_class("dac")] + \
[klass(dev) for dev in self._get_dev_by_class("disk")]
multipath_devices = self._get_multipath_devices()
filter_in = lambda dev: dev.get_display_name() in multipath_devices
filter_out = lambda dev: dev.get_display_name() not in multipath_devices
return list(filter(filter_in if multipath else filter_out, devices))
def _do_report_luns(self, device_name):
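        # Issue a SCSI REPORT LUNS (select report 0) through infi.asi against the given device node
        # and return the LUN list the target reports; the caller compares it with the LUNs the OS
        # still knows about to find devices that are no longer exposed.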
from infi.asi.executers import aix as aix_executer
from infi.asi.coroutines.sync_adapter import sync_wait as _sync_wait
from infi.asi.cdb.report_luns import ReportLunsCommand
device = "/dev/{}" + device_name
select_report = 0
with aix_executer(device) as executer:
command = ReportLunsCommand(select_report=int(select_report))
result = _sync_wait(command.execute(executer))
return result.lun_list
def _remove_missing_scsi_devices(self):
devices = self._get_all_devices(False)
# go over all devices, build a dict that contains: hct -> dict of lun->device-name
hcts = dict()
for device in devices:
hctl = device.get_hctl()
hct = (hctl.get_host(), hctl.get_channel(), hctl.get_target())
            hct_luns = hcts.setdefault(hct, dict())
hct_luns[hctl.get_lun()] = device.get_display_name()
# do SCSI report luns on lun 0 of each hct, then remove the luns we see that are not returned
        for hct, hct_luns in hcts.items():
lun0_device = hct_luns[0] # LUN 0 must exist
actual_luns = self._do_report_luns(lun0_device)
missing_luns = set(hct_luns.keys()) - set(actual_luns)
for missing_lun in missing_luns:
dev_name = hct_luns[missing_lun]
execute_assert_success(["rmdev", "-dl", dev_name])
def _remove_missing_multipath_devices(self):
devices = self._get_all_devices(True)
for device in devices:
try:
# try to send an IO to make the OS refresh the state path
device.get_scsi_standard_inquiry()
except DeviceError:
pass
paths_states = {path: path.get_state() for path in device.get_paths()}
if all(state == "down" for state in paths_states.values()):
execute_assert_success(["rmdev", "-dl", device.get_display_name()])
continue
for path, path_state in paths_states.items():
if path_state == "down":
execute_assert_success(["rmpath", "-dl", device.get_display_name(), "-i", path.get_path_id()])
def rescan(self):
self._add_new_devices()
# TODO: The logic here is bad... We use information from the OS instead of checking the fabric itself.
# for multipath devices we assume the "state" of the paths is updated
# for scsi devices it's even worse, because we need 'get_hctl' when going over the devices, which uses
# the ODM to find the target and LUN. This will fail for devices that are not defined - so for now
# we don't remove missing SCSI devices and we assume the OS information is updated for multipath devices...
# self._remove_missing_scsi_devices()
self._remove_missing_multipath_devices()
|
[
"infi.execute.execute_assert_success",
"infi.asi.executers.aix"
] |
[((276, 310), 'infi.execute.execute_assert_success', 'execute_assert_success', (["['cfgmgr']"], {}), "(['cfgmgr'])\n", (298, 310), False, 'from infi.execute import execute_assert_success\n'), ((1202, 1222), 'infi.asi.executers.aix', 'aix_executer', (['device'], {}), '(device)\n', (1214, 1222), True, 'from infi.asi.executers import aix as aix_executer\n'), ((2317, 2367), 'infi.execute.execute_assert_success', 'execute_assert_success', (["['rmdev', '-dl', dev_name]"], {}), "(['rmdev', '-dl', dev_name])\n", (2339, 2367), False, 'from infi.execute import execute_assert_success\n')]
|
from flask_wtf import Form
from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
SubmitField,StringField
from wtforms.validators import DataRequired, Length, Email, Regexp
from wtforms import ValidationError
from flask_pagedown.fields import PageDownField
from ..models import Role, User,BID_action
from wtforms import StringField, PasswordField, BooleanField, SubmitField,FieldList,FileField,SelectField
class NameForm(Form):
name = StringField('What is your name?', validators=[DataRequired()])
submit = SubmitField('Submit')
class BID_dataForm(Form):
# IDnumber = SelectField("设备类型", choices=[('手持机', '手持机'), ('脚扣', '脚扣')])
IDnumber = StringField("身份证号", validators=[DataRequired(),Length(18),Regexp('^[0-9](X|x){0,1}',message=u'请输入正确的身份证号')])
BIDnumber = StringField("标书号", validators=[DataRequired(),Length(8),Regexp('^[0-9]',message=u'请输入正确的标书号')])
BIDpassword = StringField("标书密码", validators=[DataRequired(),Length(4),Regexp('^[0-9]',message=u'请输入正确的标书密码')])
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField('创建标书号')
def __init__(self, user, *args, **kwargs):
super(BID_dataForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
class BID_actionForm(Form):
diff_choices=[(i*100+400,i*100+400) for i in range(12)]
refer_time_choices=[(i+40,i+40) for i in range(16)]
bid_time_choices=[(i+54,i+54) for i in range(2)]
delay_time_choices=[(i*0.1,i*0.1) for i in range(10)]
ahead_price_choices=[(i*100,i*100) for i in range(4)]
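    # Dropdown (value, label) pairs: price gaps of 400-1500 in steps of 100, reference/bid times of
    # 40-55 and 54-55 (apparently seconds within the bidding window), bid delays of 0.0-0.9 and a
    # 0-300 head start on the bid price.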
diff = SelectField(u"相差价格",coerce=int, choices=diff_choices) #参考时间差价
refer_time = SelectField(u"参考价格时间",coerce=int,choices=refer_time_choices,default=(50,50)) #参考时间
bid_time = SelectField(u"出价时间",coerce=int,choices=bid_time_choices,default=(55,55)) #出价截止时间
delay_time = SelectField(u"出价延迟",coerce=float, choices=delay_time_choices) #出价延迟时间,0.1~0.9
ahead_price = SelectField(u"出价提前",coerce=int,choices=ahead_price_choices,default=(100,100)) #出价提前价格
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField('创建策略')
def __init__(self, user, *args, **kwargs):
super(BID_actionForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
class Edit_BID_dataForm(Form):
# IDnumber = SelectField("设备类型", choices=[('手持机', '手持机'), ('脚扣', '脚扣')])
IDnumber = StringField("身份证号", validators=[DataRequired(),Length(18),Regexp('^[0-9](X|x){0,1}',message=u'请输入正确的身份证号')])
BIDnumber = StringField("标书号", validators=[DataRequired(),Length(8),Regexp('^[0-9]',message=u'请输入正确的标书号')])
BIDpassword = StringField("标书密码", validators=[DataRequired(),Length(4),Regexp('^[0-9]',message=u'请输入正确的标书密码')])
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField('提交修改')
delete = SubmitField('删除')
def __init__(self, user, *args, **kwargs):
super(Edit_BID_dataForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
class Edit_BID_actionForm(Form):
diff_choices=[(i*100+400,i*100+400) for i in range(12)]
refer_time_choices=[(i+40,i+40) for i in range(16)]
bid_time_choices=[(i+54,i+54) for i in range(2)]
delay_time_choices=[(i*0.1,i*0.1) for i in range(10)]
ahead_price_choices=[(i*100,i*100) for i in range(4)]
diff = SelectField(u"相差价格",coerce=int, choices=diff_choices) #参考时间差价
refer_time = SelectField(u"参考价格时间",coerce=int,choices=refer_time_choices) #参考时间
bid_time = SelectField(u"出价时间",coerce=int,choices=bid_time_choices) #出价截止时间
delay_time = SelectField(u"出价延迟",coerce=float, choices=delay_time_choices) #出价延迟时间,0.1~0.9
ahead_price = SelectField(u"出价提前",coerce=int,choices=ahead_price_choices) #出价提前价格
action_user = SelectField('拍手选择:', coerce=int)
    # Submit button
submit = SubmitField(u'提交修改')
delete = SubmitField(u'删除策略')
def __init__(self, user, *args, **kwargs):
super(Edit_BID_actionForm, self).__init__(*args, **kwargs)
self.action_user.choices = [(user.id, user.username)
for user in User.query.order_by(User.username).all()]
self.user = user
### File upload
class FileForm(Form):
file1=FileField('第一次出价')
file2=FileField('最后一次出价')
file3=FileField('结果')
file4=FileField('出价视频')
submit=SubmitField('Submit')
### Search
class InquiryForm(Form):
keyword=StringField('内容')
submit=SubmitField('查询')
#------------------------------------------ disabled
class EditProfileForm(Form):
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
username = StringField('Username', validators=[
DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role', coerce=int)
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name)
for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and \
User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if field.data != self.user.username and \
User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class CommentForm(Form):
body = StringField('Enter your comment', validators=[DataRequired()])
submit = SubmitField('Submit')
####修改过
class BulletinForm(Form):
dt=StringField('时间')
price=StringField('价格',validators=[DataRequired()])
names = FieldList(StringField('名称'), label='物品列表', min_entries=1)
|
[
"wtforms.ValidationError",
"wtforms.SelectField",
"wtforms.validators.Length",
"wtforms.validators.Email",
"wtforms.BooleanField",
"wtforms.validators.Regexp",
"wtforms.TextAreaField",
"wtforms.SubmitField",
"wtforms.FileField",
"wtforms.StringField",
"wtforms.validators.DataRequired"
] |
[((541, 562), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (552, 562), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((1036, 1068), 'wtforms.SelectField', 'SelectField', (['"""拍手选择:"""'], {'coerce': 'int'}), "('拍手选择:', coerce=int)\n", (1047, 1068), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((1089, 1109), 'wtforms.SubmitField', 'SubmitField', (['"""创建标书号"""'], {}), "('创建标书号')\n", (1100, 1109), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((1714, 1768), 'wtforms.SelectField', 'SelectField', (['u"""相差价格"""'], {'coerce': 'int', 'choices': 'diff_choices'}), "(u'相差价格', coerce=int, choices=diff_choices)\n", (1725, 1768), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((1793, 1878), 'wtforms.SelectField', 'SelectField', (['u"""参考价格时间"""'], {'coerce': 'int', 'choices': 'refer_time_choices', 'default': '(50, 50)'}), "(u'参考价格时间', coerce=int, choices=refer_time_choices, default=(50, 50)\n )\n", (1804, 1878), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((1891, 1967), 'wtforms.SelectField', 'SelectField', (['u"""出价时间"""'], {'coerce': 'int', 'choices': 'bid_time_choices', 'default': '(55, 55)'}), "(u'出价时间', coerce=int, choices=bid_time_choices, default=(55, 55))\n", (1902, 1967), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((1989, 2051), 'wtforms.SelectField', 'SelectField', (['u"""出价延迟"""'], {'coerce': 'float', 'choices': 'delay_time_choices'}), "(u'出价延迟', coerce=float, choices=delay_time_choices)\n", (2000, 2051), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((2085, 2170), 'wtforms.SelectField', 'SelectField', (['u"""出价提前"""'], {'coerce': 'int', 'choices': 'ahead_price_choices', 'default': '(100, 100)'}), "(u'出价提前', coerce=int, choices=ahead_price_choices, default=(100,\n 100))\n", (2096, 2170), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((2189, 2221), 'wtforms.SelectField', 'SelectField', (['"""拍手选择:"""'], {'coerce': 'int'}), "('拍手选择:', coerce=int)\n", (2200, 2221), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((2242, 2261), 'wtforms.SubmitField', 'SubmitField', (['"""创建策略"""'], {}), "('创建策略')\n", (2253, 2261), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((3019, 3051), 'wtforms.SelectField', 'SelectField', (['"""拍手选择:"""'], {'coerce': 'int'}), "('拍手选择:', coerce=int)\n", (3030, 3051), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((3072, 3091), 'wtforms.SubmitField', 'SubmitField', (['"""提交修改"""'], {}), "('提交修改')\n", (3083, 3091), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((3105, 3122), 'wtforms.SubmitField', 'SubmitField', (['"""删除"""'], {}), "('删除')\n", (3116, 3122), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, 
FieldList, FileField, SelectField\n'), ((3736, 3790), 'wtforms.SelectField', 'SelectField', (['u"""相差价格"""'], {'coerce': 'int', 'choices': 'diff_choices'}), "(u'相差价格', coerce=int, choices=diff_choices)\n", (3747, 3790), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((3815, 3877), 'wtforms.SelectField', 'SelectField', (['u"""参考价格时间"""'], {'coerce': 'int', 'choices': 'refer_time_choices'}), "(u'参考价格时间', coerce=int, choices=refer_time_choices)\n", (3826, 3877), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((3897, 3955), 'wtforms.SelectField', 'SelectField', (['u"""出价时间"""'], {'coerce': 'int', 'choices': 'bid_time_choices'}), "(u'出价时间', coerce=int, choices=bid_time_choices)\n", (3908, 3955), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((3979, 4041), 'wtforms.SelectField', 'SelectField', (['u"""出价延迟"""'], {'coerce': 'float', 'choices': 'delay_time_choices'}), "(u'出价延迟', coerce=float, choices=delay_time_choices)\n", (3990, 4041), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4075, 4136), 'wtforms.SelectField', 'SelectField', (['u"""出价提前"""'], {'coerce': 'int', 'choices': 'ahead_price_choices'}), "(u'出价提前', coerce=int, choices=ahead_price_choices)\n", (4086, 4136), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4161, 4193), 'wtforms.SelectField', 'SelectField', (['"""拍手选择:"""'], {'coerce': 'int'}), "('拍手选择:', coerce=int)\n", (4172, 4193), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4214, 4234), 'wtforms.SubmitField', 'SubmitField', (['u"""提交修改"""'], {}), "(u'提交修改')\n", (4225, 4234), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4248, 4268), 'wtforms.SubmitField', 'SubmitField', (['u"""删除策略"""'], {}), "(u'删除策略')\n", (4259, 4268), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4596, 4614), 'wtforms.FileField', 'FileField', (['"""第一次出价"""'], {}), "('第一次出价')\n", (4605, 4614), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4625, 4644), 'wtforms.FileField', 'FileField', (['"""最后一次出价"""'], {}), "('最后一次出价')\n", (4634, 4644), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4655, 4670), 'wtforms.FileField', 'FileField', (['"""结果"""'], {}), "('结果')\n", (4664, 4670), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4681, 4698), 'wtforms.FileField', 'FileField', (['"""出价视频"""'], {}), "('出价视频')\n", (4690, 4698), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4710, 4731), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (4721, 4731), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4776, 4793), 'wtforms.StringField', 'StringField', (['"""内容"""'], {}), "('内容')\n", (4787, 4793), False, 'from wtforms 
import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((4805, 4822), 'wtforms.SubmitField', 'SubmitField', (['"""查询"""'], {}), "('查询')\n", (4816, 4822), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((5053, 5078), 'wtforms.TextAreaField', 'TextAreaField', (['"""About me"""'], {}), "('About me')\n", (5066, 5078), False, 'from wtforms import StringField, TextAreaField, BooleanField, SelectField, SubmitField, StringField\n'), ((5092, 5113), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (5103, 5113), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((5581, 5606), 'wtforms.BooleanField', 'BooleanField', (['"""Confirmed"""'], {}), "('Confirmed')\n", (5593, 5606), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((5618, 5649), 'wtforms.SelectField', 'SelectField', (['"""Role"""'], {'coerce': 'int'}), "('Role', coerce=int)\n", (5629, 5649), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((5796, 5821), 'wtforms.TextAreaField', 'TextAreaField', (['"""About me"""'], {}), "('About me')\n", (5809, 5821), False, 'from wtforms import StringField, TextAreaField, BooleanField, SelectField, SubmitField, StringField\n'), ((5835, 5856), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (5846, 5856), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((6681, 6702), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (6692, 6702), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((6746, 6763), 'wtforms.StringField', 'StringField', (['"""时间"""'], {}), "('时间')\n", (6757, 6763), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((6842, 6859), 'wtforms.StringField', 'StringField', (['"""名称"""'], {}), "('名称')\n", (6853, 6859), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, FieldList, FileField, SelectField\n'), ((6294, 6338), 'wtforms.ValidationError', 'ValidationError', (['"""Email already registered."""'], {}), "('Email already registered.')\n", (6309, 6338), False, 'from wtforms import ValidationError\n'), ((6515, 6558), 'wtforms.ValidationError', 'ValidationError', (['"""Username already in use."""'], {}), "('Username already in use.')\n", (6530, 6558), False, 'from wtforms import ValidationError\n'), ((511, 525), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (523, 525), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((721, 735), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (733, 735), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((736, 746), 'wtforms.validators.Length', 'Length', (['(18)'], {}), '(18)\n', (742, 746), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((747, 796), 'wtforms.validators.Regexp', 'Regexp', (['"""^[0-9](X|x){0,1}"""'], {'message': 'u"""请输入正确的身份证号"""'}), "('^[0-9](X|x){0,1}', message=u'请输入正确的身份证号')\n", (753, 796), False, 'from wtforms.validators import DataRequired, Length, 
Email, Regexp\n'), ((843, 857), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (855, 857), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((858, 867), 'wtforms.validators.Length', 'Length', (['(8)'], {}), '(8)\n', (864, 867), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((868, 906), 'wtforms.validators.Regexp', 'Regexp', (['"""^[0-9]"""'], {'message': 'u"""请输入正确的标书号"""'}), "('^[0-9]', message=u'请输入正确的标书号')\n", (874, 906), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((960, 974), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (972, 974), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((975, 984), 'wtforms.validators.Length', 'Length', (['(4)'], {}), '(4)\n', (981, 984), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((985, 1024), 'wtforms.validators.Regexp', 'Regexp', (['"""^[0-9]"""'], {'message': 'u"""请输入正确的标书密码"""'}), "('^[0-9]', message=u'请输入正确的标书密码')\n", (991, 1024), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2702, 2716), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2714, 2716), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2717, 2727), 'wtforms.validators.Length', 'Length', (['(18)'], {}), '(18)\n', (2723, 2727), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2728, 2777), 'wtforms.validators.Regexp', 'Regexp', (['"""^[0-9](X|x){0,1}"""'], {'message': 'u"""请输入正确的身份证号"""'}), "('^[0-9](X|x){0,1}', message=u'请输入正确的身份证号')\n", (2734, 2777), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2824, 2838), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2836, 2838), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2839, 2848), 'wtforms.validators.Length', 'Length', (['(8)'], {}), '(8)\n', (2845, 2848), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2849, 2887), 'wtforms.validators.Regexp', 'Regexp', (['"""^[0-9]"""'], {'message': 'u"""请输入正确的标书号"""'}), "('^[0-9]', message=u'请输入正确的标书号')\n", (2855, 2887), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2941, 2955), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2953, 2955), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2956, 2965), 'wtforms.validators.Length', 'Length', (['(4)'], {}), '(4)\n', (2962, 2965), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((2966, 3005), 'wtforms.validators.Regexp', 'Regexp', (['"""^[0-9]"""'], {'message': 'u"""请输入正确的标书密码"""'}), "('^[0-9]', message=u'请输入正确的标书密码')\n", (2972, 3005), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((4955, 4968), 'wtforms.validators.Length', 'Length', (['(0)', '(64)'], {}), '(0, 64)\n', (4961, 4968), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5022, 5035), 'wtforms.validators.Length', 'Length', (['(0)', '(64)'], {}), '(0, 64)\n', (5028, 5035), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5195, 5209), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (5207, 5209), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5211, 5224), 
'wtforms.validators.Length', 'Length', (['(1)', '(64)'], {}), '(1, 64)\n', (5217, 5224), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5271, 5278), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (5276, 5278), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5341, 5355), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (5353, 5355), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5357, 5370), 'wtforms.validators.Length', 'Length', (['(1)', '(64)'], {}), '(1, 64)\n', (5363, 5370), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5372, 5479), 'wtforms.validators.Regexp', 'Regexp', (['"""^[A-Za-z][A-Za-z0-9_.]*$"""', '(0)', '"""Usernames must have only letters, numbers, dots or underscores"""'], {}), "('^[A-Za-z][A-Za-z0-9_.]*$', 0,\n 'Usernames must have only letters, numbers, dots or underscores')\n", (5378, 5479), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5698, 5711), 'wtforms.validators.Length', 'Length', (['(0)', '(64)'], {}), '(0, 64)\n', (5704, 5711), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((5765, 5778), 'wtforms.validators.Length', 'Length', (['(0)', '(64)'], {}), '(0, 64)\n', (5771, 5778), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((6651, 6665), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (6663, 6665), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n'), ((6807, 6821), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (6819, 6821), False, 'from wtforms.validators import DataRequired, Length, Email, Regexp\n')]
|
#!/usr/bin/env python
"""
PyTorch datasets and data augmenters
"""
###########
# Imports #
###########
import cv2
import numpy as np
import os
import random
import torch
from PIL import Image, ImageFilter
from torch.utils import data
from torchvision import transforms
#############
# Functions #
#############
def to_pil(tensor):
'''Converts a tensor to a PIL image.'''
return transforms.functional.to_pil_image(tensor)
def to_tensor(pic):
'''Converts a PIL image to a tensor.'''
return transforms.functional.to_tensor(pic)
def to_mask(shape, polygons):
'''Builds a mask based on polygon annotations.'''
contours = [np.array(p, dtype=int) for p in polygons]
mask = np.zeros(shape, dtype=np.uint8)
cv2.drawContours(mask, contours, -1, color=255, thickness=-1)
return Image.fromarray(mask)
def to_contours(mask):
'''Converts a mask into OpenCV contours.'''
mask = np.array(mask)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return contours
def clusterize(polygons, size):
'''Clusterize polygons.'''
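    # Each polygon is added to every `size` x `size` grid cell that its bounding box
    # overlaps; cluster keys are the (x, y) pixel coordinates of a cell's top-left corner.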
clusters = {}
for polygon in polygons:
temp = np.array(polygon).astype(int)
xmin = np.amin(temp[:, 0]) // size
xmax = np.amax(temp[:, 0]) // size
ymin = np.amin(temp[:, 1]) // size
ymax = np.amax(temp[:, 1]) // size
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
key = x * size, y * size
if not key in clusters:
clusters[key] = []
clusters[key].append(polygon)
return clusters
###########
# Classes #
###########
class VIADataset(data.IterableDataset):
'''Iterable VIA dataset.'''
def __init__(self, via, path='./', size=256, shuffle=False, shift=0, full=False, alt=0):
self.via = {}
self.masks = {}
self.clusters = {}
self.size = size
for key, polygons in via.items():
imagename = os.path.join(path, key)
if os.path.exists(imagename):
image = Image.open(imagename)
self.via[imagename] = polygons
self.masks[imagename] = to_mask((image.height, image.width), polygons)
if self.size is not None:
self.clusters[imagename] = clusterize(polygons, self.size)
self.shuffle = shuffle # random order
self.shift = shift # random shift
self.full = full # all sub-images
self.alt = alt # alternate
def __len__(self):
if self.size is None:
return len(self.via)
elif self.full:
s = 0
for imagename in self.via:
image = Image.open(imagename)
s += (image.width // self.size) * (image.height // self.size)
return s
else:
return sum(map(len, self.clusters.values())) * (1 + self.alt)
def __iter__(self):
        # materialize the keys: random.sample() requires a sequence
        images = random.sample(
            list(self.via.keys()),
            len(self.via)
        ) if self.shuffle else self.via.keys()
for imagename in images:
image = Image.open(imagename).convert('RGB')
mask = self.masks[imagename]
if self.size is None:
yield image, mask
elif self.full:
for left in np.arange(0, image.width, self.size):
for upper in np.arange(0, image.height, self.size):
box = (left, upper, left + self.size, upper + self.size)
yield image.crop(box), mask.crop(box)
else:
clusters = list(self.clusters[imagename].keys())
if self.shuffle:
random.shuffle(clusters)
for left, upper in clusters:
# Shift
if self.shift > 0:
left += random.randint(-self.shift, self.shift)
upper += random.randint(-self.shift, self.shift)
# Out of bounds
left = min(left, image.width - self.size)
upper = min(upper, image.height - self.size)
box = (left, upper, left + self.size, upper + self.size)
yield image.crop(box), mask.crop(box)
# Alternate with random images
for _ in range(self.alt):
left = random.randrange(image.width - self.size)
upper = random.randrange(image.height - self.size)
box = (left, upper, left + self.size, upper + self.size)
yield image.crop(box), mask.crop(box)
class RandomChoice(data.IterableDataset):
'''Apply a randomly picked transformation to each pair (input, target).'''
def __init__(self, dataset, transforms, input_only=False):
super().__init__()
self.dataset = dataset
self.transforms = transforms
self.input_only = input_only
def __len__(self):
return len(self.dataset)
def __iter__(self):
for input, target in self.dataset:
f = random.choice(self.transforms)
yield f(input), target if self.input_only else f(target)
class ColorJitter(RandomChoice):
'''Color jitter.'''
def __init__(self, dataset, brightness=0.25, contrast=0.33, saturation=0.33, hue=0):
super().__init__(
dataset=dataset,
transforms=[transforms.ColorJitter(brightness, contrast, saturation, hue)],
input_only=True
)
class RandomFilter(RandomChoice):
'''Random image filter.'''
def __init__(self, dataset):
super().__init__(
dataset=dataset,
transforms=[
lambda x: x,
lambda x: x.filter(ImageFilter.BLUR),
lambda x: x.filter(ImageFilter.DETAIL),
lambda x: x.filter(ImageFilter.EDGE_ENHANCE),
lambda x: x.filter(ImageFilter.SMOOTH),
lambda x: x.filter(ImageFilter.SHARPEN)
],
input_only=True
)
class RandomTranspose(RandomChoice):
'''Random image transpose.'''
def __init__(self, dataset):
super().__init__(
dataset=dataset,
transforms=[
lambda x: x,
lambda x: x.transpose(Image.FLIP_LEFT_RIGHT),
lambda x: x.transpose(Image.FLIP_TOP_BOTTOM),
lambda x: x.transpose(Image.ROTATE_90),
lambda x: x.transpose(Image.ROTATE_180),
lambda x: x.transpose(Image.ROTATE_270),
lambda x: x.transpose(Image.TRANSPOSE)
],
input_only=False
)
class Scale(RandomChoice):
'''Scale image.'''
def __init__(self, dataset, scale):
super().__init__(
dataset=dataset,
transforms=[lambda x: x.resize(
(int(x.width * scale), int(x.height * scale))
)],
input_only=False
)
class ToTensor(RandomChoice):
'''To Tensor.'''
def __init__(self, dataset):
super().__init__(
dataset=dataset,
transforms=[to_tensor],
input_only=False
)
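# Illustrative usage sketch (not part of the original module): the wrapper classes
# above are designed to be chained around a VIADataset. Assuming `via` is a VIA
# annotation dict and the images live under './images/', a pipeline could be:
#
#     dataset = VIADataset(via, path='./images/', size=256, shuffle=True, shift=32)
#     dataset = ToTensor(RandomTranspose(RandomFilter(ColorJitter(dataset))))
#     loader = data.DataLoader(dataset, batch_size=4)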
########
# Main #
########
if __name__ == '__main__':
# Imports
import argparse
import json
import via as VIA
# Arguments
parser = argparse.ArgumentParser(description='Format California annotations to the VIA format')
parser.add_argument('-e', '--ext', default='.tif', help='extension of the images')
parser.add_argument('-o', '--output', default='../products/json/california.json', help='output VIA file')
parser.add_argument('-p', '--path', default='../resources/california/', help='path to California resources')
args = parser.parse_args()
# Polygons
with open(os.path.join(args.path, 'SolarArrayPolygons.json'), 'r') as f:
panels = json.load(f)['polygons']
# VGG Image Annotations
via = {}
for panel in panels:
filename = panel['image_name'] + args.ext
polygon = panel['polygon_vertices_pixels']
## Skip dots and lines
if not len(polygon) > 3:
continue
## Add polygon
if filename not in via:
via[filename] = []
via[filename].append(polygon)
# Save
VIA.dump(via, args.output, path=args.path)
|
[
"torchvision.transforms.functional.to_tensor",
"argparse.ArgumentParser",
"numpy.amin",
"random.shuffle",
"numpy.arange",
"os.path.join",
"via.dump",
"random.randint",
"os.path.exists",
"torchvision.transforms.functional.to_pil_image",
"cv2.drawContours",
"torchvision.transforms.ColorJitter",
"json.load",
"numpy.zeros",
"random.choice",
"PIL.Image.open",
"numpy.amax",
"numpy.array",
"random.randrange",
"PIL.Image.fromarray",
"cv2.findContours"
] |
[((387, 429), 'torchvision.transforms.functional.to_pil_image', 'transforms.functional.to_pil_image', (['tensor'], {}), '(tensor)\n', (421, 429), False, 'from torchvision import transforms\n'), ((501, 537), 'torchvision.transforms.functional.to_tensor', 'transforms.functional.to_tensor', (['pic'], {}), '(pic)\n', (532, 537), False, 'from torchvision import transforms\n'), ((685, 716), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.uint8'}), '(shape, dtype=np.uint8)\n', (693, 716), True, 'import numpy as np\n'), ((718, 779), 'cv2.drawContours', 'cv2.drawContours', (['mask', 'contours', '(-1)'], {'color': '(255)', 'thickness': '(-1)'}), '(mask, contours, -1, color=255, thickness=-1)\n', (734, 779), False, 'import cv2\n'), ((789, 810), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (804, 810), False, 'from PIL import Image, ImageFilter\n'), ((889, 903), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (897, 903), True, 'import numpy as np\n'), ((920, 986), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (936, 986), False, 'import cv2\n'), ((6144, 6235), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Format California annotations to the VIA format"""'}), "(description=\n 'Format California annotations to the VIA format')\n", (6167, 6235), False, 'import argparse\n'), ((7005, 7047), 'via.dump', 'VIA.dump', (['via', 'args.output'], {'path': 'args.path'}), '(via, args.output, path=args.path)\n', (7013, 7047), True, 'import via as VIA\n'), ((634, 656), 'numpy.array', 'np.array', (['p'], {'dtype': 'int'}), '(p, dtype=int)\n', (642, 656), True, 'import numpy as np\n'), ((1158, 1177), 'numpy.amin', 'np.amin', (['temp[:, 0]'], {}), '(temp[:, 0])\n', (1165, 1177), True, 'import numpy as np\n'), ((1195, 1214), 'numpy.amax', 'np.amax', (['temp[:, 0]'], {}), '(temp[:, 0])\n', (1202, 1214), True, 'import numpy as np\n'), ((1232, 1251), 'numpy.amin', 'np.amin', (['temp[:, 1]'], {}), '(temp[:, 1])\n', (1239, 1251), True, 'import numpy as np\n'), ((1269, 1288), 'numpy.amax', 'np.amax', (['temp[:, 1]'], {}), '(temp[:, 1])\n', (1276, 1288), True, 'import numpy as np\n'), ((1828, 1851), 'os.path.join', 'os.path.join', (['path', 'key'], {}), '(path, key)\n', (1840, 1851), False, 'import os\n'), ((1859, 1884), 'os.path.exists', 'os.path.exists', (['imagename'], {}), '(imagename)\n', (1873, 1884), False, 'import os\n'), ((4311, 4341), 'random.choice', 'random.choice', (['self.transforms'], {}), '(self.transforms)\n', (4324, 4341), False, 'import random\n'), ((6584, 6634), 'os.path.join', 'os.path.join', (['args.path', '"""SolarArrayPolygons.json"""'], {}), "(args.path, 'SolarArrayPolygons.json')\n", (6596, 6634), False, 'import os\n'), ((6658, 6670), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6667, 6670), False, 'import json\n'), ((1118, 1135), 'numpy.array', 'np.array', (['polygon'], {}), '(polygon)\n', (1126, 1135), True, 'import numpy as np\n'), ((1898, 1919), 'PIL.Image.open', 'Image.open', (['imagename'], {}), '(imagename)\n', (1908, 1919), False, 'from PIL import Image, ImageFilter\n'), ((2406, 2427), 'PIL.Image.open', 'Image.open', (['imagename'], {}), '(imagename)\n', (2416, 2427), False, 'from PIL import Image, ImageFilter\n'), ((2744, 2765), 'PIL.Image.open', 'Image.open', (['imagename'], {}), '(imagename)\n', (2754, 2765), False, 'from PIL import Image, ImageFilter\n'), ((2896, 2932), 'numpy.arange', 'np.arange', (['(0)', 
'image.width', 'self.size'], {}), '(0, image.width, self.size)\n', (2905, 2932), True, 'import numpy as np\n'), ((4600, 4661), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['brightness', 'contrast', 'saturation', 'hue'], {}), '(brightness, contrast, saturation, hue)\n', (4622, 4661), False, 'from torchvision import transforms\n'), ((2952, 2989), 'numpy.arange', 'np.arange', (['(0)', 'image.height', 'self.size'], {}), '(0, image.height, self.size)\n', (2961, 2989), True, 'import numpy as np\n'), ((3187, 3211), 'random.shuffle', 'random.shuffle', (['clusters'], {}), '(clusters)\n', (3201, 3211), False, 'import random\n'), ((3297, 3336), 'random.randint', 'random.randint', (['(-self.shift)', 'self.shift'], {}), '(-self.shift, self.shift)\n', (3311, 3336), False, 'import random\n'), ((3352, 3391), 'random.randint', 'random.randint', (['(-self.shift)', 'self.shift'], {}), '(-self.shift, self.shift)\n', (3366, 3391), False, 'import random\n'), ((3699, 3740), 'random.randrange', 'random.randrange', (['(image.width - self.size)'], {}), '(image.width - self.size)\n', (3715, 3740), False, 'import random\n'), ((3755, 3797), 'random.randrange', 'random.randrange', (['(image.height - self.size)'], {}), '(image.height - self.size)\n', (3771, 3797), False, 'import random\n')]
|
import logging
import random
from django.conf import settings
from django.db import IntegrityError
from django.db.models import Q
from rest_access_policy import AccessPolicy
from rest_framework import viewsets
from store.const import ORDER_IDS_SESSION_PARAM_NAME
from store.models import Order
from store.serializers import MyOrderSerializer
log = logging.getLogger(__name__)
class MyOrderAccessPolicy(AccessPolicy):
statements = [
{
"action": ["list", "retrieve"],
"principal": ["*"],
"effect": "allow",
"condition": "can_view_my_order",
},
{
"action": ["create", "update", "partial_update", "destroy"],
"principal": ["*"],
"effect": "allow",
"condition": "can_moderate_my_order",
},
]
@staticmethod
def can_view_my_order(request, view, action) -> bool:
return request.user.has_perm('store.view_my_order')
@staticmethod
def can_moderate_my_order(request, view, action) -> bool:
return request.user.has_perm('store.moderate_my_order')
@classmethod
def scope_queryset(cls, request, view, action, qs):
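        # Anonymous users only ever see anonymous orders (for `list`, only those
        # recorded in their own session); authenticated users see their own orders
        # plus anonymous ones, again restricted to the session for `list`.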
if request.user.is_anonymous:
qs = qs.filter(user=None) # anonymous is allowed to see anonymous orders only
if action == 'list':
order_ids = request.session.get(ORDER_IDS_SESSION_PARAM_NAME, [])
qs = qs.filter(id__in=order_ids)
else:
if action == 'list':
order_ids = request.session.get(ORDER_IDS_SESSION_PARAM_NAME, [])
qs = qs.filter(Q(user=request.user) | Q(id__in=order_ids, user=None))
else:
qs = qs.filter(Q(user=request.user) | Q(user=None))
return qs
class MyOrderView(viewsets.mixins.CreateModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.UpdateModelMixin,
viewsets.mixins.ListModelMixin,
viewsets.GenericViewSet):
permission_classes = (MyOrderAccessPolicy,)
queryset = Order.objects
serializer_class = MyOrderSerializer
filterset_fields = ['status']
ordering = ['id']
@property
def access_policy(self):
return self.permission_classes[0]
def get_queryset(self):
return self.access_policy.scope_queryset(
self.request, self, self.action, super().get_queryset()
)
def perform_create(self, serializer):
if not self.request.user.is_anonymous:
return super().perform_create(serializer)
# it is allowed to anyone to access an order created by an anonymous user
# sequential id generation is vulnerable to pickup attacks
# if one knows his ID, he can guess which one should be next
# let's make it harder to guess
for _ in range(settings.ANONYMOUS_ORDER_ID_GENERATION_ITERATIONS):
try:
random.seed()
serializer.validated_data['id'] = random.randint(*settings.ANONYMOUS_ORDER_ID_GENERATION_RANGE)
super().perform_create(serializer)
break
except IntegrityError:
pass
        else:
            # for/else: the loop finished without a successful insert
            log.error('Unable to generate anonymous Order-ID.')
            raise IntegrityError('Unable to generate a unique anonymous Order-ID.')
self.request.session.setdefault(ORDER_IDS_SESSION_PARAM_NAME, []).append(serializer.instance.id)
self.request.session.modified = True
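# Illustrative settings sketch (these values are assumptions, not taken from this
# project): a wide, sparse range keeps collisions rare while making other
# anonymous order IDs impractical to guess.
#
#     ANONYMOUS_ORDER_ID_GENERATION_ITERATIONS = 10
#     ANONYMOUS_ORDER_ID_GENERATION_RANGE = (10_000_000, 99_999_999)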
|
[
"django.db.models.Q",
"random.seed",
"random.randint",
"logging.getLogger"
] |
[((351, 378), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (368, 378), False, 'import logging\n'), ((2983, 2996), 'random.seed', 'random.seed', ([], {}), '()\n', (2994, 2996), False, 'import random\n'), ((3047, 3108), 'random.randint', 'random.randint', (['*settings.ANONYMOUS_ORDER_ID_GENERATION_RANGE'], {}), '(*settings.ANONYMOUS_ORDER_ID_GENERATION_RANGE)\n', (3061, 3108), False, 'import random\n'), ((1639, 1659), 'django.db.models.Q', 'Q', ([], {'user': 'request.user'}), '(user=request.user)\n', (1640, 1659), False, 'from django.db.models import Q\n'), ((1662, 1692), 'django.db.models.Q', 'Q', ([], {'id__in': 'order_ids', 'user': 'None'}), '(id__in=order_ids, user=None)\n', (1663, 1692), False, 'from django.db.models import Q\n'), ((1744, 1764), 'django.db.models.Q', 'Q', ([], {'user': 'request.user'}), '(user=request.user)\n', (1745, 1764), False, 'from django.db.models import Q\n'), ((1767, 1779), 'django.db.models.Q', 'Q', ([], {'user': 'None'}), '(user=None)\n', (1768, 1779), False, 'from django.db.models import Q\n')]
|
import sys
sys.path.insert(0, '.')
import unittest
import load_params
import warnings
import verify_output
class TestVerifyOutput(unittest.TestCase):
def setUp(self):
self.params = load_params.load_params('test.in')
self.time = [0, 10, 20, 30]
self.tempW = [40, 42, 44, 46]
self.tempP = [40, 41.9, 43.8, 45.7]
def test_VO1(self):
eW = [0, 1000, 2000, 19800]
eP = [0, 1000, 2000, 5400]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
        assert len(w) == 0
def test_VO2(self):
eW = [0, 1000, 2000, 19800]
eP = [0, 1000, 2000, 3000]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the PCM output' +
' and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
def test_VO3(self):
eW = [0, 1000, 2000, 3000]
eP = [0, 1000, 2000, 5400]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the water ' +
'output and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
def test_VO4(self):
eW = [0, 1000, 2000, 3000]
eP = [0, 1000, 2000, 3000]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert issubclass(w[1].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the water ' +
'output and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the PCM output' +
' and the expected output based on the law of conservation of energy.\n') in str(w[1].message)
class VerifyOutputSuite:
def suite(self):
suite = unittest.TestLoader().loadTestsFromTestCase(TestVerifyOutput)
return suite
|
[
"load_params.load_params",
"sys.path.insert",
"verify_output.verify_output",
"warnings.catch_warnings",
"unittest.TestLoader"
] |
[((11, 34), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (26, 34), False, 'import sys\n'), ((197, 231), 'load_params.load_params', 'load_params.load_params', (['"""test.in"""'], {}), "('test.in')\n", (220, 231), False, 'import load_params\n'), ((459, 495), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (482, 495), False, 'import warnings\n'), ((514, 602), 'verify_output.verify_output', 'verify_output.verify_output', (['self.time', 'self.tempW', 'self.tempP', 'eW', 'eP', 'self.params'], {}), '(self.time, self.tempW, self.tempP, eW, eP, self\n .params)\n', (541, 602), False, 'import verify_output\n'), ((738, 774), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (761, 774), False, 'import warnings\n'), ((793, 881), 'verify_output.verify_output', 'verify_output.verify_output', (['self.time', 'self.tempW', 'self.tempP', 'eW', 'eP', 'self.params'], {}), '(self.time, self.tempW, self.tempP, eW, eP, self\n .params)\n', (820, 881), False, 'import verify_output\n'), ((1279, 1315), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1302, 1315), False, 'import warnings\n'), ((1334, 1422), 'verify_output.verify_output', 'verify_output.verify_output', (['self.time', 'self.tempW', 'self.tempP', 'eW', 'eP', 'self.params'], {}), '(self.time, self.tempW, self.tempP, eW, eP, self\n .params)\n', (1361, 1422), False, 'import verify_output\n'), ((1822, 1858), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1845, 1858), False, 'import warnings\n'), ((1877, 1965), 'verify_output.verify_output', 'verify_output.verify_output', (['self.time', 'self.tempW', 'self.tempP', 'eW', 'eP', 'self.params'], {}), '(self.time, self.tempW, self.tempP, eW, eP, self\n .params)\n', (1904, 1965), False, 'import verify_output\n'), ((2616, 2637), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2635, 2637), False, 'import unittest\n')]
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Module for creating and defining Quil programs.
"""
import warnings
from itertools import count
from math import pi
import numpy as np
from six import string_types
from pyquil._parser.PyQuilListener import run_parser
from pyquil.kraus import _check_kraus_ops, _create_kraus_pragmas
from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit
from .gates import MEASURE, STANDARD_GATES, H
from .quilbase import (DefGate, Gate, Measurement, Pragma, AbstractInstruction, Qubit,
Jump, Label, JumpConditional, JumpTarget, JumpUnless, JumpWhen, Addr)
class Program(object):
def __init__(self, *instructions):
self._defined_gates = []
# Implementation note: the key difference between the private _instructions and the public instructions
# property below is that the private _instructions list may contain placeholder values
self._instructions = []
# Performance optimization: as stated above _instructions may contain placeholder values so the program must
# first be synthesized. _synthesized_instructions is simply a cache on the result of the _synthesize() method.
# It is marked as None whenever new instructions are added.
self._synthesized_instructions = None
self.inst(*instructions)
@property
def defined_gates(self):
"""
A list of defined gates on the program.
"""
return self._defined_gates
@property
def instructions(self):
"""
Fill in any placeholders and return a list of quil AbstractInstructions.
"""
if self._synthesized_instructions is None:
self._synthesized_instructions = self._synthesize()
return self._synthesized_instructions
def inst(self, *instructions):
"""
Mutates the Program object by appending new instructions.
This function accepts a number of different valid forms, e.g.
>>> p = Program()
>>> p.inst(H(0)) # A single instruction
>>> p.inst(H(0), H(1)) # Multiple instructions
>>> p.inst([H(0), H(1)]) # A list of instructions
>>> p.inst(("H", 1)) # A tuple representing an instruction
>>> p.inst("H 0") # A string representing an instruction
>>> q = Program()
>>> p.inst(q) # Another program
It can also be chained:
>>> p = Program()
>>> p.inst(H(0)).inst(H(1))
:param instructions: A list of Instruction objects, e.g. Gates
:return: self for method chaining
"""
for instruction in instructions:
if isinstance(instruction, list):
self.inst(*instruction)
elif isinstance(instruction, tuple):
if len(instruction) == 0:
raise ValueError("tuple should have at least one element")
elif len(instruction) == 1:
self.inst(instruction[0])
else:
op = instruction[0]
if op == "MEASURE":
if len(instruction) == 2:
self.measure(instruction[1])
else:
self.measure(instruction[1], instruction[2])
else:
params = []
possible_params = instruction[1]
rest = instruction[2:]
if isinstance(possible_params, list):
params = possible_params
else:
rest = [possible_params] + list(rest)
self.gate(op, params, rest)
elif isinstance(instruction, string_types):
self.inst(run_parser(instruction.strip()))
elif isinstance(instruction, Program):
if id(self) == id(instruction):
raise ValueError("Nesting a program inside itself is not supported")
for defgate in instruction._defined_gates:
self.inst(defgate)
for instr in instruction._instructions:
self.inst(instr)
# Implementation note: these two base cases are the only ones which modify the program
elif isinstance(instruction, DefGate):
defined_gate_names = [gate.name for gate in self._defined_gates]
if instruction.name in defined_gate_names:
warnings.warn("Gate {} has already been defined in this program".format(instruction.name))
self._defined_gates.append(instruction)
elif isinstance(instruction, AbstractInstruction):
self._instructions.append(instruction)
self._synthesized_instructions = None
else:
raise TypeError("Invalid instruction: {}".format(instruction))
return self
def gate(self, name, params, qubits):
"""
Add a gate to the program.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param string name: The name of the gate.
:param list params: Parameters to send to the gate.
:param list qubits: Qubits that the gate operates on.
:return: The Program instance
:rtype: Program
"""
return self.inst(Gate(name, params, [unpack_qubit(q) for q in qubits]))
def defgate(self, name, matrix, parameters=None):
"""
Define a new static gate.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param string name: The name of the gate.
:param array-like matrix: List of lists or Numpy 2d array.
:param list parameters: list of parameters that are used in this gate
:return: The Program instance.
:rtype: Program
"""
return self.inst(DefGate(name, matrix, parameters))
def define_noisy_gate(self, name, qubit_indices, kraus_ops):
"""
Overload a static ideal gate with a noisy one defined in terms of a Kraus map.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param str name: The name of the gate.
:param tuple|list qubit_indices: The qubits it acts on.
:param tuple|list kraus_ops: The Kraus operators.
:return: The Program instance
:rtype: Program
"""
kraus_ops = [np.asarray(k, dtype=np.complex128) for k in kraus_ops]
_check_kraus_ops(len(qubit_indices), kraus_ops)
return self.inst(_create_kraus_pragmas(name, tuple(qubit_indices), kraus_ops))
def no_noise(self):
"""
Prevent a noisy gate definition from being applied to the immediately following Gate
instruction.
:return: Program
"""
return self.inst(Pragma("NO-NOISE"))
def measure(self, qubit_index, classical_reg=None):
"""
Measures a qubit at qubit_index and puts the result in classical_reg
:param int qubit_index: The address of the qubit to measure.
:param int classical_reg: The address of the classical bit to store the result.
:returns: The Quil Program with the appropriate measure instruction appended, e.g.
MEASURE 0 [1]
:rtype: Program
"""
return self.inst(MEASURE(qubit_index, classical_reg))
def measure_all(self, *qubit_reg_pairs):
"""
Measures many qubits into their specified classical bits, in the order
they were entered.
:param Tuple qubit_reg_pairs: Tuples of qubit indices paired with classical bits.
:return: The Quil Program with the appropriate measure instructions appended, e.g.
.. code::
MEASURE 0 [1]
MEASURE 1 [2]
MEASURE 2 [3]
:rtype: Program
"""
for qubit_index, classical_reg in qubit_reg_pairs:
self.inst(MEASURE(qubit_index, classical_reg))
return self
def while_do(self, classical_reg, q_program):
"""
While a classical register at index classical_reg is 1, loop q_program
Equivalent to the following construction:
.. code::
WHILE [c]:
instr...
=>
LABEL @START
JUMP-UNLESS @END [c]
instr...
JUMP @START
LABEL @END
:param int classical_reg: The classical register to check
:param Program q_program: The Quil program to loop.
:return: The Quil Program with the loop instructions added.
:rtype: Program
"""
label_start = LabelPlaceholder("START")
label_end = LabelPlaceholder("END")
self.inst(JumpTarget(label_start))
self.inst(JumpUnless(target=label_end, condition=Addr(classical_reg)))
self.inst(q_program)
self.inst(Jump(label_start))
self.inst(JumpTarget(label_end))
return self
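    # Illustrative usage sketch (not from the original docstrings): re-measure
    # qubit 0 while classical bit 0 still reads 1.
    #
    #     loop_body = Program(H(0)).measure(0, 0)
    #     p = Program(H(0)).measure(0, 0)
    #     p.while_do(0, loop_body)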
def if_then(self, classical_reg, if_program, else_program=None):
"""
If the classical register at index classical reg is 1, run if_program, else run
else_program.
Equivalent to the following construction:
.. code::
IF [c]:
instrA...
ELSE:
instrB...
=>
JUMP-WHEN @THEN [c]
instrB...
JUMP @END
LABEL @THEN
instrA...
LABEL @END
:param int classical_reg: The classical register to check as the condition
:param Program if_program: A Quil program to execute if classical_reg is 1
:param Program else_program: A Quil program to execute if classical_reg is 0. This
argument is optional and defaults to an empty Program.
:returns: The Quil Program with the branching instructions added.
:rtype: Program
"""
else_program = else_program if else_program is not None else Program()
label_then = LabelPlaceholder("THEN")
label_end = LabelPlaceholder("END")
self.inst(JumpWhen(target=label_then, condition=Addr(classical_reg)))
self.inst(else_program)
self.inst(Jump(label_end))
self.inst(JumpTarget(label_then))
self.inst(if_program)
self.inst(JumpTarget(label_end))
return self
def alloc(self):
"""
Get a new qubit.
:return: A qubit.
:rtype: Qubit
"""
return QubitPlaceholder()
def out(self):
"""
Converts the Quil program to a readable string.
:return: String form of a program
:rtype: string
"""
s = ""
for dg in self._defined_gates:
s += dg.out()
s += "\n"
for instr in self.instructions:
s += instr.out() + "\n"
return s
def get_qubits(self):
"""
Returns all of the qubit indices used in this program, including gate applications and
allocated qubits. e.g.
>>> p = Program()
>>> p.inst(("H", 1))
>>> p.get_qubits()
{1}
>>> q = p.alloc()
>>> p.inst(H(q))
>>> len(p.get_qubits())
2
:return: A set of all the qubit indices used in this program
:rtype: set
"""
qubits = set()
for instr in self.instructions:
if isinstance(instr, Gate):
qubits |= {q.index for q in instr.qubits}
elif isinstance(instr, Measurement):
qubits.add(instr.qubit.index)
return qubits
def is_protoquil(self):
"""
Protoquil programs may only contain gates, no classical instructions and no jumps.
:return: True if the Program is Protoquil, False otherwise
"""
for instr in self._instructions:
if not isinstance(instr, Gate):
return False
return True
def pop(self):
"""
Pops off the last instruction.
:return: The instruction that was popped.
:rtype: tuple
"""
res = self._instructions.pop()
self._synthesized_instructions = None
return res
def dagger(self, inv_dict=None, suffix="-INV"):
"""
Creates the conjugate transpose of the Quil program. The program must not
contain any irreversible actions (measurement, control flow, qubit allocation).
:return: The Quil program's inverse
:rtype: Program
"""
if not self.is_protoquil():
raise ValueError("Program must be valid Protoquil")
daggered = Program()
for gate in self._defined_gates:
if inv_dict is None or gate.name not in inv_dict:
daggered.defgate(gate.name + suffix, gate.matrix.T.conj())
for gate in reversed(self._instructions):
if gate.name in STANDARD_GATES:
if gate.name == "S":
daggered.inst(STANDARD_GATES["PHASE"](-pi / 2, *gate.qubits))
elif gate.name == "T":
daggered.inst(STANDARD_GATES["RZ"](pi / 4, *gate.qubits))
elif gate.name == "ISWAP":
daggered.inst(STANDARD_GATES["PSWAP"](pi / 2, *gate.qubits))
else:
negated_params = list(map(lambda x: -1 * x, gate.params))
daggered.inst(STANDARD_GATES[gate.name](*(negated_params + gate.qubits)))
else:
if inv_dict is None or gate.name not in inv_dict:
gate_inv_name = gate.name + suffix
else:
gate_inv_name = inv_dict[gate.name]
daggered.inst(tuple([gate_inv_name] + gate.qubits))
return daggered
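    # Illustrative note (not from the original source): instructions are emitted in
    # reverse order with each gate inverted, so Program(H(0), T(0)).dagger() yields
    # a program equivalent to RZ(-pi/4) 0 followed by H 0.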
def _synthesize(self):
"""
Takes a program which may contain placeholders and assigns them all defined values.
For qubit placeholders:
1. We look through the program to find all the known indexes of qubits and add them to a set
2. We create a mapping from undefined qubits to their newly assigned index
3. For every qubit placeholder in the program, if it's not already been assigned then look through the set of
known indexes and find the lowest available one
For label placeholders:
1. Start a counter at 1
2. For every label placeholder in the program, replace it with a defined label using the counter and increment
the counter
:return: List of AbstractInstructions with all placeholders removed
"""
used_indexes = set()
for instr in self._instructions:
if isinstance(instr, Gate):
for q in instr.qubits:
if not isinstance(q, QubitPlaceholder):
used_indexes.add(q.index)
elif isinstance(instr, Measurement):
if not isinstance(instr.qubit, QubitPlaceholder):
used_indexes.add(instr.qubit.index)
def find_available_index():
# Just do a linear search.
for i in count(start=0, step=1):
if i not in used_indexes:
return i
qubit_mapping = dict()
def remap_qubit(qubit):
if not isinstance(qubit, QubitPlaceholder):
return qubit
if id(qubit) in qubit_mapping:
return qubit_mapping[id(qubit)]
else:
available_index = find_available_index()
used_indexes.add(available_index)
remapped_qubit = Qubit(available_index)
qubit_mapping[id(qubit)] = remapped_qubit
return remapped_qubit
label_mapping = dict()
label_counter = 1
def remap_label(placeholder):
if id(placeholder) in label_mapping:
return label_mapping[id(placeholder)]
else:
label = Label(placeholder.prefix + str(label_counter))
label_mapping[id(placeholder)] = label
return label
result = []
for instr in self._instructions:
# Remap qubits on Gate and Measurement instructions
if isinstance(instr, Gate):
remapped_qubits = [remap_qubit(q) for q in instr.qubits]
result.append(Gate(instr.name, instr.params, remapped_qubits))
elif isinstance(instr, Measurement):
result.append(Measurement(remap_qubit(instr.qubit), instr.classical_reg))
# Remap any label placeholders on jump or target instructions
elif isinstance(instr, Jump) and isinstance(instr.target, LabelPlaceholder):
result.append(Jump(remap_label(instr.target)))
label_counter += 1
elif isinstance(instr, JumpTarget) and isinstance(instr.label, LabelPlaceholder):
result.append(JumpTarget(remap_label(instr.label)))
label_counter += 1
elif isinstance(instr, JumpConditional) and isinstance(instr.target, LabelPlaceholder):
new_label = remap_label(instr.target)
if isinstance(instr, JumpWhen):
result.append(JumpWhen(new_label, instr.condition))
elif isinstance(instr, JumpUnless):
result.append(JumpUnless(new_label, instr.condition))
else:
raise TypeError("Encountered a JumpConditional that wasn't JumpWhen or JumpUnless: {} {}"
.format(type(instr), instr))
label_counter += 1
# Otherwise simply add it to the result
else:
result.append(instr)
return result
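    # Illustrative example (not from the original source): for a program that uses
    # Qubit(0) explicitly plus two placeholders from alloc(), _synthesize() assigns
    # the placeholders the lowest free indices (1 and 2) and replaces every
    # LabelPlaceholder with a concrete label such as THEN1 or END2.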
def __add__(self, other):
"""
Concatenate two programs together, returning a new one.
:param Program other: Another program or instruction to concatenate to this one.
:return: A newly concatenated program.
:rtype: Program
"""
p = Program()
p.inst(self)
p.inst(other)
return p
def __getitem__(self, index):
"""
Allows indexing into the program to get an action.
:param index: The action at the specified index.
:return:
"""
return self.instructions[index]
def __iter__(self):
"""
Allow built in iteration through a program's instructions, e.g. [a for a in Program(X(0))]
:return:
"""
return self.instructions.__iter__()
def __eq__(self, other):
return isinstance(other, self.__class__) and self.out() == other.out()
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self._instructions)
def __str__(self):
return self.out()
def merge_programs(prog_list):
"""
Merges a list of pyQuil programs into a single one by appending them in sequence
:param list prog_list: A list of pyquil programs
:return: a single pyQuil program
:rtype: Program
"""
return sum(prog_list, Program())
|
[
"numpy.asarray",
"itertools.count",
"pyquil.quilatom.QubitPlaceholder",
"pyquil.quilatom.LabelPlaceholder",
"pyquil.quilatom.unpack_qubit"
] |
[((10548, 10573), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""START"""'], {}), "('START')\n", (10564, 10573), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((10594, 10617), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""END"""'], {}), "('END')\n", (10610, 10617), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((11926, 11950), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""THEN"""'], {}), "('THEN')\n", (11942, 11950), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((11971, 11994), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""END"""'], {}), "('END')\n", (11987, 11994), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((12408, 12426), 'pyquil.quilatom.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (12424, 12426), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((8290, 8324), 'numpy.asarray', 'np.asarray', (['k'], {'dtype': 'np.complex128'}), '(k, dtype=np.complex128)\n', (8300, 8324), True, 'import numpy as np\n'), ((17096, 17118), 'itertools.count', 'count', ([], {'start': '(0)', 'step': '(1)'}), '(start=0, step=1)\n', (17101, 17118), False, 'from itertools import count\n'), ((6551, 6566), 'pyquil.quilatom.unpack_qubit', 'unpack_qubit', (['q'], {}), '(q)\n', (6563, 6566), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n')]
|
# -*- coding: utf-8 -*-
from codecs import open # To use a consistent encoding
from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-rest-params',
version='1.0.0',
description='Function decorator for Django REST Framework for specifying and constraining API parameters.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/cammsaul/django-rest-params',
# Author details
author='<NAME>',
author_email='<EMAIL>',
# Choose your license
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
],
keywords='rest,django,api,params,parameters,djangorestframework,decorator',
packages=find_packages(exclude=['tests']),
install_requires=['django', 'djangorestframework']
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((167, 189), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (179, 189), False, 'from os import path\n'), ((252, 281), 'os.path.join', 'path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (261, 281), False, 'from os import path\n'), ((1335, 1367), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (1348, 1367), False, 'from setuptools import find_packages, setup\n')]
|
"""
There is an array with some elements.
All elements are equal except for one (the imposter); return that element.
"""
def imposter(arr: list):
"""
>>> imposter([1,2,1,1,1,1])
2
>>> imposter(["python", "java", "python", "python"])
'java'
"""
n = []
s = set(arr)
for e in s:
if arr.count(e) == 1:
n.append(e)
return n[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"doctest.testmod"
] |
[((444, 461), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (459, 461), False, 'import doctest\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.efficientnet.utils import MemoryEfficientSwish as Swish
from model.module import DepthWiseSeparableConvModule as DWSConv
from model.module import MaxPool2dSamePad
class BiFPN(nn.Module):
"""
BiFPN block.
    Depending on its position in the feature pyramid, it accepts either
    seven feature maps (if this block is the first block in the FPN) or
    five feature maps produced by the previous BiFPN block.
"""
EPS: float = 1e-04
REDUCTION_RATIO: int = 2
def __init__(self, n_channels):
super(BiFPN, self).__init__()
self.conv_4_td = DWSConv(n_channels, n_channels, relu=False)
self.conv_5_td = DWSConv(n_channels, n_channels, relu=False)
self.conv_6_td = DWSConv(n_channels, n_channels, relu=False)
self.weights_4_td = nn.Parameter(torch.ones(2))
self.weights_5_td = nn.Parameter(torch.ones(2))
self.weights_6_td = nn.Parameter(torch.ones(2))
self.conv_3_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_4_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_5_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_6_out = DWSConv(n_channels, n_channels, relu=False)
self.conv_7_out = DWSConv(n_channels, n_channels, relu=False)
self.weights_3_out = nn.Parameter(torch.ones(2))
self.weights_4_out = nn.Parameter(torch.ones(3))
self.weights_5_out = nn.Parameter(torch.ones(3))
self.weights_6_out = nn.Parameter(torch.ones(3))
self.weights_7_out = nn.Parameter(torch.ones(2))
self.upsample = lambda x: F.interpolate(x, scale_factor=self.REDUCTION_RATIO)
self.downsample = MaxPool2dSamePad(self.REDUCTION_RATIO + 1, self.REDUCTION_RATIO)
self.act = Swish()
def forward(self, features):
if len(features) == 5:
p_3, p_4, p_5, p_6, p_7 = features
p_4_2, p_5_2 = None, None
else:
p_3, p_4, p_4_2, p_5, p_5_2, p_6, p_7 = features
# Top Down Path
p_6_td = self.conv_6_td(
self._fuse_features(
weights=self.weights_6_td,
features=[p_6, self.upsample(p_7)]
)
)
p_5_td = self.conv_5_td(
self._fuse_features(
weights=self.weights_5_td,
features=[p_5, self.upsample(p_6_td)]
)
)
p_4_td = self.conv_4_td(
self._fuse_features(
weights=self.weights_4_td,
features=[p_4, self.upsample(p_5_td)]
)
)
p_4_in = p_4 if p_4_2 is None else p_4_2
p_5_in = p_5 if p_5_2 is None else p_5_2
# Out
p_3_out = self.conv_3_out(
self._fuse_features(
weights=self.weights_3_out,
features=[p_3, self.upsample(p_4_td)]
)
)
p_4_out = self.conv_4_out(
self._fuse_features(
weights=self.weights_4_out,
features=[p_4_in, p_4_td, self.downsample(p_3_out)]
)
)
p_5_out = self.conv_5_out(
self._fuse_features(
weights=self.weights_5_out,
features=[p_5_in, p_5_td, self.downsample(p_4_out)]
)
)
p_6_out = self.conv_6_out(
self._fuse_features(
weights=self.weights_6_out,
features=[p_6, p_6_td, self.downsample(p_5_out)]
)
)
p_7_out = self.conv_7_out(
self._fuse_features(
weights=self.weights_7_out,
features=[p_7, self.downsample(p_6_out)]
)
)
return [p_3_out, p_4_out, p_5_out, p_6_out, p_7_out]
def _fuse_features(self, weights, features):
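        # Fast normalized fusion: out = act(sum_i(w_i * f_i) / (sum_i(w_i) + eps)),
        # with the learnable weights kept non-negative via ReLU.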
weights = F.relu(weights)
num = sum([w * f for w, f in zip(weights, features)])
det = sum(weights) + self.EPS
x = self.act(num / det)
return x
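# Illustrative usage sketch (not part of the original module): feed five feature
# maps that share `n_channels` and halve in resolution from P3 to P7, e.g.
#
#     fpn = BiFPN(n_channels=64)
#     feats = [torch.randn(1, 64, 2 ** (7 - i), 2 ** (7 - i)) for i in range(5)]
#     p3, p4, p5, p6, p7 = fpn(feats)   # each output keeps its input's shape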
|
[
"torch.ones",
"model.module.DepthWiseSeparableConvModule",
"model.efficientnet.utils.MemoryEfficientSwish",
"model.module.MaxPool2dSamePad",
"torch.nn.functional.relu",
"torch.nn.functional.interpolate"
] |
[((645, 688), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (652, 688), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((714, 757), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (721, 757), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((783, 826), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (790, 826), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((1023, 1066), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (1030, 1066), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((1093, 1136), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (1100, 1136), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((1163, 1206), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (1170, 1206), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((1233, 1276), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (1240, 1276), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((1303, 1346), 'model.module.DepthWiseSeparableConvModule', 'DWSConv', (['n_channels', 'n_channels'], {'relu': '(False)'}), '(n_channels, n_channels, relu=False)\n', (1310, 1346), True, 'from model.module import DepthWiseSeparableConvModule as DWSConv\n'), ((1746, 1810), 'model.module.MaxPool2dSamePad', 'MaxPool2dSamePad', (['(self.REDUCTION_RATIO + 1)', 'self.REDUCTION_RATIO'], {}), '(self.REDUCTION_RATIO + 1, self.REDUCTION_RATIO)\n', (1762, 1810), False, 'from model.module import MaxPool2dSamePad\n'), ((1831, 1838), 'model.efficientnet.utils.MemoryEfficientSwish', 'Swish', ([], {}), '()\n', (1836, 1838), True, 'from model.efficientnet.utils import MemoryEfficientSwish as Swish\n'), ((3883, 3898), 'torch.nn.functional.relu', 'F.relu', (['weights'], {}), '(weights)\n', (3889, 3898), True, 'import torch.nn.functional as F\n'), ((869, 882), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (879, 882), False, 'import torch\n'), ((925, 938), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (935, 938), False, 'import torch\n'), ((981, 994), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (991, 994), False, 'import torch\n'), ((1390, 1403), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1400, 1403), False, 'import torch\n'), ((1447, 1460), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (1457, 1460), False, 'import torch\n'), ((1504, 1517), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (1514, 1517), False, 'import torch\n'), ((1561, 1574), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (1571, 1574), False, 'import torch\n'), ((1618, 1631), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1628, 1631), False, 'import torch\n'), ((1668, 1719), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], 
{'scale_factor': 'self.REDUCTION_RATIO'}), '(x, scale_factor=self.REDUCTION_RATIO)\n', (1681, 1719), True, 'import torch.nn.functional as F\n')]
|
from unittest import TestCase
import json
from pycrunchbase.resource.node import Node
from pycrunchbase.resource.utils import parse_date
class TestNode(Node):
KNOWN_PROPERTIES = ['property1', 'property2']
def _coerce_values(self):
# intentionally coerce bad values for test purposes
attr = 'property1'
if getattr(self, attr, None):
setattr(self, attr, parse_date(getattr(self, attr)))
data = {
"type": "TestNode",
"uuid": "uuid",
'properties': {
'property1': 'one',
'property2': 'two'
},
'relationships': {
'unknown': {
'paging': {},
'items': {}
}
},
}
class NodeTestCase(TestCase):
def test_node_creation_from_dict(self):
node = TestNode(data)
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
def test_node_creation_from_string(self):
node = TestNode(json.dumps(data))
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
|
[
"json.dumps"
] |
[((956, 972), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (966, 972), False, 'import json\n')]
|
import re
import html
import string
from functools import partial
from PIL import Image
from os.path import split, splitext
import random
from django.core.files.uploadedfile import InMemoryUploadedFile
from ib.models import Post, File
from django.conf import settings
from os.path import join
import subprocess
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
def validate_text(text):
""" Returns True if text exists and is more than white spaces, False otherwise."""
return bool(text) and not text.isspace()
def remove_invalid_files(files, request):
""" Returns a list with only valid files, discarding files that aren't accepted.
'blob' is sent by Dropzone.js in posts without files. """
accepted = []
for f in [i for i in files if i.name != 'blob']:
if check_file(f):
accepted.append(f)
else:
messages.error(request, f"Upload error: {f} too large or extension not allowed.")
return accepted
def validate_post(text, files, embed, thread_id, request):
valid_text = validate_text(text)
if thread_id:
if valid_text or files or embed:
return True
else:
messages.error(request, "A post must have text, a file or a YouTube embed.")
else:
if valid_text and (files or embed):
return True
elif not valid_text:
messages.error(request, "A thread must have text.")
else:
messages.error(request, "A thread must have files or a YouTube embed.")
return False
def validate_embed(embed, request):
    """ Validates an embed string. If valid, returns (url, provider, id); otherwise returns (None, None, None). """
    if not embed:
        return None, None, None
patterns = [
'((?:https?:\\/\\/)?(?:www\\.)?(?:youtu\\.be\\/|youtube\\.com\\/(?:embed\\/|v\\/|watch\\?v=|watch\\?.+&v=))((\\w|-){11})(?:\\S+)?)']
for p in patterns:
matches = re.findall(p, embed)
if matches:
return matches[0][0], "youtube", matches[0][1]
messages.error(request, 'Bad URL. Could not embed.')
return None, None, None
def prepare_text(text):
""" Escapes text, adds <span> tags for greentext, red text and spoiler text, rainbow text, big text and quotes. """
text = html.escape(text)
def spanify(m, klass):
"""Nobody expects the Spanish inquisition!
Insert span with desired class:
m - match object
klass - the class name"""
carriage_return = '\r' # In Firefox based browsers, a \r will appear at the end of a green text.
return f'<span class="{klass}">{m[1].replace(carriage_return, "")}</span>'
    regexes = [('greentext', '^(&gt;.+)$'),  # > green line (text is already HTML-escaped)
('redtext', r'==(.+?)=='), # == red text ==
('spoilertext', r'\*\*(.+?)\*\*'), # ** spoiler text **
('rainbowtext', r'%%(.+?)%%'), # %% rainbow text %%
('bigtext', r'##(.+?)##'), # ## big text ##
               ('boldtext', r"'''(.+?)'''"), ]  # ''' bold text '''
for name, p in regexes:
text = re.sub(p, partial(spanify, klass=name), text, flags=re.MULTILINE)
    # quoted post IDs were escaped above, so "<<123" appears in text as "&lt;&lt;123"
    quotes = set(re.findall(r'&lt;&lt;\d+', text))
for q in quotes:
try:
p = Post.objects.get(pk=int(q[8:]))
text = text.replace(q, f'<a class="quote" href="/{p.board}/thread/{get_thread(p)}/#{p.id}">{q}</a>')
except Post.DoesNotExist:
continue
p = '((?:https?:\\/\\/)?(?:www\\.)?(?:youtu\\.be\\/|youtube\\.com\\/(?:embed\\/|v\\/|watch\\?v=|watch\\?.+&v=))((\\w|-){11})(?:\\S+)?)'
text = re.sub(p, r'\1' + ' <a class="embedButton" data-from="youtube" data-id="' + r'\2' + '">[Embed]</a>', text)
p = '(https:\/\/www\.pornhub\.com\/view_video\.php\?viewkey=(\w*))'
text = re.sub(p, r'\1' + ' <a class="embedButton" data-from="pornhub" data-id="' + r'\2' + '">[Embed]</a>', text)
return text
def create_post(post: dict, files, request, spoilers) -> int:
p = Post(**post)
p.save()
file_objs = []
for file in files:
original_name = file.name
ext = splitext(file.name)[1]
file.name = ''.join(random.choices(string.ascii_letters + string.digits, k=8)) + ext
file_entry = File(post=p, original_name=original_name,
spoiler=(original_name in spoilers) or ('all' in spoilers))
file_entry.file.name = file.name
write_file(file)
f = file_entry
f.mimetype = file.content_type
f.thumb = f.get_thumb()
f.size_str = f.get_size()
f.original_name_shortened = f.get_original_name_shortened()
if not (f.mimetype.startswith('audio/') or f.mimetype == 'application/epub+zip'):
create_thumb(f)
f.width, f.height = f.get_dimensions()
f.thumb_width, f.thumb_height = f.get_dimensions(thumb=True)
file_objs.append(file_entry)
if file_objs:
File.objects.bulk_create(file_objs)
return (post['thread'].id if post['thread'] else p.id, p)
def write_file(file: InMemoryUploadedFile):
with open(join(settings.MEDIA_ROOT, file.name), 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
def create_thumb(file):
full_file_path = file.file.path
ext = splitext(full_file_path)[1]
dir, fname = split(full_file_path)
fname = 't_' + fname
max_dimension = 256
scale_string = f"scale='trunc(min(1,min({max_dimension}/iw,{max_dimension}/ih))*iw/2)*2':'trunc(min(1,min({max_dimension}/iw,{max_dimension}/ih))*ih/2)*2'"
if ext.lower() in ['.mp4', '.webm']:
subprocess.run(
['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-itsoffset', '-1', '-i', full_file_path, '-vframes', '1',
'-filter:v', scale_string,
join(dir, fname).replace(ext, '.jpg')])
elif ext.lower() == '.gif':
subprocess.run(
['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-i', full_file_path, '-vf', scale_string, '-an',
join(dir, fname)])
elif ext.lower() == '.pdf':
subprocess.run(
['pdftoppm', '-jpeg', full_file_path, join(dir, fname).replace(ext, ''), '-scale-to', '256', '-singlefile'])
else:
im = Image.open(full_file_path)
im.thumbnail((max_dimension, max_dimension))
try:
im.save(join(dir, fname))
except OSError: # OSError might happen if it's a gif renamed as jpg
im.save(join(dir, fname), format='gif') # this will prevent server error, but the thumbnail will be static
def check_file(file: InMemoryUploadedFile):
return file.size <= 100 * 1e6 and file.name.lower().endswith(
('jpeg', 'jpg', 'gif', 'png', 'webp', 'webm', 'mp4', 'mp3', 'ogg', 'flac', 'opus', 'pdf', 'epub'))
def get_thread(post):
if post.thread:
return post.thread.id
else:
return post.id
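# Hedged usage sketch for the markup helpers above; kept entirely in comments so
# that nothing new is imported or called. The sample strings and the video id are
# assumptions for illustration only, not taken from the project.
#
#   prepare_text("==warning== and **spoiler**")
#   # -> '<span class="redtext">warning</span> and <span class="spoilertext">spoiler</span>'
#
#   validate_embed("https://youtu.be/dQw4w9WgXcQ", request)
#   # -> (matched_url, "youtube", "dQw4w9WgXcQ") when the URL matches the pattern
#   #    above; otherwise messages.error() is called and (None, None, None) returned.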
|
[
"functools.partial",
"os.path.join",
"django.contrib.messages.error",
"ib.models.Post",
"random.choices",
"PIL.Image.open",
"re.findall",
"ib.models.File",
"ib.models.File.objects.bulk_create",
"os.path.splitext",
"os.path.split",
"re.sub",
"html.escape"
] |
[((2090, 2142), 'django.contrib.messages.error', 'messages.error', (['request', '"""Bad URL. Could not embed."""'], {}), "(request, 'Bad URL. Could not embed.')\n", (2104, 2142), False, 'from django.contrib import messages\n'), ((2328, 2345), 'html.escape', 'html.escape', (['text'], {}), '(text)\n', (2339, 2345), False, 'import html\n'), ((3701, 3811), 're.sub', 're.sub', (['p', '(\'\\\\1\' + \' <a class="embedButton" data-from="youtube" data-id="\' + \'\\\\2\' +\n    \'">[Embed]</a>\')', 'text'], {}), '(p, \'\\\\1\' + \' <a class="embedButton" data-from="youtube" data-id="\' +\n    \'\\\\2\' + \'">[Embed]</a>\', text)\n', (3707, 3811), False, 'import re\n'), ((3892, 4002), 're.sub', 're.sub', (['p', '(\'\\\\1\' + \' <a class="embedButton" data-from="pornhub" data-id="\' + \'\\\\2\' +\n    \'">[Embed]</a>\')', 'text'], {}), '(p, \'\\\\1\' + \' <a class="embedButton" data-from="pornhub" data-id="\' +\n    \'\\\\2\' + \'">[Embed]</a>\', text)\n', (3898, 4002), False, 'import re\n'), ((4088, 4100), 'ib.models.Post', 'Post', ([], {}), '(**post)\n', (4092, 4100), False, 'from ib.models import Post, File\n'), ((5443, 5464), 'os.path.split', 'split', (['full_file_path'], {}), '(full_file_path)\n', (5448, 5464), False, 'from os.path import split, splitext\n'), ((1985, 2005), 're.findall', 're.findall', (['p', 'embed'], {}), '(p, embed)\n', (1995, 2005), False, 'import re\n'), ((3265, 3297), 're.findall', 're.findall', (['"""<<\\\\d+"""', 'text'], {}), "('<<\\\\d+', text)\n", (3275, 3297), False, 'import re\n'), ((4343, 4444), 'ib.models.File', 'File', ([], {'post': 'p', 'original_name': 'original_name', 'spoiler': "(original_name in spoilers or 'all' in spoilers)"}), "(post=p, original_name=original_name, spoiler=original_name in spoilers or\n    'all' in spoilers)\n", (4347, 4444), False, 'from ib.models import Post, File\n'), ((5033, 5068), 'ib.models.File.objects.bulk_create', 'File.objects.bulk_create', (['file_objs'], {}), '(file_objs)\n', (5057, 5068), False, 'from ib.models import Post, File\n'), ((5398, 5422), 'os.path.splitext', 'splitext', (['full_file_path'], {}), '(full_file_path)\n', (5406, 5422), False, 'from os.path import split, splitext\n'), ((913, 998), 'django.contrib.messages.error', 'messages.error', (['request', 'f"""Upload error: {f} too large or extension not allowed."""'], {}), "(request,\n    f'Upload error: {f} too large or extension not allowed.')\n", (927, 998), False, 'from django.contrib import messages\n'), ((1224, 1300), 'django.contrib.messages.error', 'messages.error', (['request', '"""A post must have text, a file or a YouTube embed."""'], {}), "(request, 'A post must have text, a file or a YouTube embed.')\n", (1238, 1300), False, 'from django.contrib import messages\n'), ((3191, 3219), 'functools.partial', 'partial', (['spanify'], {'klass': 'name'}), '(spanify, klass=name)\n', (3198, 3219), False, 'from functools import partial\n'), ((4206, 4225), 'os.path.splitext', 'splitext', (['file.name'], {}), '(file.name)\n', (4214, 4225), False, 'from os.path import split, splitext\n'), ((5192, 5228), 'os.path.join', 'join', (['settings.MEDIA_ROOT', 'file.name'], {}), '(settings.MEDIA_ROOT, file.name)\n', (5196, 5228), False, 'from os.path import join\n'), ((1420, 1471), 'django.contrib.messages.error', 'messages.error', (['request', '"""A thread must have text."""'], {}), "(request, 'A thread must have text.')\n", (1434, 1471), False, 'from django.contrib import messages\n'), ((1498, 1569), 'django.contrib.messages.error', 'messages.error', (['request', '"""A thread must have files or a YouTube embed."""'], {}), "(request, 'A thread must have files or a YouTube embed.')\n", (1512, 1569), False, 'from django.contrib import messages\n'), ((4257, 4314), 'random.choices', 'random.choices', (['(string.ascii_letters + string.digits)'], {'k': '(8)'}), '(string.ascii_letters + string.digits, k=8)\n', (4271, 4314), False, 'import random\n'), ((6356, 6382), 'PIL.Image.open', 'Image.open', (['full_file_path'], {}), '(full_file_path)\n', (6366, 6382), False, 'from PIL import Image\n'), ((6135, 6151), 'os.path.join', 'join', (['dir', 'fname'], {}), '(dir, fname)\n', (6139, 6151), False, 'from os.path import join\n'), ((5914, 5930), 'os.path.join', 'join', (['dir', 'fname'], {}), '(dir, fname)\n', (5918, 5930), False, 'from os.path import join\n'), ((6469, 6485), 'os.path.join', 'join', (['dir', 'fname'], {}), '(dir, fname)\n', (6473, 6485), False, 'from os.path import join\n'), ((6584, 6600), 'os.path.join', 'join', (['dir', 'fname'], {}), '(dir, fname)\n', (6588, 6600), False, 'from os.path import join\n'), ((6261, 6277), 'os.path.join', 'join', (['dir', 'fname'], {}), '(dir, fname)\n', (6265, 6277), False, 'from os.path import join\n')]
|
from django.contrib import admin
from .models import *
# Register your models here.
class DoctorAdmin(admin.ModelAdmin):
pass
admin.site.register(Doctor, DoctorAdmin)
class HospitalStaffAdmin(admin.ModelAdmin):
pass
admin.site.register(HospitalStaff, HospitalStaffAdmin)
#insurance created by prem
class InsuranceAdmin(admin.ModelAdmin):
pass
admin.site.register(Insurance, InsuranceAdmin)
class PatientAdmin(admin.ModelAdmin):
pass
admin.site.register(Patient, PatientAdmin)
class AppointmentAdmin(admin.ModelAdmin):
pass
admin.site.register(Appointment, AppointmentAdmin)
class PatientDischargeDetailsAdmin(admin.ModelAdmin):
pass
admin.site.register(PatientDischargeDetails, PatientDischargeDetailsAdmin)
class Patient_LabTest_RecordsAdmin(admin.ModelAdmin):
pass
admin.site.register(Patient_LabTest_Records,Patient_LabTest_RecordsAdmin)
class LabTestsAdmin(admin.ModelAdmin):
pass
admin.site.register(LabTests,LabTestsAdmin)
class LabStaffAdmin(admin.ModelAdmin):
pass
admin.site.register(LabStaff,LabStaffAdmin)
class DiagnosisAdmin(admin.ModelAdmin):
pass
admin.site.register(Diagnosis, DiagnosisAdmin)
class PrescriptionAdmin(admin.ModelAdmin):
pass
admin.site.register(Prescription, PrescriptionAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((131, 171), 'django.contrib.admin.site.register', 'admin.site.register', (['Doctor', 'DoctorAdmin'], {}), '(Doctor, DoctorAdmin)\n', (150, 171), False, 'from django.contrib import admin\n'), ((226, 280), 'django.contrib.admin.site.register', 'admin.site.register', (['HospitalStaff', 'HospitalStaffAdmin'], {}), '(HospitalStaff, HospitalStaffAdmin)\n', (245, 280), False, 'from django.contrib import admin\n'), ((357, 403), 'django.contrib.admin.site.register', 'admin.site.register', (['Insurance', 'InsuranceAdmin'], {}), '(Insurance, InsuranceAdmin)\n', (376, 403), False, 'from django.contrib import admin\n'), ((452, 494), 'django.contrib.admin.site.register', 'admin.site.register', (['Patient', 'PatientAdmin'], {}), '(Patient, PatientAdmin)\n', (471, 494), False, 'from django.contrib import admin\n'), ((547, 597), 'django.contrib.admin.site.register', 'admin.site.register', (['Appointment', 'AppointmentAdmin'], {}), '(Appointment, AppointmentAdmin)\n', (566, 597), False, 'from django.contrib import admin\n'), ((662, 736), 'django.contrib.admin.site.register', 'admin.site.register', (['PatientDischargeDetails', 'PatientDischargeDetailsAdmin'], {}), '(PatientDischargeDetails, PatientDischargeDetailsAdmin)\n', (681, 736), False, 'from django.contrib import admin\n'), ((801, 875), 'django.contrib.admin.site.register', 'admin.site.register', (['Patient_LabTest_Records', 'Patient_LabTest_RecordsAdmin'], {}), '(Patient_LabTest_Records, Patient_LabTest_RecordsAdmin)\n', (820, 875), False, 'from django.contrib import admin\n'), ((924, 968), 'django.contrib.admin.site.register', 'admin.site.register', (['LabTests', 'LabTestsAdmin'], {}), '(LabTests, LabTestsAdmin)\n', (943, 968), False, 'from django.contrib import admin\n'), ((1017, 1061), 'django.contrib.admin.site.register', 'admin.site.register', (['LabStaff', 'LabStaffAdmin'], {}), '(LabStaff, LabStaffAdmin)\n', (1036, 1061), False, 'from django.contrib import admin\n'), ((1110, 1156), 'django.contrib.admin.site.register', 'admin.site.register', (['Diagnosis', 'DiagnosisAdmin'], {}), '(Diagnosis, DiagnosisAdmin)\n', (1129, 1156), False, 'from django.contrib import admin\n'), ((1210, 1262), 'django.contrib.admin.site.register', 'admin.site.register', (['Prescription', 'PrescriptionAdmin'], {}), '(Prescription, PrescriptionAdmin)\n', (1229, 1262), False, 'from django.contrib import admin\n')]
|
import discord
from collections.abc import Sequence
import json
import os
import requests
from types import SimpleNamespace
import sys
from core.errors import *
import base64
import requests
import json
def loads_to_object(json_file):
"""
Loads from a json file to a python object filling its properties with
dictionnary key
"""
return json.loads(open(json_file, "r").read(), object_hook=lambda d: SimpleNamespace(**d))
if not os.path.isfile("config.json"):
sys.exit("'config.json' not found! Please add it and try again.")
else:
config = loads_to_object("config.json")
async def getchannel(bot, id):
channel = bot.get_channel(id)
if not channel:
try:
channel = await bot.fetch_channel(id)
except discord.InvalidData:
channel = None
except discord.HTTPException:
channel = None
return channel
async def getuser(bot, id):
user = bot.get_user(id)
if not user:
user = await bot.fetch_user(id)
return user
async def getguild(bot, id):
guild = bot.get_guild(id)
if not guild:
guild = await bot.fetch_guild(id)
return guild
async def send_embed(context, title, description, color=int(config.EMBED_COLOR, 16)):
embed = discord.Embed(
title=title,
description=description,
color=color
)
await context.send(embed=embed)
def upload_file_to_github(file_path, file_name, repo_name, owner, branch_name, token):
url = "https://api.github.com/repos/"+owner+'/'+repo_name+"/contents/"+file_name
headers = {
"Authorization": "token " + token,
"Accept": "application/vnd.github.v3.raw",
"Content-Type": "application/json"
}
with open(file_path, "rb") as file:
data = {
"message": "Uploaded " + file_name + " to " + branch_name,
"content": base64.b64encode(file.read()).decode("utf-8")
}
response = requests.put(url, data=json.dumps(data), headers=headers)
if response.status_code == 201:
return response.json()["content"]["html_url"]
else:
return None
|
[
"discord.Embed",
"json.dumps",
"os.path.isfile",
"types.SimpleNamespace",
"sys.exit"
] |
[((462, 491), 'os.path.isfile', 'os.path.isfile', (['"""config.json"""'], {}), "('config.json')\n", (476, 491), False, 'import os\n'), ((497, 562), 'sys.exit', 'sys.exit', (['"""\'config.json\' not found! Please add it and try again."""'], {}), '("\'config.json\' not found! Please add it and try again.")\n', (505, 562), False, 'import sys\n'), ((1283, 1347), 'discord.Embed', 'discord.Embed', ([], {'title': 'title', 'description': 'description', 'color': 'color'}), '(title=title, description=description, color=color)\n', (1296, 1347), False, 'import discord\n'), ((1996, 2012), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2006, 2012), False, 'import json\n'), ((431, 451), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**d)\n', (446, 451), False, 'from types import SimpleNamespace\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 13:49:13 2021
@author: Matteo
"""
import numpy as np
import matplotlib.pyplot as plt
import PCI_o_B
from PCI_o_B import CIfile as CI
from PCI_o_B import G2file as g2
from PCI_o_B import SharedFunctions as sf
class DAM(g2.G2):
def __init__(self,FolderName,CI,nROI,tau):
super().__init__(FolderName,CI,nROI,tau)
self.n_intervals = 0
self.tauDAM= []
self.g2DAM = []
self.g2varDAM = []
def __str__(self):
#write again this stuff
str_res = '\n|---------------|'
str_res += '\n| CIbead class: '
str_res += '\n|--------------------+--------------------|'
str_res += '\n| filelist : ' + str(self.ROIfilelist)
str_res += '\n| folder : ' + str(self.FolderName)
str_res += '\n| number of ROIs : ' + str(self.nROI)
str_res += '\n| ROIs size : ' + str(self.GetROIsize()) + ' px'
str_res += '\n| lag time : ' + str(self.lag)
str_res += '\n| x for theta(x)= 90° : ' + str(self.Center) + 'px'
str_res += '\n| Radius bead : ' + str(self.Center) +'px'
#str_res += '\n| Window of interest top : ' + str(self.GetWINDOWtop()) + ' px'
str_res += '\n|--------------------+--------------------|'
return str_res
def DAMCalculation(self,n_intervals):
self.n_intervals = n_intervals
l_intervals = int(len(self.CI[0]) / n_intervals )
time_list = []
for i in range(n_intervals):
time_list.append(i*l_intervals)
#calculation of the g2 for each roi for each interval
for i in range(n_intervals-1):
super().G2Calculation(time_list[i],time_list[i+1])
self.g2DAM.append(self.g2)
self.tauDAM.append(np.asarray(self.tau))
self.g2varDAM.append(self.g2var)
self.g2 = []
self.g2var = []
#self.tau = []
super().G2Calculation(time_list[-1],len(self.CI[0]))
self.g2DAM.append(self.g2)
self.g2varDAM.append(self.g2var)
self.tauDAM.append(np.asarray(self.tau))
'''
for i in range(n_intervals):
self.tauDAM[i].tolist()
print(type(self.tauDAM[i]))
print(len(self.tauDAM[i]))
'''
return
def DAMFitSingleDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitSingleDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime1 = []
self.decaytime1err = []
return
def DAMFitStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime1 = []
self.decaytime1err = []
return
def DAMFitDoubleDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitDoubleDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
def DAMFitSingleStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitSingleStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
def DAMFitDoubleStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitDoubleStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
|
[
"numpy.asarray"
] |
[((2301, 2321), 'numpy.asarray', 'np.asarray', (['self.tau'], {}), '(self.tau)\n', (2311, 2321), True, 'import numpy as np\n'), ((1952, 1972), 'numpy.asarray', 'np.asarray', (['self.tau'], {}), '(self.tau)\n', (1962, 1972), True, 'import numpy as np\n')]
|
from app.index import bp
from flask import render_template
@bp.route("/")
def index():
"""View function for the Index page
Returns:
str: HTML template for the Index page
"""
return render_template("index.html")
|
[
"app.index.bp.route",
"flask.render_template"
] |
[((62, 75), 'app.index.bp.route', 'bp.route', (['"""/"""'], {}), "('/')\n", (70, 75), False, 'from app.index import bp\n'), ((208, 237), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (223, 237), False, 'from flask import render_template\n')]
|
import telebot
import os
import json
if "TOKEN" in os.environ:
bot = telebot.TeleBot(os.environ["TOKEN"])
heroku = True
else:
with open("../token2.json") as token:
heroku = False
bot = telebot.TeleBot(json.loads(token.read())["token"])
|
[
"telebot.TeleBot"
] |
[((74, 110), 'telebot.TeleBot', 'telebot.TeleBot', (["os.environ['TOKEN']"], {}), "(os.environ['TOKEN'])\n", (89, 110), False, 'import telebot\n')]
|
# ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
from configuration import Configuration
# Default (very simple) compiler configuration
class DefaultConfiguration(Configuration):
def __init__(self, context):
Configuration.__init__(self, context)
# ------------------------------------------------------------
# Return test program to be used when checking for basic C++11
# support.
# ------------------------------------------------------------
def _c99_test_program(self):
return """
// Some headers found in C99.
#include <stdbool.h>
#include <stdint.h>
int main()
{
struct foo
{
bool b; // C99 type
int i;
uint64_t q; // C99 type
};
// Designated initializer.
struct foo bar = { .b = false, .q = UINT64_MAX };
// Implicitly initialized field.
return bar.i != 0;
}
"""
# --------------------------------------------------------------
# Get list of flags that could potentially enable C99 support.
#
# The default configuration assumes that no flag is needed to
# enable C99 support.
# --------------------------------------------------------------
def _c99_flags(self):
return []
# ------------------------------------------------------------
# Return test program to be used when checking for basic C++11
# support.
# ------------------------------------------------------------
def _cxx11_test_program(self):
return """
int main()
{
int x = 3210;
auto f = [x](){
return x;
};
return f() != x;
}
"""
# --------------------------------------------------------------
# Get list of flags that could potentially enable C++11 support.
#
# The default configuration assumes that no flag is needed to
# enable C++11 support.
# --------------------------------------------------------------
def _cxx11_flags(self):
return []
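# Hedged sketch of how the hooks above are meant to be specialised; kept in
# comments only. The class name and flag lists are assumptions for illustration
# (typical GCC/Clang-style flags), and the base Configuration class presumably
# probes them against the test programs returned above.
#
# class GccLikeConfiguration(DefaultConfiguration):
#     def _c99_flags(self):
#         return ['-std=c99', '-std=gnu99']
#
#     def _cxx11_flags(self):
#         return ['-std=c++11', '-std=gnu++11']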
|
[
"configuration.Configuration.__init__"
] |
[((905, 942), 'configuration.Configuration.__init__', 'Configuration.__init__', (['self', 'context'], {}), '(self, context)\n', (927, 942), False, 'from configuration import Configuration\n')]
|
__author__ = "<NAME>"
import numpy as np
from tensorflow import keras
from sktime_dl.classification._classifier import BaseDeepClassifier
from sktime_dl.networks._lstmfcn import LSTMFCNNetwork
from sktime_dl.utils import check_and_clean_data, \
check_and_clean_validation_data
from sktime_dl.utils import check_is_fitted
from sklearn.utils import check_random_state
class LSTMFCNClassifier(BaseDeepClassifier, LSTMFCNNetwork):
"""
Implementation of LSTMFCNClassifier from Karim et al (2019). [1]_
Overview:
Combines an LSTM arm with a CNN arm. Optionally uses an attention mechanism in the LSTM which the
author indicates provides improved performance.
Parameters
----------
nb_epochs: int, default=1500
the number of epochs to train the model
param batch_size: int, default=128
the number of samples per gradient update.
kernel_sizes: list of ints, default=[8, 5, 3]
specifying the length of the 1D convolution windows
filter_sizes: int, list of ints, default=[128, 256, 128]
size of filter for each conv layer
num_cells: int, default=8
output dimension for LSTM layer
dropout: float, default=0.8
controls dropout rate of LSTM layer
attention: boolean, default=False
If True, uses custom attention LSTM layer
callbacks: keras callbacks, default=ReduceLRonPlateau
Keras callbacks to use such as learning rate reduction or saving best model based on validation error
random_state: int,
seed to any needed random actions
verbose: boolean,
whether to output extra information
model_name: string,
the name of this model for printing and file writing purposes
model_save_directory: string,
if not None; location to save the trained keras model in hdf5 format
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
random_state : int or None, default=None
Seed for random, integer.
Attributes
----------
nb_classes : int
Number of classes. Extracted from the data.
References
----------
@article{Karim_2019,
title={Multivariate LSTM-FCNs for time series classification},
volume={116},
ISSN={0893-6080},
url={http://dx.doi.org/10.1016/j.neunet.2019.04.014},
DOI={10.1016/j.neunet.2019.04.014},
journal={Neural Networks},
publisher={Elsevier BV},
author={<NAME> and <NAME> and <NAME> and <NAME>},
year={2019},
month={Aug},
pages={237–245}
}
Example
-------
from sktime_dl.classification import LSTMFCNClassifier
from sktime.datasets import load_italy_power_demand
X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
clf = LSTMFCNClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
"""
def __init__(
self,
nb_epochs=1500,
batch_size=8,
kernel_sizes=[8, 5, 3],
filter_sizes=[128, 256, 128],
num_cells=8,
dropout=0.8,
attention=False,
callbacks=[],
random_state=0,
verbose=False,
model_name="lstmfcn",
model_save_directory=None,
):
super(LSTMFCNClassifier, self).__init__(
model_name=model_name, model_save_directory=model_save_directory
)
self.verbose = verbose
self._is_fitted = False
# calced in fit
self.classes_ = None
self.nb_classes = -1
self.input_shape = None
self.model = None
self.history = None
# predefined
self.nb_epochs = nb_epochs
self.batch_size = batch_size
self.kernel_sizes = kernel_sizes
self.filter_sizes = filter_sizes
self.NUM_CELLS=num_cells
self.dropout=dropout
self.attention=attention
self.callbacks = callbacks
self.random_state = random_state
self.verbose = verbose
self._is_fitted = False
def build_model(self, input_shape, nb_classes, **kwargs):
"""
Construct a compiled, un-trained, keras model that is ready for
training
----------
input_shape : tuple
The shape of the data fed into the input layer
nb_classes: int
The number of classes, which shall become the size of the output
layer
Returns
-------
output : a compiled Keras Model
"""
input_layers, output_layer = self.build_network(input_shape, **kwargs)
output_layer = keras.layers.Dense(nb_classes, activation="softmax")(
output_layer
)
model = keras.models.Model(inputs=input_layers, outputs=output_layer)
model.compile(
loss="categorical_crossentropy",
optimizer='adam',
metrics=["accuracy"],
)
# file_path = self.output_directory + 'best_model.hdf5'
# model_checkpoint = keras.callbacks.ModelCheckpoint(
# filepath=file_path, monitor='val_loss',
# save_best_only=True)
# self.callbacks = [model_checkpoint]
if self.callbacks==None:
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.7,
patience=50, min_lr=0.0001)
self.callbacks = [reduce_lr]
else:
pass
return model
def fit(self, X, y, input_checks=True, validation_X=None,
validation_y=None, **kwargs):
"""
Fit the classifier on the training set (X, y)
----------
X : a nested pd.Dataframe, or (if input_checks=False) array-like of
shape = (n_instances, series_length, n_dimensions)
The training input samples. If a 2D array-like is passed,
n_dimensions is assumed to be 1.
y : array-like, shape = [n_instances]
The training data class labels.
input_checks : boolean
whether to check the X and y parameters
validation_X : a nested pd.Dataframe, or array-like of shape =
(n_instances, series_length, n_dimensions)
The validation samples. If a 2D array-like is passed,
n_dimensions is assumed to be 1.
Unless strictly defined by the user via callbacks (such as
EarlyStopping), the presence or state of the validation
data does not alter training in any way. Predictions at each epoch
are stored in the model's fit history.
validation_y : array-like, shape = [n_instances]
The validation class labels.
Returns
-------
self : object
"""
self.random_state = check_random_state(self.random_state)
X = check_and_clean_data(X, y, input_checks=input_checks)
y_onehot = self.convert_y(y)
validation_data = \
check_and_clean_validation_data(validation_X, validation_y,
self.label_encoder,
self.onehot_encoder)
# ignore the number of instances, X.shape[0],
# just want the shape of each instance
self.input_shape = X.shape[1:]
if validation_data is not None:
validation_data = (
validation_data[0],
validation_data[1]
)
self.model = self.build_model(self.input_shape, self.nb_classes)
if self.verbose:
self.model.summary()
self.history = self.model.fit(
X,
y_onehot,
batch_size=self.batch_size,
epochs=self.nb_epochs,
verbose=self.verbose,
validation_data=(validation_data),
callbacks=self.callbacks,
)
self.save_trained_model()
self._is_fitted = True
return self
def predict_proba(self, X, input_checks=True, **kwargs):
"""
Find probability estimates for each class for all cases in X.
Parameters
----------
X : a nested pd.Dataframe, or (if input_checks=False) array-like of
shape = (n_instances, series_length, n_dimensions)
The training input samples. If a 2D array-like is passed,
n_dimensions is assumed to be 1.
input_checks: boolean
whether to check the X parameter
Returns
-------
output : array of shape = [n_instances, n_classes] of probabilities
"""
check_is_fitted(self)
X = check_and_clean_data(X, input_checks=input_checks)
probs = self.model.predict(X, **kwargs)
# check if binary classification
if probs.shape[1] == 1:
# first column is probability of class 0 and second is of class 1
probs = np.hstack([1 - probs, probs])
return probs
|
[
"sklearn.utils.check_random_state",
"sktime_dl.utils.check_is_fitted",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.hstack",
"tensorflow.keras.models.Model",
"sktime_dl.utils.check_and_clean_data",
"sktime_dl.utils.check_and_clean_validation_data"
] |
[((4872, 4933), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layers', 'outputs': 'output_layer'}), '(inputs=input_layers, outputs=output_layer)\n', (4890, 4933), False, 'from tensorflow import keras\n'), ((6945, 6982), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (6963, 6982), False, 'from sklearn.utils import check_random_state\n'), ((6996, 7049), 'sktime_dl.utils.check_and_clean_data', 'check_and_clean_data', (['X', 'y'], {'input_checks': 'input_checks'}), '(X, y, input_checks=input_checks)\n', (7016, 7049), False, 'from sktime_dl.utils import check_and_clean_data, check_and_clean_validation_data\n'), ((7128, 7233), 'sktime_dl.utils.check_and_clean_validation_data', 'check_and_clean_validation_data', (['validation_X', 'validation_y', 'self.label_encoder', 'self.onehot_encoder'], {}), '(validation_X, validation_y, self.\n label_encoder, self.onehot_encoder)\n', (7159, 7233), False, 'from sktime_dl.utils import check_and_clean_data, check_and_clean_validation_data\n'), ((8753, 8774), 'sktime_dl.utils.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (8768, 8774), False, 'from sktime_dl.utils import check_is_fitted\n'), ((8788, 8838), 'sktime_dl.utils.check_and_clean_data', 'check_and_clean_data', (['X'], {'input_checks': 'input_checks'}), '(X, input_checks=input_checks)\n', (8808, 8838), False, 'from sktime_dl.utils import check_and_clean_data, check_and_clean_validation_data\n'), ((4766, 4818), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['nb_classes'], {'activation': '"""softmax"""'}), "(nb_classes, activation='softmax')\n", (4784, 4818), False, 'from tensorflow import keras\n'), ((5396, 5489), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'keras.callbacks.ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.7)', 'patience': '(50)', 'min_lr': '(0.0001)'}), "(monitor='loss', factor=0.7, patience=50,\n min_lr=0.0001)\n", (5429, 5489), False, 'from tensorflow import keras\n'), ((9062, 9091), 'numpy.hstack', 'np.hstack', (['[1 - probs, probs]'], {}), '([1 - probs, probs])\n', (9071, 9091), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Train a detection model from an already prepared dataset.
"""
import argparse
import logging
import os
import random
import sys
import warnings
import rasterio
from aplatam import __version__
from aplatam.build_trainset import CnnTrainsetBuilder
from aplatam.train_classifier import train
from aplatam.util import all_raster_files
__author__ = "<NAME>"
__copyright__ = __author__
__license__ = "new-bsd"
_logger = logging.getLogger(__name__)
# Number of bands that all rasters must have
BAND_COUNT = 4
# Default output model filename
DEFAULT_MODEL_FILENAME = 'model.h5'
def parse_args(args):
"""
Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=('Prepare a dataset from a set of preprocessed rasters '
'and a vector file of polygons and train a detection '
'model.'))
# Mandatory arguments
parser.add_argument(
'rasters_dir', help='directory containing raster images')
parser.add_argument('vector', help='vector file of training polygons')
parser.add_argument(
'output_dir', help='directory of output training dataset')
# Options
parser.add_argument(
'-o',
'--output-model',
default=None,
help=('filename for output model. '
'Default: OUTPUT_DIR/model.h5'))
parser.add_argument(
'--seed', type=int, help='seed number for the random number generator')
parser.add_argument("--size", type=int, default=256, help="window size")
parser.add_argument(
"--step-size",
type=int,
default=128,
help="step size for sliding window")
parser.add_argument(
"--buffer-size",
type=int,
default=0,
help=
"if buffer_size > 0, polygons are expanded with a fixed-sized buffer")
parser.add_argument(
"--rasters-contour",
help="path to rasters contour vector file (optional)")
parser.add_argument(
"--rescale-intensity",
dest='rescale_intensity',
default=True,
action='store_true',
help="rescale intensity")
parser.add_argument(
"--no-rescale-intensity",
dest='rescale_intensity',
action='store_false',
help="do not rescale intensity")
parser.add_argument(
"--lower-cut",
type=int,
default=2,
help=
"lower cut of percentiles for cumulative count in intensity rescaling")
parser.add_argument(
"--upper-cut",
type=int,
default=98,
help=
"upper cut of percentiles for cumulative count in intensity rescaling")
parser.add_argument(
"--block-size", type=int, default=1, help="block size multiplier")
parser.add_argument(
"--test-size",
type=float,
default=0.25,
help=("proportion of the dataset to include in the test split. "
"Float number between 0.0 and 1.0"))
parser.add_argument(
"--balancing-multiplier",
type=float,
default=1.0,
help=
"proportion of false samples w.r.t true samples (e.g. 1.0 = 50%% true, 50%% false)"
)
parser.add_argument(
"--trainable-layers",
type=int,
default=5,
help="number of upper layers of ResNet-50 to retrain")
parser.add_argument("--batch-size", type=int, default=5, help="Batch size")
parser.add_argument(
"--epochs", type=int, default=20, help="number of epochs to run")
parser.add_argument(
'--version',
action='version',
version='aplatam {ver}'.format(ver=__version__))
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""
Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(
level=loglevel,
stream=sys.stdout,
format=logformat,
datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""
Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
# Set default output model path, if not set
if args.output_model:
output_model = args.output_model
else:
output_model = os.path.join(args.output_dir, DEFAULT_MODEL_FILENAME)
opts = dict(
size=args.size,
step_size=args.step_size,
buffer_size=args.buffer_size,
rescale_intensity=args.rescale_intensity,
lower_cut=args.lower_cut,
upper_cut=args.upper_cut,
block_size=args.block_size,
test_size=args.test_size,
balancing_multiplier=args.balancing_multiplier,
rasters_contour=args.rasters_contour)
_logger.info('Options: %s', opts)
# Set seed number
if args.seed:
_logger.info('Seed: %d', args.seed)
random.seed(args.seed)
_logger.info('Collect all rasters from %s', args.rasters_dir)
rasters = all_raster_files(args.rasters_dir)
validate_rasters_band_count(rasters)
if not os.path.exists(args.output_dir):
builder = CnnTrainsetBuilder(rasters, args.vector, **opts)
builder.build(args.output_dir)
# Train and save model
train(
output_model,
args.output_dir,
trainable_layers=args.trainable_layers,
batch_size=args.batch_size,
epochs=args.epochs,
size=args.size)
_logger.info('Done')
def validate_rasters_band_count(rasters):
"""Validate all rasters have at least 3 bands
Returns True if they are all valid.
Otherwise it raises a RuntimeError.
"""
_logger.debug('Validate rasters band count')
for raster_path in rasters:
count = get_raster_band_count(raster_path)
if count < 3:
raise RuntimeError(
'Raster {} has {} bands, but should have 3 (true color RGB)'.
format(raster_path, count))
if count >= 3:
warnings.warn(
('Raster {} has more than 3 bands ({}). '
'Going to assume the first 3 bands are RGB...').format(
raster_path, count))
return True
def get_raster_band_count(raster_path):
"""Return band count of +raster_path+"""
with rasterio.open(raster_path) as dataset:
return dataset.count
def run():
"""Entry point for console_scripts"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
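# Hedged usage sketch, in comments only. The invocation name and the file and
# directory names are assumptions for illustration; the real entry point is
# whatever console_script is mapped to run().
#
#   python train.py rasters/ training_polygons.geojson dataset/ \
#       --size 256 --step-size 128 --epochs 20 -v
#
# This builds the training set under dataset/ if that directory does not exist
# yet and, unless --output-model is given, writes the model to dataset/model.h5.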
|
[
"rasterio.open",
"argparse.ArgumentParser",
"logging.basicConfig",
"aplatam.build_trainset.CnnTrainsetBuilder",
"os.path.exists",
"aplatam.util.all_raster_files",
"random.seed",
"aplatam.train_classifier.train",
"os.path.join",
"logging.getLogger"
] |
[((469, 496), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (486, 496), False, 'import logging\n'), ((870, 1091), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Prepare a dataset from a set of preprocessed rasters and a vector file of polygons and train a detection model."""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Prepare a dataset from a set of preprocessed rasters and a vector file of polygons and train a detection model.'\n )\n", (893, 1091), False, 'import argparse\n'), ((4541, 4646), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'loglevel', 'stream': 'sys.stdout', 'format': 'logformat', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=loglevel, stream=sys.stdout, format=logformat,\n datefmt='%Y-%m-%d %H:%M:%S')\n", (4560, 4646), False, 'import logging\n'), ((5718, 5752), 'aplatam.util.all_raster_files', 'all_raster_files', (['args.rasters_dir'], {}), '(args.rasters_dir)\n', (5734, 5752), False, 'from aplatam.util import all_raster_files\n'), ((5978, 6122), 'aplatam.train_classifier.train', 'train', (['output_model', 'args.output_dir'], {'trainable_layers': 'args.trainable_layers', 'batch_size': 'args.batch_size', 'epochs': 'args.epochs', 'size': 'args.size'}), '(output_model, args.output_dir, trainable_layers=args.trainable_layers,\n batch_size=args.batch_size, epochs=args.epochs, size=args.size)\n', (5983, 6122), False, 'from aplatam.train_classifier import train\n'), ((5025, 5078), 'os.path.join', 'os.path.join', (['args.output_dir', 'DEFAULT_MODEL_FILENAME'], {}), '(args.output_dir, DEFAULT_MODEL_FILENAME)\n', (5037, 5078), False, 'import os\n'), ((5614, 5636), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (5625, 5636), False, 'import random\n'), ((5807, 5838), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (5821, 5838), False, 'import os\n'), ((5858, 5906), 'aplatam.build_trainset.CnnTrainsetBuilder', 'CnnTrainsetBuilder', (['rasters', 'args.vector'], {}), '(rasters, args.vector, **opts)\n', (5876, 5906), False, 'from aplatam.build_trainset import CnnTrainsetBuilder\n'), ((7021, 7047), 'rasterio.open', 'rasterio.open', (['raster_path'], {}), '(raster_path)\n', (7034, 7047), False, 'import rasterio\n')]
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Approval",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created",
models.DateTimeField(auto_now_add=True, verbose_name="opprettet"),
),
(
"processed",
models.BooleanField(
default=False, verbose_name="behandlet", editable=False
),
),
(
"processed_date",
models.DateTimeField(
null=True, verbose_name="behandlet dato", blank=True
),
),
(
"approved",
models.BooleanField(
default=False, verbose_name="godkjent", editable=False
),
),
("message", models.TextField(verbose_name="melding")),
],
options={},
bases=(models.Model,),
),
migrations.CreateModel(
name="MembershipApproval",
fields=[
(
"approval_ptr",
models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to="approval.Approval",
on_delete=models.CASCADE,
),
),
(
"new_expiry_date",
models.DateField(
null=True, verbose_name="ny utl\xf8psdato", blank=True
),
),
(
"field_of_study",
models.SmallIntegerField(
default=0,
verbose_name="studieretning",
choices=[
(0, "Gjest"),
(1, "Bachelor i Informatikk (BIT)"),
(10, "Software (SW)"),
(11, "Informasjonsforvaltning (DIF)"),
(12, "Komplekse Datasystemer (KDS)"),
(13, "Spillteknologi (SPT)"),
(14, "Intelligente Systemer (IRS)"),
(15, "Helseinformatikk (MSMEDTEK)"),
(30, "<NAME>"),
(80, "PhD"),
(90, "International"),
(100, "<NAME>"),
],
),
),
(
"started_date",
models.DateField(
null=True, verbose_name="startet dato", blank=True
),
),
],
options={
"verbose_name": "medlemskapss\xf8knad",
"verbose_name_plural": "medlemskapss\xf8knader",
"permissions": (
("view_membershipapproval", "View membership approval"),
),
},
bases=("approval.approval",),
),
]
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.SmallIntegerField",
"django.db.models.DateField",
"django.db.models.DateTimeField"
] |
[((297, 390), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n    primary_key=True)\n", (313, 390), False, 'from django.db import migrations, models\n'), ((595, 660), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""opprettet"""'}), "(auto_now_add=True, verbose_name='opprettet')\n", (615, 660), False, 'from django.db import migrations, models\n'), ((752, 828), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""behandlet"""', 'editable': '(False)'}), "(default=False, verbose_name='behandlet', editable=False)\n", (771, 828), False, 'from django.db import migrations, models\n'), ((971, 1045), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'verbose_name': '"""behandlet dato"""', 'blank': '(True)'}), "(null=True, verbose_name='behandlet dato', blank=True)\n", (991, 1045), False, 'from django.db import migrations, models\n'), ((1182, 1257), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""godkjent"""', 'editable': '(False)'}), "(default=False, verbose_name='godkjent', editable=False)\n", (1201, 1257), False, 'from django.db import migrations, models\n'), ((1352, 1392), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""melding"""'}), "(verbose_name='melding')\n", (1368, 1392), False, 'from django.db import migrations, models\n'), ((1646, 1792), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""approval.Approval"""', 'on_delete': 'models.CASCADE'}), "(parent_link=True, auto_created=True, primary_key=True,\n    serialize=False, to='approval.Approval', on_delete=models.CASCADE)\n", (1666, 1792), False, 'from django.db import migrations, models\n'), ((2053, 2122), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'verbose_name': '"""ny utløpsdato"""', 'blank': '(True)'}), "(null=True, verbose_name='ny utløpsdato', blank=True)\n", (2069, 2122), False, 'from django.db import migrations, models\n'), ((2268, 2690), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(0)', 'verbose_name': '"""studieretning"""', 'choices': "[(0, 'Gjest'), (1, 'Bachelor i Informatikk (BIT)'), (10, 'Software (SW)'), (11, 'Informasjonsforvaltning (DIF)'), (12, 'Komplekse Datasystemer (KDS)'), (13, 'Spillteknologi (SPT)'), (14, 'Intelligente Systemer (IRS)'), (15, 'Helseinformatikk (MSMEDTEK)'), (30, '<NAME>'), (80, 'PhD'), (90, 'International'), (100, '<NAME>')]"}), "(default=0, verbose_name='studieretning', choices=[\n    (0, 'Gjest'), (1, 'Bachelor i Informatikk (BIT)'), (10, 'Software (SW)'\n    ), (11, 'Informasjonsforvaltning (DIF)'), (12,\n    'Komplekse Datasystemer (KDS)'), (13, 'Spillteknologi (SPT)'), (14,\n    'Intelligente Systemer (IRS)'), (15, 'Helseinformatikk (MSMEDTEK)'), (\n    30, '<NAME>'), (80, 'PhD'), (90, 'International'), (100, '<NAME>')])\n", (2292, 2690), False, 'from django.db import migrations, models\n'), ((3220, 3288), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'verbose_name': '"""startet dato"""', 'blank': '(True)'}), "(null=True, verbose_name='startet dato', blank=True)\n", (3236, 3288), False, 'from django.db import migrations, models\n')]
|
# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Suite for loading OpenAI `Safety Gym <https://openai.com/blog/safety-gym/>`_ environments.
**NOTE**: Mujoco requires separated installation.
(gym >= 0.10, and mujoco>=1.50)
Follow the instructions at:
https://github.com/openai/mujoco-py
Several general facts about the provided benchmark environments:
1. All have distance-based dense rewards (can be customized to be sparse).
2. All have continual goals: after reaching a goal, the goal is reset but the
layout keeps the same until timeout (can be customized to not reset goals).
3. Layouts are randomized before episodes begin
4. Costs are indicator binaries (0 or 1). Every positive cost will be binarized
to 1. Thus the total cost will be 1 if any component cost is positive.
5. level 0 has no constraints; level 1 has some unsafe elements; level 2 has
very dense unsafe elements.
See https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L97
for a complete list of default configurations.
"""
try:
import mujoco_py
import safety_gym
except ImportError:
mujoco_py = None
safety_gym = None
import numpy as np
import copy
import gym
import alf
from alf.environments import suite_gym
from alf.environments.alf_wrappers import NonEpisodicAgent
def is_available():
"""Check if both ``mujoco_py`` and ``safety_gym`` have been installed."""
return (mujoco_py is not None and safety_gym is not None)
class VisionObservationWrapper(gym.ObservationWrapper):
"""If the observation is a dict and it contains a key 'vision',
return an uint8 RGB image in [0,255] and a flat vector containing any other
info."""
def __init__(self, env):
super().__init__(env)
self._vision = False
if (isinstance(self.observation_space, gym.spaces.Dict)
and 'vision' in self.observation_space.spaces):
self._vision = True
observation_space = {}
observation_space['vision'] = self.observation_space['vision']
self.obs_flat_size = sum([
np.prod(i.shape)
for (k, i) in self.observation_space.spaces.items()
if k != 'vision'
])
observation_space['robot'] = gym.spaces.Box(
-np.inf, np.inf, (self.obs_flat_size, ), dtype=np.float32)
self.observation_space = gym.spaces.Dict(observation_space)
def observation(self, observation):
if self._vision:
obs = {"vision": observation["vision"]}
flat_obs = np.zeros(self.obs_flat_size)
offset = 0
for k in sorted(observation.keys()):
if k == 'vision':
continue
k_size = np.prod(observation[k].shape)
flat_obs[offset:offset + k_size] = observation[k].flat
offset += k_size
obs['robot'] = flat_obs
return obs
return observation
class CompleteEnvInfo(gym.Wrapper):
"""Always set the complete set of information so that the env info has a
fixed shape (no matter whether some event occurs or not), which is required
by ALF.
The current safety gym env only adds a key to env info when the corresponding
event is triggered, see:
https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L1242
"""
def __init__(self, env, env_name):
super().__init__(env)
# env info keys are retrieved from:
# https://github.com/openai/safety-gym/blob/master/safety_gym/envs/engine.py
self._env_info_keys = [
'cost_exception',
'goal_met',
'cost' # this is the summed overall cost
]
if not self._is_level0_env(env_name):
# for level 1 and 2 envs, there are constraints cost info
self._env_info_keys += [
'cost_vases_contact', 'cost_pillars', 'cost_buttons',
'cost_gremlins', 'cost_vases_displace', 'cost_vases_velocity',
'cost_hazards'
]
self._default_env_info = self._generate_default_env_info()
def _is_level0_env(self, env_name):
return "0-v" in env_name
def _generate_default_env_info(self):
env_info = {}
for key in self._env_info_keys:
if key == "goal_met":
env_info[key] = False
else:
env_info[key] = np.float32(0.)
return env_info
def step(self, action):
"""Take a step through the environment the returns the complete set of
env info, regardless of whether the corresponding event is enabled or not.
"""
env_info = copy.copy(self._default_env_info)
obs, reward, done, info = self.env.step(action)
env_info.update(info)
return obs, reward, done, env_info
class VectorReward(gym.Wrapper):
"""This wrapper makes the env returns a reward vector of length 3. The three
dimensions are:
1. distance-improvement reward indicating the delta smaller distances of
agent<->box and box<->goal for "push" tasks, or agent<->goal for
"goal"/"button" tasks.
2. negative binary cost where -1 means that at least one constraint has been
violated at the current time step (constraints vary depending on env
configurations).
3. a success indicator where 1 means the goal is met at the current step
All rewards are the higher the better.
"""
REWARD_DIMENSION = 2
def __init__(self, env, sparse_reward):
super().__init__(env)
self._reward_space = gym.spaces.Box(
low=-float('inf'),
high=float('inf'),
shape=[self.REWARD_DIMENSION])
self._sparse_reward = sparse_reward
def step(self, action):
"""Take one step through the environment and obtains several rewards.
Args:
action (np.array):
Returns:
tuple:
- obs (np.array): a flattened observation vector that contains
all enabled sensors' data
- rewards (np.array): a reward vector of length ``REWARD_DIMENSION``.
See the class docstring for their meanings.
- done (bool): whether the episode has ended
- info (dict): a dict of additional env information
"""
obs, reward, done, info = self.env.step(action)
# Get the second and third reward from ``info``
cost_reward = -info["cost"]
success_reward = float(info["goal_met"])
if self._sparse_reward:
reward = success_reward
return obs, np.array([reward, cost_reward],
dtype=np.float32), done, info
@property
def reward_space(self):
return self._reward_space
@alf.configurable(blacklist=['env'])
class RGBRenderWrapper(gym.Wrapper):
"""A ``metadata`` field should've been defined in the original safety gym env;
otherwise video recording will be disabled. See
https://github.com/openai/gym/blob/master/gym/wrappers/monitoring/video_recorder.py#L41
Also the original env needs a ``camera_id`` if "rgb_array" mode is used for
rendering, which is incompatible with our ``ALFEnvironment`` interfaces.
Here we wrap ``render()`` with a customizable camera mode.
"""
_metadata = {'render.modes': ["rgb_array", "human"]}
def __init__(self, env, width=800, height=800, camera_mode="fixedfar"):
"""
Args:
width (int): the width of rgb image
height (int): the height of rbg image
camera_mode (str): one of ('fixednear', 'fixedfar', 'fixedtop', 'vision', 'track', 'top')
"""
super().__init__(env)
# self.metadata will first inherit subclass's metadata
self.metadata.update(self._metadata)
self._width = width
self._height = height
self._camera_mode = camera_mode
def render(self, mode="human"):
camera_id = self.unwrapped.model.camera_name2id(self._camera_mode)
render_kwargs = dict(mode=mode, camera_id=camera_id)
if self._width is not None:
render_kwargs["width"] = self._width
if self._height is not None:
render_kwargs["height"] = self._height
return self.env.render(**render_kwargs)
@alf.configurable
class EpisodicWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
def step(self, action):
obs, reward, done, info = self.env.step(action)
if info["goal_met"]:
done = True
#print("xy: [%s,%s]" % (info['xy'][0], info['xy'][1]))
return obs, reward, done, info
def reset(self):
#print("xy: reset")
return self.env.reset()
@alf.configurable
def load(environment_name,
env_id=None,
discount=1.0,
max_episode_steps=None,
unconstrained=False,
sparse_reward=False,
episodic=False,
gym_env_wrappers=(),
alf_env_wrappers=()):
"""Loads the selected environment and wraps it with the specified wrappers.
Note that by default a ``TimeLimit`` wrapper is used to limit episode lengths
to the default benchmarks defined by the registered environments.
Args:
environment_name: Name for the environment to load.
env_id: A scalar ``Tensor`` of the environment ID of the time step.
discount: Discount to use for the environment.
max_episode_steps: If None the ``max_episode_steps`` will be set to
the default step limit -1 defined in the environment. If 0, no
``TimeLimit`` wrapper will be used.
unconstrained (bool): if True, the suite will be used just as an
unconstrained environment. The reward will always be scalar without
including constraints.
sparse_reward (bool): If True, only give reward when reaching a goal.
episodic (bool): whether terminate the episode when a goal is achieved.
Note that if True, both ``EpisodicWrapper`` and ``NonEpisodicAgent``
wrapper will be used to simulate an infinite horizon even though the
success rate is computed on per-goal basis. This is for approximating
an average constraint reward objective. ``EpisodicWrapper`` first
returns ``done=True`` to signal the end of an episode, and ``NonEpisodicAgent``
replaces ``discount=0`` with ``discount=1``.
gym_env_wrappers: Iterable with references to wrapper classes to use
directly on the gym environment.
alf_env_wrappers: Iterable with references to wrapper classes to use on
the torch environment.
Returns:
AlfEnvironment:
"""
# We can directly make the env here because none of the safety gym tasks
# is registered with a ``max_episode_steps`` argument (the
# ``gym.wrappers.time_limit.TimeLimit`` won't be applied). But each task
# will inherently manage the time limit through ``env.num_steps``.
env = gym.make(environment_name)
# fill all env info with default values
env = CompleteEnvInfo(env, environment_name)
# make vector reward
if not unconstrained:
env = VectorReward(env, sparse_reward)
env = RGBRenderWrapper(env)
if episodic:
env = EpisodicWrapper(env)
alf_env_wrappers = alf_env_wrappers + (NonEpisodicAgent, )
env = VisionObservationWrapper(env)
# Have to -1 on top of the original env max steps here, because the
# underlying gym env will output ``done=True`` when reaching the time limit
# ``env.num_steps`` (before the ``AlfGymWrapper``), which is incorrect:
# https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L1302
if max_episode_steps is None:
max_episode_steps = env.num_steps - 1
max_episode_steps = min(env.num_steps - 1, max_episode_steps)
return suite_gym.wrap_env(
env,
env_id=env_id,
discount=discount,
max_episode_steps=max_episode_steps,
gym_env_wrappers=gym_env_wrappers,
alf_env_wrappers=alf_env_wrappers)
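# --- Hedged usage sketch (added for illustration; not part of the original suite) ---
# Assuming safety-gym is installed and registers task ids such as
# "Safexp-PointGoal1-v0" (an assumption, not verified here), the docstring above
# corresponds to a call like the one below: vector reward, episodic goals and a
# 1000-step time limit.
def _example_load_point_goal():
    env = load(
        "Safexp-PointGoal1-v0", episodic=True, max_episode_steps=1000)
    # Reset returns the first time step of the wrapped AlfEnvironment.
    return env.reset()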
|
[
"alf.configurable",
"gym.make",
"numpy.float32",
"copy.copy",
"numpy.zeros",
"numpy.array",
"gym.spaces.Box",
"alf.environments.suite_gym.wrap_env",
"numpy.prod",
"gym.spaces.Dict"
] |
[((7467, 7502), 'alf.configurable', 'alf.configurable', ([], {'blacklist': "['env']"}), "(blacklist=['env'])\n", (7483, 7502), False, 'import alf\n'), ((11739, 11765), 'gym.make', 'gym.make', (['environment_name'], {}), '(environment_name)\n', (11747, 11765), False, 'import gym\n'), ((12665, 12839), 'alf.environments.suite_gym.wrap_env', 'suite_gym.wrap_env', (['env'], {'env_id': 'env_id', 'discount': 'discount', 'max_episode_steps': 'max_episode_steps', 'gym_env_wrappers': 'gym_env_wrappers', 'alf_env_wrappers': 'alf_env_wrappers'}), '(env, env_id=env_id, discount=discount, max_episode_steps\n =max_episode_steps, gym_env_wrappers=gym_env_wrappers, alf_env_wrappers\n =alf_env_wrappers)\n', (12683, 12839), False, 'from alf.environments import suite_gym\n'), ((5349, 5382), 'copy.copy', 'copy.copy', (['self._default_env_info'], {}), '(self._default_env_info)\n', (5358, 5382), False, 'import copy\n'), ((2864, 2936), 'gym.spaces.Box', 'gym.spaces.Box', (['(-np.inf)', 'np.inf', '(self.obs_flat_size,)'], {'dtype': 'np.float32'}), '(-np.inf, np.inf, (self.obs_flat_size,), dtype=np.float32)\n', (2878, 2936), False, 'import gym\n'), ((2992, 3026), 'gym.spaces.Dict', 'gym.spaces.Dict', (['observation_space'], {}), '(observation_space)\n', (3007, 3026), False, 'import gym\n'), ((3169, 3197), 'numpy.zeros', 'np.zeros', (['self.obs_flat_size'], {}), '(self.obs_flat_size)\n', (3177, 3197), True, 'import numpy as np\n'), ((7296, 7345), 'numpy.array', 'np.array', (['[reward, cost_reward]'], {'dtype': 'np.float32'}), '([reward, cost_reward], dtype=np.float32)\n', (7304, 7345), True, 'import numpy as np\n'), ((3358, 3387), 'numpy.prod', 'np.prod', (['observation[k].shape'], {}), '(observation[k].shape)\n', (3365, 3387), True, 'import numpy as np\n'), ((5088, 5103), 'numpy.float32', 'np.float32', (['(0.0)'], {}), '(0.0)\n', (5098, 5103), True, 'import numpy as np\n'), ((2690, 2706), 'numpy.prod', 'np.prod', (['i.shape'], {}), '(i.shape)\n', (2697, 2706), True, 'import numpy as np\n')]
|
import discord
from discord.ext import commands
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
    @commands.command(help="Deletes the specified number of chats. Default is 2 messages.")
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount_to_delete=2):
await ctx.message.delete()
await ctx.channel.purge(limit=amount_to_delete)
@clear.error
async def clear_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a number?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def kick(self, ctx, member: discord.Member, *, reason=None):
await ctx.message.delete()
await member.kick(reason=reason)
await ctx.send(f'Kicked {member.mention} for {reason}')
@kick.error
async def kick_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def ban(self, ctx, member: discord.Member, *, reason=None):
await ctx.message.delete()
await member.ban(reason=reason)
await ctx.send(f'Banned {member.mention} for {reason}')
@ban.error
async def ban_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def unban(self, ctx, *, member):
await ctx.message.delete()
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split('#')
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.send(f'Unbanned {user.mention}#{user.discriminator}')
return
@unban.error
async def unban_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def mute(self, ctx, member: discord.Member = None):
"""Mute a member."""
await ctx.message.delete()
role = discord.utils.get(ctx.guild.roles, name="Muted")
await member.add_roles(role)
await ctx.send(member.mention + " You have been muted. Please reflect on what you said or did and come back "
"refreshed and ready to do better.")
@mute.error
async def mute_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def unmute(self, ctx, member: discord.Member = None):
"""Unmute a member."""
await ctx.message.delete()
role = discord.utils.get(ctx.guild.roles, name="Muted")
await member.remove_roles(role)
await ctx.send(member.mention + " You have been unmuted. Enjoy your new freedom!.")
@unmute.error
async def unmute_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def add_role(self, ctx, member: discord.Member, role=None):
await ctx.message.delete()
discord_role = discord.utils.get(ctx.guild.roles, name=role)
await member.add_roles(discord_role)
await ctx.send(member.mention + f' You have been added to the role: {role}. Enjoy your new role!')
@add_role.error
async def add_role_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user and a role?')
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_role(self, ctx, member: discord.Member, role=None):
await ctx.message.delete()
discord_role = discord.utils.get(ctx.guild.roles, name=role)
await member.remove_roles(discord_role)
await ctx.send(member.mention + f' You have been removed from the role: {role}.')
@remove_role.error
async def remove_role_error(self, ctx, error):
embed = discord.Embed(title='Syntax Error',
colour=discord.Colour(0x9013fe),
description='Did you type a user and a role?')
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Admin(client))
|
[
"discord.utils.get",
"discord.ext.commands.has_permissions",
"discord.ext.commands.command",
"discord.Colour"
] |
[((238, 284), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_messages': '(True)'}), '(manage_messages=True)\n', (262, 284), False, 'from discord.ext import commands\n'), ((716, 734), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (732, 734), False, 'from discord.ext import commands\n'), ((740, 784), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (764, 784), False, 'from discord.ext import commands\n'), ((1280, 1298), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1296, 1298), False, 'from discord.ext import commands\n'), ((1304, 1348), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (1328, 1348), False, 'from discord.ext import commands\n'), ((1840, 1858), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1856, 1858), False, 'from discord.ext import commands\n'), ((1864, 1908), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (1888, 1908), False, 'from discord.ext import commands\n'), ((2689, 2707), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2705, 2707), False, 'from discord.ext import commands\n'), ((2713, 2757), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (2737, 2757), False, 'from discord.ext import commands\n'), ((3464, 3482), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (3480, 3482), False, 'from discord.ext import commands\n'), ((3488, 3532), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (3512, 3532), False, 'from discord.ext import commands\n'), ((4147, 4165), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4163, 4165), False, 'from discord.ext import commands\n'), ((4171, 4215), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (4195, 4215), False, 'from discord.ext import commands\n'), ((4845, 4863), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4861, 4863), False, 'from discord.ext import commands\n'), ((4869, 4913), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (4893, 4913), False, 'from discord.ext import commands\n'), ((2899, 2947), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': '"""Muted"""'}), "(ctx.guild.roles, name='Muted')\n", (2916, 2947), False, 'import discord\n'), ((3678, 3726), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': '"""Muted"""'}), "(ctx.guild.roles, name='Muted')\n", (3695, 3726), False, 'import discord\n'), ((4344, 4389), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': 'role'}), '(ctx.guild.roles, name=role)\n', (4361, 4389), False, 'import discord\n'), ((5045, 5090), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': 'role'}), '(ctx.guild.roles, name=role)\n', (5062, 5090), False, 'import discord\n'), ((580, 603), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (594, 603), False, 'import discord\n'), ((1146, 1169), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (1160, 1169), False, 'import discord\n'), ((1706, 1729), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (1720, 1729), False, 'import discord\n'), ((2555, 2578), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (2569, 2578), False, 'import discord\n'), ((3330, 3353), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (3344, 3353), False, 'import discord\n'), ((4013, 4036), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (4027, 4036), False, 'import discord\n'), ((4700, 4723), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (4714, 4723), False, 'import discord\n'), ((5393, 5416), 'discord.Colour', 'discord.Colour', (['(9442302)'], {}), '(9442302)\n', (5407, 5416), False, 'import discord\n')]
|
from django.contrib import admin
from django.db.models import Count
from .models import *
@admin.register(Status)
class StatusAdmin(admin.ModelAdmin):
list_display = ('code',)
@admin.register(Priority)
class PriorityAdmin(admin.ModelAdmin):
list_display = ('code',)
@admin.register(Issue)
class IssueAdmin(admin.ModelAdmin):
list_display = ('title', 'status', 'priority', 'submitter',
'submitted_date', 'modified_date')
list_filter = ('priority', 'status', 'submitted_date')
search_fields = ('title', 'description',)
@admin.register(IssueSummary)
class IssueSummary(admin.ModelAdmin):
change_list_template = 'admin/issue_summary_change_list.html'
date_hierarchy = 'submitted_date'
list_filter = (
'priority',
)
def has_add_permission(self, request):
return False
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(
request,
extra_context=extra_context,
)
try:
qs = response.context_data['cl'].queryset
except (AttributeError, KeyError):
return response
metrics = {
'total': Count('id'),
}
response.context_data['summary'] = list(
qs.values('priority__code').annotate(**metrics)
)
response.context_data['summary_total'] = dict(
qs.aggregate(**metrics)
)
return response
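# Added note (illustrative; not part of the original admin module): with the metrics
# defined above, ``summary`` becomes a list of dicts keyed by the grouped field and
# each metric, e.g. [{'priority__code': 'HIGH', 'total': 3}, ...] (values made up),
# while ``summary_total`` is a plain dict such as {'total': 42}; both are rendered
# by the custom ``issue_summary_change_list.html`` template.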
|
[
"django.db.models.Count",
"django.contrib.admin.register"
] |
[((94, 116), 'django.contrib.admin.register', 'admin.register', (['Status'], {}), '(Status)\n', (108, 116), False, 'from django.contrib import admin\n'), ((186, 210), 'django.contrib.admin.register', 'admin.register', (['Priority'], {}), '(Priority)\n', (200, 210), False, 'from django.contrib import admin\n'), ((282, 303), 'django.contrib.admin.register', 'admin.register', (['Issue'], {}), '(Issue)\n', (296, 303), False, 'from django.contrib import admin\n'), ((567, 595), 'django.contrib.admin.register', 'admin.register', (['IssueSummary'], {}), '(IssueSummary)\n', (581, 595), False, 'from django.contrib import admin\n'), ((1207, 1218), 'django.db.models.Count', 'Count', (['"""id"""'], {}), "('id')\n", (1212, 1218), False, 'from django.db.models import Count\n')]
|
import traceback
from typing import List
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
MAX_NUMBER_OF_LINES = 400
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Training Reporter'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
credentials = tools.run_flow(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def append_result_to_spreadsheet(dataset_size: int = 140,
model_name: str = "vgg4",
configuration_name="many_anchor_box_ratios",
data_augmentation="",
early_stopping: int = 20,
reduction_patience: int = 8,
learning_rate_reduction_factor: float = 0.5,
optimizer: str = "Adadelta",
initial_learning_rate: float = 1.0,
non_max_suppression_overlap_threshold: float = 0.7,
non_max_suppression_max_boxes: int = 300,
validation_accuracy: float = "0.90",
validation_total_loss: float = "0.10",
best_loss_rpn_cls: float = 999.9,
best_loss_rpn_regr: float = 999.9,
best_loss_class_cls: float = 999.9,
best_loss_class_regr: float = 999.9,
date: str = "24.12.9999",
datasets: str = "muscima_pp",
execution_time_in_seconds: int = "0"):
""" Appends the provided results to the Google Spreadsheets document
https://docs.google.com/spreadsheets/d/1MT4CH9yJD_vM9nT8JgnfmzwAVIuRoQYEyv-5FHMjYVo/edit#gid=0
"""
try:
service, spreadsheet_id = get_service_and_spreadsheet_id()
first_empty_line = get_first_empty_line_fast(service, spreadsheet_id)
print("Uploading results to Google Spreadsheet and appending at first empty line {0}".format(first_empty_line))
data = [dataset_size, model_name, configuration_name, data_augmentation, early_stopping, reduction_patience,
learning_rate_reduction_factor, optimizer, initial_learning_rate, non_max_suppression_overlap_threshold,
non_max_suppression_max_boxes, validation_accuracy, validation_total_loss, best_loss_rpn_cls,
best_loss_rpn_regr, best_loss_class_cls, best_loss_class_regr, date, datasets,
execution_time_in_seconds]
write_into_spreadsheet(service, spreadsheet_id, data, first_empty_line)
except Exception as exception:
print("Error while uploading results to Google Spreadsheet: {0}".format(str(exception)))
traceback.print_exc()
def get_service_and_spreadsheet_id():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discovery_url = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discovery_url)
spreadsheet_id = '1MT4CH9yJD_vM9nT8JgnfmzwAVIuRoQYEyv-5FHMjYVo'
return service, spreadsheet_id
def write_into_spreadsheet(service, spreadsheet_id, row_data: List[str], line_number):
value_input_option = "RAW"
body = {
'values': [
row_data,
# Another row, currently not supported
]
}
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheet_id, range="Sheet1!A{0}:Z{0}".format(line_number),
valueInputOption=value_input_option, body=body).execute()
return result
def get_first_empty_line_fast(service, spreadsheet_id) -> int:
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range="Sheet1!A1:A{0}".format(MAX_NUMBER_OF_LINES)).execute()
values = result.get('values', [])
return len(values) + 1
if __name__ == '__main__':
append_result_to_spreadsheet()
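# Added note (illustrative): running the module directly appends a row of the default
# values above; a training script would normally pass real metrics by keyword, e.g.
# append_result_to_spreadsheet(model_name="vgg16", validation_accuracy=0.87,
# execution_time_in_seconds=3600) -- the keyword names are exactly the parameters
# defined above, the values here are made up.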
|
[
"oauth2client.file.Storage",
"httplib2.Http",
"apiclient.discovery.build",
"traceback.print_exc",
"os.path.join",
"os.makedirs",
"oauth2client.client.flow_from_clientsecrets",
"os.path.exists",
"oauth2client.tools.run_flow",
"os.path.expanduser"
] |
[((822, 845), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (840, 845), False, 'import os\n'), ((867, 905), 'os.path.join', 'os.path.join', (['home_dir', '""".credentials"""'], {}), "(home_dir, '.credentials')\n", (879, 905), False, 'import os\n'), ((1007, 1083), 'os.path.join', 'os.path.join', (['credential_dir', '"""sheets.googleapis.com-python-quickstart.json"""'], {}), "(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n", (1019, 1083), False, 'import os\n'), ((1097, 1121), 'oauth2client.file.Storage', 'Storage', (['credential_path'], {}), '(credential_path)\n', (1104, 1121), False, 'from oauth2client.file import Storage\n'), ((4231, 4308), 'apiclient.discovery.build', 'discovery.build', (['"""sheets"""', '"""v4"""'], {'http': 'http', 'discoveryServiceUrl': 'discovery_url'}), "('sheets', 'v4', http=http, discoveryServiceUrl=discovery_url)\n", (4246, 4308), False, 'from apiclient import discovery\n'), ((917, 947), 'os.path.exists', 'os.path.exists', (['credential_dir'], {}), '(credential_dir)\n', (931, 947), False, 'import os\n'), ((957, 984), 'os.makedirs', 'os.makedirs', (['credential_dir'], {}), '(credential_dir)\n', (968, 984), False, 'import os\n'), ((1214, 1272), 'oauth2client.client.flow_from_clientsecrets', 'client.flow_from_clientsecrets', (['CLIENT_SECRET_FILE', 'SCOPES'], {}), '(CLIENT_SECRET_FILE, SCOPES)\n', (1244, 1272), False, 'from oauth2client import client\n'), ((1338, 1365), 'oauth2client.tools.run_flow', 'tools.run_flow', (['flow', 'store'], {}), '(flow, store)\n', (1352, 1365), False, 'from oauth2client import tools\n'), ((4095, 4110), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (4108, 4110), False, 'import httplib2\n'), ((3964, 3985), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3983, 3985), False, 'import traceback\n')]
|
import os
from flask import Blueprint
from flask import json, url_for, current_app
from flask import g, request, abort, send_file
from flaskext.uploads import UploadSet
from ..utils import as_resource, as_collection
from ..db import sim as sim_db
sims_page = Blueprint('sims', __name__)
#STAGE_DIR = '/data/web/htdocs/wmt/api/dev/files/downloads'
def to_resource(sim):
#links = []
#for tag in tag_db.tags_with_model(model.id):
# link = dict(rel='collection/tags')
# if tag is not None:
# link['href'] = url_for('tags.tag', id=tag.id)
# else:
# link['href'] = None
# links.append(link)
return {
'_type': 'sim',
'id': sim.id,
'uuid': sim.uuid,
'href': '/api/sims/%d' % sim.id,
'created': sim.created,
'updated': sim.updated,
'owner': sim.owner or None,
#'links': links,
}
def to_collection(sims):
return [to_resource(sim) for sim in sims]
@sims_page.route('/', methods=['GET', 'POST', 'OPTIONS'])
def show():
if request.method == 'GET':
sort = request.args.get('sort', 'id')
order = request.args.get('order', 'asc')
sims = sim_db.all(sort=sort, order=order)
collection = [to_resource(sim) for sim in sims]
return as_collection(collection)
elif request.method == 'POST':
data = json.loads(request.data)
return as_resource(to_resource(
sim_db.add(data['name'], data['model'])))
@sims_page.route('/<int:id>', methods=['GET', 'PATCH', 'REMOVE'])
def sim(id):
sim = sim_db.get(id) or abort(404)
if request.method == 'PATCH':
data = json.loads(request.data)
if set(data.keys()).issubset(['status', 'message']):
sim_db.update_status(id, **data) or abort(401)
else:
abort(400)
elif request.method == 'REMOVE':
sim_db.remove()
return as_resource(to_resource(sim))
@sims_page.route('/<int:id>/status', methods=['GET', 'PATCH', 'PUT'])
def status(id):
if request.method in ['PATCH', 'PUT']:
data = json.loads(request.data)
keys = set(data.keys())
if request.method == 'PATCH' and not keys.issubset(['status',
'message']):
abort(400)
elif request.method == 'PUT' and keys != set(['status', 'message']):
abort(400)
sim_db.update_status(**data)
sim = sim_db.get(id) or abort(404)
return as_resource({'status': sim.status,
'message': sim.message })
@sims_page.route('/<int:id>/files', methods=['GET'])
def files(id):
import tempfile, tarfile, shutil
format = request.args.get('format', 'gztar')
sim = sim_db.get(id) or abort(404)
try:
tmpdir = tempfile.mkdtemp(prefix='wmt', suffix='.d')
except:
raise
else:
archive = os.path.join(tmpdir, str(sim.uuid))
name = shutil.make_archive(archive, format,
current_app.config['STAGE_DIR'], sim.uuid)
return send_file(name, attachment_filename=os.path.basename(name),
as_attachment=True)
finally:
shutil.rmtree(tmpdir)
@sims_page.route('/<int:id>/actions', methods=['POST'])
def actions(id):
if request.method == 'POST':
data = json.loads(request.data)
if data['action'] == 'start':
sim_db.start(id)
elif data['action'] == 'stop':
sim_db.stop(id)
else:
abort(400)
|
[
"flask.Blueprint",
"shutil.make_archive",
"flask.request.args.get",
"os.path.basename",
"flask.abort",
"tempfile.mkdtemp",
"flask.json.loads",
"shutil.rmtree"
] |
[((263, 290), 'flask.Blueprint', 'Blueprint', (['"""sims"""', '__name__'], {}), "('sims', __name__)\n", (272, 290), False, 'from flask import Blueprint\n'), ((2714, 2749), 'flask.request.args.get', 'request.args.get', (['"""format"""', '"""gztar"""'], {}), "('format', 'gztar')\n", (2730, 2749), False, 'from flask import g, request, abort, send_file\n'), ((1098, 1128), 'flask.request.args.get', 'request.args.get', (['"""sort"""', '"""id"""'], {}), "('sort', 'id')\n", (1114, 1128), False, 'from flask import g, request, abort, send_file\n'), ((1145, 1177), 'flask.request.args.get', 'request.args.get', (['"""order"""', '"""asc"""'], {}), "('order', 'asc')\n", (1161, 1177), False, 'from flask import g, request, abort, send_file\n'), ((1604, 1614), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1609, 1614), False, 'from flask import g, request, abort, send_file\n'), ((1665, 1689), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (1675, 1689), False, 'from flask import json, url_for, current_app\n'), ((2097, 2121), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (2107, 2121), False, 'from flask import json, url_for, current_app\n'), ((2486, 2496), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2491, 2496), False, 'from flask import g, request, abort, send_file\n'), ((2779, 2789), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2784, 2789), False, 'from flask import g, request, abort, send_file\n'), ((2817, 2860), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""wmt"""', 'suffix': '""".d"""'}), "(prefix='wmt', suffix='.d')\n", (2833, 2860), False, 'import tempfile, tarfile, shutil\n'), ((2966, 3045), 'shutil.make_archive', 'shutil.make_archive', (['archive', 'format', "current_app.config['STAGE_DIR']", 'sim.uuid'], {}), "(archive, format, current_app.config['STAGE_DIR'], sim.uuid)\n", (2985, 3045), False, 'import tempfile, tarfile, shutil\n'), ((3222, 3243), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (3235, 3243), False, 'import tempfile, tarfile, shutil\n'), ((3367, 3391), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3377, 3391), False, 'from flask import json, url_for, current_app\n'), ((1376, 1400), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (1386, 1400), False, 'from flask import json, url_for, current_app\n'), ((1836, 1846), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (1841, 1846), False, 'from flask import g, request, abort, send_file\n'), ((2309, 2319), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2314, 2319), False, 'from flask import g, request, abort, send_file\n'), ((1799, 1809), 'flask.abort', 'abort', (['(401)'], {}), '(401)\n', (1804, 1809), False, 'from flask import g, request, abort, send_file\n'), ((2409, 2419), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2414, 2419), False, 'from flask import g, request, abort, send_file\n'), ((3132, 3154), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (3148, 3154), False, 'import os\n'), ((3552, 3562), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (3557, 3562), False, 'from flask import g, request, abort, send_file\n')]
|
#!/usr/bin/python
import sys
import re
gatedTweetPath = sys.argv[1]
inputPath = sys.argv[2]
outputPath = sys.argv[3]
tweetIdRegEx = re.compile("[0-9]{18}")
gatedTweetSet = set()
with open(gatedTweetPath, "r") as f:
for l in f:
gatedTweetSet.add(long(l))
# print gatedTweetSet
outputFile = open(outputPath, "w")
tweetIdIndex = None
with open(inputPath, "r") as f:
firstLine = f.next()
firstLine = firstLine.replace("\t", " ")
arr = firstLine.split(" ")
for i, e in enumerate(arr):
# print i, e
if ( tweetIdRegEx.match(e) ):
tweetIdIndex = i
break
# print tweetIdIndex
with open(inputPath, "r") as f:
for l in f:
l = l.replace("\t", " ")
arr = l.split(" ")
tweetId = long(arr[tweetIdIndex])
if ( tweetId in gatedTweetSet ):
outputFile.write(l)
outputFile.close()
|
[
"re.compile"
] |
[((135, 158), 're.compile', 're.compile', (['"""[0-9]{18}"""'], {}), "('[0-9]{18}')\n", (145, 158), False, 'import re\n')]
|
import tests.scenarios as scenarios
from tests.api_test_case import APITestCase
from entities import Match, Player
import events
class TestHappyPath(APITestCase):
@classmethod
def setUpClass(cls):
scenarios.two_players()
def match_setup(self):
match_id = self.post_to_create_a_new_match()
self.post_player_1_setup(match_id)
self.post_player_2_prompt(match_id)
return match_id
def post_to_create_a_new_match(self):
response = self.assertPost201('/matches')
self.assertJson(response, '_id')
match_id = response.json()['_id']
self.assertGet200('/matches/' + match_id)
return match_id
def post_player_1_setup(self, match_id):
request_data = {'player_id': 1, 'deck_id': 1}
self.assertPost200('/matches/'+match_id+'/join', json=request_data)
response = self.assertGet200('/matches/'+match_id)
self.assertJson(response, 'players')
match = Match(response.json())
players_in_the_match = len(match.players)
self.assertEqual(players_in_the_match, 1)
last_event = match.log[-1]['name']
self.assertEqual(last_event, events.Setup)
def post_player_2_prompt(self, match_id):
request_data = {'player_id': 2, 'deck_id': 2}
self.assertPost200('/matches/'+match_id+'/join', json=request_data)
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
players_in_the_match = len(match.players)
self.assertEqual(players_in_the_match, 2)
last_event = match.log[-1]['name']
self.assertEqual(last_event, events.Prompt)
def test_simulated_match(self):
match_id = self.match_setup()
self.play_turn_1(match_id)
self.assertPost200('/matches/' + match_id + '/players/2/end_turn')
self.play_and_use_counter(match_id)
self.post_end_turn(match_id)
def play_turn_1(self, match_id):
self.post_play_card(match_id)
self.post_use_card_to_get_resources(match_id)
self.post_use_resources_to_play_a_card(match_id)
self.post_use_card_to_deal_damage(match_id)
self.post_end_turn(match_id)
def post_play_card(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_board = len(match.current_player().board)
previous_hand = len(match.players[0].hand)
self.assertPost200('/matches/' + match_id + '/players/1/play/1')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
board = len(match.current_player().board)
self.assertEqual(board, previous_board + 1)
cards_in_hand = len(match.players[0].hand)
self.assertEqual(cards_in_hand, previous_hand - 1)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_card_to_get_resources(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/use/1')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
resources = match.current_player().resources
self.assertGreater(resources.a, 0)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_resources_to_play_a_card(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_board = len(match.players[0].board)
self.assertPost200('/matches/' + match_id + '/players/1/play/1')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
resources = match.current_player().resources
self.assertEqual(resources.a, 0)
cards_in_the_board = len(match.players[0].board)
self.assertEqual(cards_in_the_board, previous_board + 1)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_card_to_deal_damage(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/use/2')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
enemy = match.players[1]
self.assertLess(enemy.hp, Player.INITIAL_HP)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_end_turn(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/end_turn')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
self.assertEqual(match.current_player_index, 1)
def play_and_use_counter(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_hp = match.players[1].hp
self.assertPost200('/matches/' + match_id + '/players/1/use/2')
self.assertPost200('/matches/' + match_id + '/players/2/play/1')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
self.assertPost200('/matches/' + match_id + '/players/2/use/1')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
hp = match.players[1].hp
self.assertEqual(len(match.stack), 0)
self.assertEqual(previous_hp, hp)
|
[
"tests.scenarios.two_players"
] |
[((216, 239), 'tests.scenarios.two_players', 'scenarios.two_players', ([], {}), '()\n', (237, 239), True, 'import tests.scenarios as scenarios\n')]
|
from __future__ import print_function, division, absolute_import, unicode_literals
import datetime
def div_timedelta_int(d, i):
d_us = d.microseconds + 1000000 * (d.seconds + 86400 * d.days)
return datetime.timedelta(microseconds=d_us / i)
def div_timedelta(d1, d2):
if isinstance(d2, int):
return div_timedelta_int(d1, d2)
d1_us = d1.microseconds + 1000000 * (d1.seconds + 86400 * d1.days)
d2_us = d2.microseconds + 1000000 * (d2.seconds + 86400 * d2.days)
return d1_us / d2_us
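# Worked example (added for illustration): timedelta / int and timedelta / timedelta
# only became native operations in Python 3.2, which is presumably why they are
# emulated here via microsecond counts.
def _example_div_timedelta():
    quarter_hour = div_timedelta_int(datetime.timedelta(hours=1), 4)  # 0:15:00
    ratio = div_timedelta(datetime.timedelta(hours=2),
                          datetime.timedelta(minutes=30))  # 4.0
    return quarter_hour, ratio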
|
[
"datetime.timedelta"
] |
[((208, 249), 'datetime.timedelta', 'datetime.timedelta', ([], {'microseconds': '(d_us / i)'}), '(microseconds=d_us / i)\n', (226, 249), False, 'import datetime\n')]
|
from socketserver import BaseRequestHandler
import pathlib
import os
import mimetypes
import urllib.parse as urlparse
urlparse.uses_netloc.append("gemini")
urlparse.uses_relative.append("gemini")
class GeminiRequest:
"""A Gemini request, with URL and access to the underlying socket."""
def __init__(self,sock,url,initial_buf=b''):
self._sock = sock
self._buffer = initial_buf
self.url = url
self.parsed = urlparse.urlparse(url)
    def closest_power_of_two(self,n):
        """Returns the smallest power of two (at least 2) that is greater than or equal to n."""
retval = 2
while retval<n: retval*=2
return retval
def recv(self,bufsize,flags=0):
"""A proxy over self._sock.recv that handles the initial buffer as well as other buffer problems."""
# time to do some funky shit
# do we have bufsize in our buffer?
if bufsize<=len(self._buffer):
# return that much
retval, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return retval
# if not, then ask for a power of two that's more than what was asked for
temp = self._sock.recv(self.closest_power_of_two(bufsize),flags)
self._buffer += temp
# now do we have bufsize in our buffer?
if bufsize<=len(self._buffer):
# return that much
retval, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return retval
else: # if not, just return what we have and go for it
retval, self._buffer = self._buffer, b''
return retval
def send(self,*args,**kwargs):
"""Plain alias of self._sock.sendall."""
return self._sock.sendall(*args,**kwargs)
def __getattr__(self,k):
"""Attempt to alias unknown attributes to self.parsed."""
# try and get the attribute off the parsed URL object
return getattr(self.parsed,k)
class GeminiRequestHandler(BaseRequestHandler):
HOSTS = [] # hostnames we can serve
PORT = 1965 # port we serve
ROOT = "/var/gemini" # root directory from which we serve files
DEFAULT_DEFAULT_META = { # default for the DEFAULT_META var
40: "Resource temporarily unavailable",
41: "Server unavailable",
42: "Unexpected error in CGI program",
43: "Unexpected error during handling of proxy request",
44: 60,
50: "Permanent failure",
51: "Not found",
52: "It's gone, Jim",
53: "Proxy request refused",
59: "Bad request",
60: "Provide a client certificate to continue",
61: "Not authorized to access this content",
62: "Invalid certificate provided"
}
def setup(self):
"""Gets us ready to handle the request. Any implementation-specific things should be done in setup_overrideable."""
self.peer_cert = self.request.get_peer_certificate()
self.setup_overrideable()
def handle(self):
"""Handles request. Parses request line and delegates response handling."""
buffer = b''
while b'\n' not in buffer and (temp:=self.request.recv(512)): buffer+=temp
if buffer[buffer.index(b'\n')-1]!=13: # request line must end with \r\n
self.header(59) # bad request
return
request, buffer = buffer[:buffer.index(b'\n')-1], buffer[buffer.index(b'\n')+1:]
if len(request)>1024: # maximum URL length is 1024 bytes
self.header(59) # bad request
return
try:
request = self.massage_request_line(request.decode("utf-8"),buffer)
except:
self.header(59) # bad request
return
if not self.preflight(request):
return # preflight will return the appropriate status code
if hasattr(self,f"handle_{request.scheme}"): # if we have a handler for that status...
getattr(self,f"handle_{request.scheme}")(request) # ...use it
else: # if not...
self.header(53) # treat it as a proxy request and refuse it
def massage_request_line(self,request_line,buffer):
"""Massages the request line into a GeminiRequest object."""
return GeminiRequest(self.request,request_line,buffer) # set up GeminiRequest object
def header(self,response_code,meta=""):
"""Sends a response header down the line. Will default to the entry in self.DEFAULT_META if it exists and meta is not provided."""
if not meta: meta = self.DEFAULT_META.get(response_code,"")
self.request.sendall(f"{response_code!s} {meta}\r\n".encode("utf-8"))
def preflight(self,request):
"""Preflight checks. Is the request for a URL we can serve?"""
if request.hostname not in self.HOSTS:
self.header(53) # refuse proxy requests
return False
port = request.port or 1965 # default to the default port
if port != self.PORT:
self.header(53) # refuse proxy requests
return False
return True # otherwise we're good
def handle_gemini(self,request):
"""Basic static file server. Default for gemini URLs."""
path = pathlib.Path(request.path.strip("/"))
file = pathlib.Path(os.path.normpath(request.path.strip("/")))
if file.is_absolute() or str(file).startswith(".."):
self.header(59)
return
filesystem = pathlib.Path(self.ROOT)/request.hostname/file
try:
if not os.access(filesystem,os.R_OK):
self.header(51) # not found
return
except OSError: # some OS-related error, treat it like it doesn't exist
self.header(51)
return
if filesystem.is_dir():
if (tmp:=filesystem/pathlib.Path("index.gmi")).exists():
filesystem = tmp
else:
self.directory_list(request,filesystem)
return
if not filesystem.exists():
self.header(51) # not found
return
else: # it exists and it's a file
self.send_file(request,filesystem)
def directory_list(self,request,dir):
"""Directory listing. I haven't implemented it yet, so it just returns a 40 error."""
self.header(40,"Resource unavailable") # NYI
def send_file(self,request,file):
"""Send the file at pathlib.Path object file to the request at request."""
mimetype = self.guess_mimetype(file)
self.header(20,mimetype)
with file.open("rb") as f:
while (data:=f.read(2048)):
request.send(data)
def guess_mimetype(self,path):
"""Use self.mime mimetypes.MimeTypes instance to guess mimetypes. Defaults to application/octet-stream."""
type, encoding = self.mime.guess_type(path.name)
if encoding: return f"{type}; charset={encoding}"
else: return type or "application/octet-stream"
def setup_overrideable(self):
"""Setting up self.DEFAULT_META and self.mime. If your mixin requires special setup override this method and call super().setup_overrideable(self)."""
self.DEFAULT_META = {}
self.DEFAULT_META.update(self.DEFAULT_DEFAULT_META)
self.mime = mimetypes.MimeTypes()
self.mime.add_type("text/gemini",".gmi")
self.mime.add_type("text/gemini",".gemini")
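# Added notes (illustrative; not part of the original module):
#  * closest_power_of_two() returns the first power of two that is >= n (e.g. 5 -> 8),
#    so recv() always asks the socket for a round buffer size and parks any surplus
#    bytes in self._buffer for the next call.
#  * setup() calls self.request.get_peer_certificate(), which suggests the listening
#    socket is expected to be a pyOpenSSL-style TLS connection rather than a plain
#    socket (an inference from this file, not something it states explicitly).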
|
[
"pathlib.Path",
"mimetypes.MimeTypes",
"os.access",
"urllib.parse.uses_relative.append",
"urllib.parse.uses_netloc.append",
"urllib.parse.urlparse"
] |
[((118, 155), 'urllib.parse.uses_netloc.append', 'urlparse.uses_netloc.append', (['"""gemini"""'], {}), "('gemini')\n", (145, 155), True, 'import urllib.parse as urlparse\n'), ((156, 195), 'urllib.parse.uses_relative.append', 'urlparse.uses_relative.append', (['"""gemini"""'], {}), "('gemini')\n", (185, 195), True, 'import urllib.parse as urlparse\n'), ((417, 439), 'urllib.parse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (434, 439), True, 'import urllib.parse as urlparse\n'), ((6334, 6355), 'mimetypes.MimeTypes', 'mimetypes.MimeTypes', ([], {}), '()\n', (6353, 6355), False, 'import mimetypes\n'), ((4772, 4795), 'pathlib.Path', 'pathlib.Path', (['self.ROOT'], {}), '(self.ROOT)\n', (4784, 4795), False, 'import pathlib\n'), ((4835, 4865), 'os.access', 'os.access', (['filesystem', 'os.R_OK'], {}), '(filesystem, os.R_OK)\n', (4844, 4865), False, 'import os\n'), ((5061, 5086), 'pathlib.Path', 'pathlib.Path', (['"""index.gmi"""'], {}), "('index.gmi')\n", (5073, 5086), False, 'import pathlib\n')]
|
"""Added phone field for User
Revision ID: 330568e8928c
Revises: <PASSWORD>
Create Date: 2015-02-05 16:53:40.517660
"""
# revision identifiers, used by Alembic.
revision = '330568e8928c'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('phone', sa.String(length=100), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'phone')
### end Alembic commands ###
|
[
"sqlalchemy.String",
"alembic.op.drop_column"
] |
[((553, 585), 'alembic.op.drop_column', 'op.drop_column', (['"""users"""', '"""phone"""'], {}), "('users', 'phone')\n", (567, 585), False, 'from alembic import op\n'), ((394, 415), 'sqlalchemy.String', 'sa.String', ([], {'length': '(100)'}), '(length=100)\n', (403, 415), True, 'import sqlalchemy as sa\n')]
|
from ec_models import Adhesion
from ec_xl_parse import get_oil_properties_by_category
from ec_oil_props import get_oil_weathering
from ec_oil_misc import g_cm_2_to_kg_m_2
def get_oil_adhesions(oil_columns, field_indexes):
'''
Getting the adhesion is fairly straightforward. We simply get the
value in g/cm^2 and convert to kg/m^2.
Dimensional parameters are simply (weathering).
'''
weathering = get_oil_weathering(oil_columns, field_indexes)
adhesions = get_adhesions_by_weathering(oil_columns,
field_indexes,
weathering)
return adhesions
def get_adhesions_by_weathering(oil_columns, field_indexes, weathering):
adhesions = []
props = get_oil_properties_by_category(oil_columns, field_indexes,
'adhesion_g_cm2_ests_1996')
prop_names = props.keys()
for idx, vals in enumerate(zip(*props.values())):
adhesion_kwargs = build_adhesion_kwargs(prop_names, vals,
weathering[idx])
adhesions.append(adhesion_kwargs)
return [Adhesion(**a) for a in adhesions
if a['kg_m_2'] is not None]
def build_adhesion_kwargs(prop_names, values, weathering):
'''
Build adhesion properties dictionary suitable to be passed in as
keyword args.
- prop_names: The list of property names
- values: A list of Excel cell objects representing the properties.
- weathering: The fractional oil weathering amount.
'''
adhesion_kwargs = dict(zip(prop_names, [v[0].value for v in values]))
adhesion_kwargs['weathering'] = weathering
adhesion_kwargs['kg_m_2'] = g_cm_2_to_kg_m_2(adhesion_kwargs['adhesion'])
return adhesion_kwargs
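# Worked conversion note (added for illustration): the sheet stores adhesion in
# g/cm^2 and g_cm_2_to_kg_m_2 rescales it; since 1 g = 1e-3 kg and 1 cm^2 = 1e-4 m^2,
# 1 g/cm^2 equals 10 kg/m^2, so an 'adhesion' cell of 0.25 would yield kg_m_2 == 2.5.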
|
[
"ec_oil_misc.g_cm_2_to_kg_m_2",
"ec_xl_parse.get_oil_properties_by_category",
"ec_oil_props.get_oil_weathering",
"ec_models.Adhesion"
] |
[((437, 483), 'ec_oil_props.get_oil_weathering', 'get_oil_weathering', (['oil_columns', 'field_indexes'], {}), '(oil_columns, field_indexes)\n', (455, 483), False, 'from ec_oil_props import get_oil_weathering\n'), ((785, 875), 'ec_xl_parse.get_oil_properties_by_category', 'get_oil_properties_by_category', (['oil_columns', 'field_indexes', '"""adhesion_g_cm2_ests_1996"""'], {}), "(oil_columns, field_indexes,\n 'adhesion_g_cm2_ests_1996')\n", (815, 875), False, 'from ec_xl_parse import get_oil_properties_by_category\n'), ((1771, 1816), 'ec_oil_misc.g_cm_2_to_kg_m_2', 'g_cm_2_to_kg_m_2', (["adhesion_kwargs['adhesion']"], {}), "(adhesion_kwargs['adhesion'])\n", (1787, 1816), False, 'from ec_oil_misc import g_cm_2_to_kg_m_2\n'), ((1186, 1199), 'ec_models.Adhesion', 'Adhesion', ([], {}), '(**a)\n', (1194, 1199), False, 'from ec_models import Adhesion\n')]
|
from dataclasses import dataclass
import time
from tinyman.v1.client import TinymanTestnetClient, TinymanMainnetClient
from utils import get_trades
from colorama import Fore
@dataclass
class Account:
"""
DataClass For Bot Account
"""
address: str
private_key: str
class Bot:
def __init__(self, account: Account, network: str, interval: int):
"""
Args:
- account: Account object containing address and private_key
- network: "testnet" or "mainnet".
            - interval: sleeping interval for bot in seconds
"""
self.account = account
self.network = network
self.interval = interval
self.client = TinymanMainnetClient(user_address=account.address) if network == "mainnet" else TinymanTestnetClient(user_address=account.address)
def run(self):
print(Fore.GREEN, "Bot Is Running ...")
if not self.client.is_opted_in():
print(Fore.GREEN, "Optin In Progress ...")
self._optin()
while True:
trades = get_trades(self.network, self.account.address)
if not trades:
print(Fore.RED, "No Trade To Execute")
break
for trade in trades:
self._execute(trade)
print(Fore.GREEN, f'Bot Sleeping For {self.interval} Seconds ...')
time.sleep(self.interval)
def _optin(self):
"""
Opts In TinyMan App into Acount
"""
transaction_group = self.client.prepare_app_optin_transactions()
self._submit_txn(transaction_group)
def _execute(self, trade):
"""
Executes A Trade.
Args:
- trade: An Instance of Trade class in mongo db
"""
t_asset1 = trade.asset1
t_asset2 = trade.asset2
t_asset_in = trade.asset_in
asset1 = self.client.fetch_asset(int(t_asset1.asset_id))
asset2 = self.client.fetch_asset(int(t_asset2.asset_id))
pool = self.client.fetch_pool(asset1, asset2)
if t_asset_in.asset_id != t_asset2.asset_id:
quote = pool.fetch_fixed_input_swap_quote(
asset1(trade.asset_in_amt*10**asset1.decimals), float(trade.slippage))
else:
quote = pool.fetch_fixed_input_swap_quote(
asset2(trade.asset_in_amt*10**asset2.decimals), float(trade.slippage))
amt_in = quote.amount_in_with_slippage
amt_out = quote.amount_out_with_slippage
amt_in = amt_in.amount/10**amt_in.asset.decimals
amt_out = amt_out.amount/10**amt_out.asset.decimals
price = amt_out/amt_in
if price >= float(trade.min_sell_price):
self._create_swap_txn(quote, pool)
if trade.do_redeem:
self._redeem(pool, t_asset_in, t_asset2, t_asset1)
trade.is_completed = True
trade.save()
else:
print(Fore.RED, f"Price Target Not Reached, Moving To The Next Trade...")
def _create_swap_txn(self, quote, pool):
transaction_group = pool.prepare_swap_transactions_from_quote(quote)
self._submit_txn(transaction_group)
def _redeem(self, pool, asset_in, asset2, asset1):
excess = pool.fetch_excess_amounts()
if asset_in.asset_id != asset2.asset_id:
if asset2 in excess:
self._submit_redeem(asset2, excess, pool)
else:
if asset1 in excess:
self._submit_redeem(asset1, excess, pool)
def _submit_redeem(self, asset, excess, pool):
amount = excess[asset]
transaction_group = pool.prepare_redeem_transactions(amount)
self._submit_txn(transaction_group)
def _submit_txn(self, txn):
txn.sign_with_private_key(
self.account.address, self.account.private_key)
self.client.submit(txn, wait=True)
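# Hedged usage sketch (added; the address and key below are placeholders, not real
# values):
#
#     if __name__ == "__main__":
#         account = Account(address="<ALGORAND_ADDRESS>", private_key="<PRIVATE_KEY>")
#         Bot(account, network="testnet", interval=60).run()
#
# run() opts the account into the Tinyman app if needed, then re-reads the trades
# returned by get_trades() and sleeps `interval` seconds between passes.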
|
[
"utils.get_trades",
"tinyman.v1.client.TinymanMainnetClient",
"tinyman.v1.client.TinymanTestnetClient",
"time.sleep"
] |
[((751, 801), 'tinyman.v1.client.TinymanMainnetClient', 'TinymanMainnetClient', ([], {'user_address': 'account.address'}), '(user_address=account.address)\n', (771, 801), False, 'from tinyman.v1.client import TinymanTestnetClient, TinymanMainnetClient\n'), ((831, 881), 'tinyman.v1.client.TinymanTestnetClient', 'TinymanTestnetClient', ([], {'user_address': 'account.address'}), '(user_address=account.address)\n', (851, 881), False, 'from tinyman.v1.client import TinymanTestnetClient, TinymanMainnetClient\n'), ((1114, 1160), 'utils.get_trades', 'get_trades', (['self.network', 'self.account.address'], {}), '(self.network, self.account.address)\n', (1124, 1160), False, 'from utils import get_trades\n'), ((1426, 1451), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (1436, 1451), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-03 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('matsucon2018', '0002_auto_20180203_2326'),
]
operations = [
migrations.AddField(
model_name='signupextra',
name='shirt_size',
field=models.CharField(choices=[('NO_SHIRT', 'En halua paitaa'), ('S', 'S'), ('M', 'M'), ('L', 'L'), ('XL', 'XL'), ('OTHER', 'Muu koko (kerro Vapaa sana -kentässä)')], default='NO_SHIRT', max_length=8, verbose_name='Työvoiman T-paidan koko'),
),
]
|
[
"django.db.models.CharField"
] |
[((372, 619), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('NO_SHIRT', 'En halua paitaa'), ('S', 'S'), ('M', 'M'), ('L', 'L'), ('XL',\n 'XL'), ('OTHER', 'Muu koko (kerro Vapaa sana -kentässä)')]", 'default': '"""NO_SHIRT"""', 'max_length': '(8)', 'verbose_name': '"""Työvoiman T-paidan koko"""'}), "(choices=[('NO_SHIRT', 'En halua paitaa'), ('S', 'S'), ('M',\n 'M'), ('L', 'L'), ('XL', 'XL'), ('OTHER',\n 'Muu koko (kerro Vapaa sana -kentässä)')], default='NO_SHIRT',\n max_length=8, verbose_name='Työvoiman T-paidan koko')\n", (388, 619), False, 'from django.db import migrations, models\n')]
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import traceback
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_CONTEXT_NAME = 'ImpervaWAF'
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
session_id = ''
def do_request(self, method, url_suffix, json_data=None):
if not self.session_id:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406), resp_type='response')
if res.status_code == 401:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406),
resp_type='response')
if res.text:
res = res.json()
else:
res = {}
extract_errors(res)
return res
def login(self):
res = self._http_request('POST', 'SecureSphere/api/v1/auth/session', auth=self._auth)
extract_errors(res)
self.session_id = res.get('session-id')
def get_ip_group_entities(self, group_name, table_name):
raw_res = self.do_request('GET', f'conf/ipGroups/{group_name}')
entries = []
for entry in raw_res.get('entries'):
entries.append({'Type': entry.get('type'),
'IpAddressFrom': entry.get('ipAddressFrom'),
'IpAddressTo': entry.get('ipAddressTo'),
'NetworkAddress': entry.get('networkAddress'),
'CidrMask': entry.get('cidrMask')})
human_readable = tableToMarkdown(table_name, entries, removeNull=True,
headers=['Type', 'IpAddressFrom', 'IpAddressTo', 'NetworkAddress', 'CidrMask'])
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)':
{'Name': group_name, 'Entries': entries}}
return human_readable, entry_context, raw_res
def get_custom_policy_outputs(self, policy_name, table_name):
raw_res = self.do_request('GET', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
policy = {'Name': policy_name,
'Enabled': raw_res.get('enabled'),
'OneAlertPerSession': raw_res.get('oneAlertPerSession'),
'DisplayResponsePage': raw_res.get('displayResponsePage'),
'Severity': raw_res.get('severity'),
'Action': raw_res.get('action'),
'FollowedAction': raw_res.get('followedAction'),
'ApplyTo': raw_res.get('applyTo'),
'MatchCriteria': raw_res.get('matchCriteria')}
hr_policy = policy.copy()
del hr_policy['MatchCriteria']
del hr_policy['ApplyTo']
human_readable = tableToMarkdown(table_name, hr_policy, removeNull=True)
if raw_res.get('applyTo'):
human_readable += '\n\n' + tableToMarkdown('Services to apply the policy to', raw_res.get('applyTo'),
removeNull=True)
for match in raw_res.get('matchCriteria', []):
tmp_match = match.copy()
operation = match['operation']
match_type = match['type']
# generate human readable for sourceIpAddresses type
if match_type == 'sourceIpAddresses':
if tmp_match.get('userDefined'):
for i, element in enumerate(tmp_match['userDefined']):
tmp_match['userDefined'][i] = {'IP Address': tmp_match['userDefined'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Source IP addresses:',
tmp_match['userDefined'], removeNull=True)
if tmp_match.get('ipGroups'):
for i, element in enumerate(tmp_match['ipGroups']):
tmp_match['ipGroups'][i] = {'Group name': tmp_match['ipGroups'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n IP Groups:',
tmp_match['ipGroups'], removeNull=True)
# generate human readable for sourceGeolocation type
elif match_type == 'sourceGeolocation':
if tmp_match.get('values'):
for i, element in enumerate(tmp_match['values']):
tmp_match['values'][i] = {'Country name': tmp_match['values'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Countries to match:',
tmp_match['values'], removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policy}
return human_readable, entry_context, raw_res
def extract_errors(res):
if not isinstance(res, list) and res.get('errors'):
error_message = ''
for err in res['errors']:
error_message += f'error-code: {err.get("error-code")}, description: {err.get("description")}'
raise Exception(error_message)
def generate_policy_data_body(args):
severity = args.get('severity')
action = args.get('action')
followed_action = args.get('followed-action')
body = {}
if args.get('enabled'):
body['enabled'] = args['enabled'] == 'True'
if args.get('one-alert-per-session'):
body['oneAlertPerSession'] = args['one-alert-per-session'] == 'True'
if args.get('display-response-page'):
body['displayResponsePage'] = args['display-response-page'] == 'True'
if severity:
body['severity'] = severity
if action:
body['action'] = action
if followed_action:
body['followedAction'] = followed_action
return body
def generate_match_criteria(body, args):
geo_location_criteria_operation = args.get('geo-location-criteria-operation')
ip_addresses_criteria_operation = args.get('ip-addresses-criteria-operation')
ip_groups = args.get('ip-groups', '')
ip_addreses = args.get('ip-addresses', '')
country_names = args.get('country-names', '')
match_criteria = []
if geo_location_criteria_operation:
if not country_names:
raise Exception('country-names argument is empty')
geo_location_match_item = {'type': 'sourceGeolocation',
'operation': geo_location_criteria_operation,
'values': country_names.split(',')}
match_criteria.append(geo_location_match_item)
if ip_addresses_criteria_operation:
if not ip_groups and not ip_addreses:
raise Exception('ip-groups and ip-addresses arguments are empty, please fill at least one of them')
ip_addresses_match_item = {'type': 'sourceIpAddresses',
'operation': ip_addresses_criteria_operation}
if ip_groups:
ip_addresses_match_item['ipGroups'] = ip_groups.split(',')
if ip_addreses:
ip_addresses_match_item['userDefined'] = ip_addreses.split(',')
match_criteria.append(ip_addresses_match_item)
body['matchCriteria'] = match_criteria
return body
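# Added example (illustrative): for geo-location-criteria-operation=<op> and
# country-names="Canada,Mexico" this helper appends
#   {'type': 'sourceGeolocation', 'operation': <op>, 'values': ['Canada', 'Mexico']}
# to body['matchCriteria']; the operation string itself is passed through unchanged.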
def generate_ip_groups_entries(args):
entry_type = args.get('entry-type')
ip_from = args.get('ip-address-from')
ip_to = args.get('ip-address-to')
network_address = args.get('network-address')
cidr_mask = args.get('cidr-mask')
operation = args.get('operation')
json_entries = args.get('json-entries')
if not json_entries:
entry = {}
if entry_type == 'single':
entry['ipAddressFrom'] = ip_from
elif entry_type == 'range':
entry['ipAddressFrom'] = ip_from
entry['ipAddressTo'] = ip_to
elif entry_type == 'network':
entry['networkAddress'] = network_address
entry['cidrMask'] = cidr_mask
else:
raise Exception('entry-type argument is invalid')
entry['type'] = entry_type
entry['operation'] = operation
body = {'entries': [entry]}
else:
try:
json_entries = json.loads(json_entries)
except Exception:
raise Exception(f'Failed to parse json-entries as JSON data, 'f' received object:\n{json_entries}')
body = {'entries': json_entries}
return body
@logger
def test_module(client, args):
raw_res = client.do_request('GET', 'conf/sites')
if raw_res.get('sites'):
demisto.results('ok')
@logger
def ip_group_list_command(client, args):
raw_res = client.do_request('GET', 'conf/ipGroups')
groups = []
if raw_res.get('names'):
groups = raw_res['names']
for i, element in enumerate(groups):
groups[i] = {'Name': groups[i]}
human_readable = tableToMarkdown('IP groups', groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)': groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_list_entries_command(client, args):
group_name = args.get('ip-group-name')
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'IP group entries for {group_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_remove_entries_command(client, args):
group_name = args.get('ip-group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}/clear')
return_outputs(f'The IP group {group_name} is now empty', {}, raw_res)
@logger
def sites_list_command(client, args):
raw_res = client.do_request('GET', 'conf/sites')
sites = [{'Name': site} for site in raw_res.get('sites', [])]
human_readable = tableToMarkdown('All sites in the system', sites, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Site(val.Name===obj.Name)': sites}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_groups_list_command(client, args):
site = args.get('site-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}')
server_groups = []
if raw_res.get('server-groups'):
server_groups = raw_res['server-groups']
for i, element in enumerate(server_groups):
server_groups[i] = {'Name': server_groups[i], 'SiteName': site}
human_readable = tableToMarkdown(f'Server groups in {site}', server_groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.ServerGroup(val.Name===obj.Name)': server_groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_group_policies_list_command(client, args):
site = args.get('site-name')
server_group = args.get('server-group-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}/{server_group}/securityPolicies')
policies = []
for policy in raw_res:
policies.append({'System': policy.get('system'),
'PolicyName': policy.get('policy-name'),
'PolicyType': policy.get('policy-type'),
'ServerGroup': server_group,
'SiteName': site})
human_readable = tableToMarkdown(f'Policies for {server_group}', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.SecurityPolicy(val.PolicyName===obj.PolicyName)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def custom_policy_list_command(client, args):
raw_res = client.do_request('GET', 'conf/policies/security/webServiceCustomPolicies')
policies = []
if raw_res.get('customWebPolicies'):
policies = raw_res['customWebPolicies']
for i, element in enumerate(policies):
policies[i] = {'Name': policies[i]}
human_readable = tableToMarkdown('Custom web policies', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def get_custom_policy_command(client, args):
policy_name = args.get('policy-name')
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy data for {policy_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def create_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('POST', f'conf/ipGroups/{group_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('PUT', f'conf/ipGroups/{group_name}/data', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_ip_group_command(client, args):
group_name = args.get('group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}')
return_outputs(f'Group {group_name} deleted successfully', {}, raw_res)
@logger
def create_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply')
web_service = args.get('web-service-name-to-apply')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise Exception(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
body['applyTo'] = [{'siteName': site, 'serverGroupName': server_group, 'webServiceName': web_service}]
client.do_request('POST', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply', '')
web_service = args.get('web-service-name-to-apply', '')
apply_operation = args.get('apply-operation', '')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise DemistoException(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
if apply_operation:
body['applyTo'] = [{'operation': apply_operation, 'siteName': site, 'serverGroupName': server_group,
'webServiceName': web_service}]
client.do_request('PUT', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_custom_policy_command(client, args):
policy_name = args.get('policy-name')
raw_res = client.do_request('DELETE', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
return_outputs(f'Policy {policy_name} deleted successfully', {}, raw_res)
def main():
params = demisto.params()
# get the service API url
base_url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
credentials = params.get('credentials')
username = credentials['identifier'] if credentials else ''
password = credentials['password'] if credentials else ''
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy)
command = demisto.command()
args = demisto.args()
commands = {'test-module': test_module,
'imperva-waf-ip-group-list': ip_group_list_command,
'imperva-waf-ip-group-list-entries': ip_group_list_entries_command,
'imperva-waf-ip-group-remove-entries': ip_group_remove_entries_command,
'imperva-waf-sites-list': sites_list_command,
'imperva-waf-server-group-list': server_groups_list_command,
'imperva-waf-server-group-list-policies': server_group_policies_list_command,
'imperva-waf-web-service-custom-policy-list': custom_policy_list_command,
'imperva-waf-web-service-custom-policy-get': get_custom_policy_command,
'imperva-waf-ip-group-create': create_ip_group_command,
'imperva-waf-ip-group-update-entries': update_ip_group_command,
'imperva-waf-ip-group-delete': delete_ip_group_command,
'imperva-waf-web-service-custom-policy-create': create_custom_policy_command,
'imperva-waf-web-service-custom-policy-update': update_custom_policy_command,
'imperva-waf-web-service-custom-policy-delete': delete_custom_policy_command,
}
if command in commands:
commands[command](client, args)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
# Log exceptions
except Exception as e:
return_error(f'Unexpected error: {str(e)}', error=traceback.format_exc())
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
[
"requests.packages.urllib3.disable_warnings",
"demistomock.args",
"json.loads",
"demistomock.command",
"traceback.format_exc",
"demistomock.params",
"demistomock.results"
] |
[((191, 235), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (233, 235), False, 'import requests\n'), ((16963, 16979), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (16977, 16979), True, 'import demistomock as demisto\n'), ((9097, 9118), 'demistomock.results', 'demisto.results', (['"""ok"""'], {}), "('ok')\n", (9112, 9118), True, 'import demistomock as demisto\n'), ((17557, 17574), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (17572, 17574), True, 'import demistomock as demisto\n'), ((17590, 17604), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (17602, 17604), True, 'import demistomock as demisto\n'), ((8746, 8770), 'json.loads', 'json.loads', (['json_entries'], {}), '(json_entries)\n', (8756, 8770), False, 'import json\n'), ((14504, 14535), 'json.loads', 'json.loads', (['match_criteria_json'], {}), '(match_criteria_json)\n', (14514, 14535), False, 'import json\n'), ((15799, 15830), 'json.loads', 'json.loads', (['match_criteria_json'], {}), '(match_criteria_json)\n', (15809, 15830), False, 'import json\n'), ((17350, 17367), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (17365, 17367), True, 'import demistomock as demisto\n'), ((19168, 19190), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (19188, 19190), False, 'import traceback\n')]
|
##############################################################################
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# File Abstract:
#    Sends an 'I am alive' message once in a while
#
##############################################################################
import xml.dom.minidom
from xml.parsers.expat import ExpatError
from Helpers import Log
from Data import ConnectionPoint
from Data.ConnectionPoint import ConnectionType
from Helpers import Target
from Helpers import TargetManager
from Helpers import ThreadManager
from Helpers import Configuration
from Helpers import VersionMgr
from Util import Time
from Util import Sleep
#############
# This class sends a heartbeat (watchdog re-arm) to all upstream Oscars, so we don't time out
#############
class WatchdogTimer(object):
def __init__(self):
name = "Watchdog Timer Thread"
self.__WorkerThread = ThreadManager.GetThreadManager().CreateThread(name,self.WatchdogProc)
ThreadManager.GetThreadManager().StartThread(name)
def WatchdogProc(self,fnKillSignalled,userData):
lastUpdate = 0
interval = Configuration.get().GetTimeoutPeriod() * 0.25 # send a watchdog at 4x rate of timeout
buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
buffer = buffer + "<Oscar Type=\"WatchdogTimer\">"
buffer = buffer + "<Version>1.0</Version>"
buffer = buffer + "<Port>"+str(Configuration.get().GetUpstreamConnection().getPort())+"</Port>"
buffer = buffer + "</Oscar>"
while not fnKillSignalled(): # run until signalled to end - call passed function to check for the signal
if lastUpdate < Time.GetCurrMS() - interval:
TargetManager.GetTargetManager().BroadcastUpstreamToType(buffer,ConnectionType.UpstreamOscar) # send heartbeat to all upstream Oscars
lastUpdate = Time.GetCurrMS()
Sleep.Sleep(0.25) #snooze for 250 ms
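#############
# Illustrative sketch (not part of the original file): the watchdog message
# assembled above, shown for a hypothetical upstream port of 5000. The
# xml.dom.minidom import at the top can be used to sanity-check the markup.
#############
def _exampleWatchdogXml(port=5000):
    buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
    buffer = buffer + "<Oscar Type=\"WatchdogTimer\">"
    buffer = buffer + "<Version>1.0</Version>"
    buffer = buffer + "<Port>" + str(port) + "</Port>"
    buffer = buffer + "</Oscar>"
    xml.dom.minidom.parseString(buffer) # raises ExpatError if the markup is malformed
    return buffer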
#############
# This class sends connection info to everything downstream periodically
# those downstream things (other Oscars and Marvins) use this to send packets back
#############
class ConnectionUpdateTimer(object):
def __init__(self):
name = "Connection Update Timer Thread"
self.__WorkerThread = ThreadManager.GetThreadManager().CreateThread(name,self.WorkerProc)
ThreadManager.GetThreadManager().StartThread(name)
def WorkerProc(self,fnKillSignalled,userData):
lastUpdate = 0
interval = Configuration.get().GetConnectionUpdateInterval()
buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
buffer = buffer + "<Oscar Type=\"ConnectionInformation\">"
buffer = buffer + "<Version>1.0</Version>"
buffer = buffer + "<OscarVersion>" + VersionMgr.ReadVer() + "</OscarVersion>"
buffer = buffer + "<ID>" + Configuration.get().GetID()+"</ID>"
buffer = buffer + "<Port>"+str(Configuration.get().GetDownstreamConnection().getPort())+"</Port>"
buffer = buffer + "</Oscar>"
#<?xml version="1.0" encoding="utf-8"?>
#<Oscar Type="ConnectionInformation">
# <Version>1.0</Version>
# <ID>Foo</Foo>
# <Port>Port</Port>
#</Oscar>
while not fnKillSignalled(): # run until signalled to end - call passed function to check for the signal
if lastUpdate < Time.GetCurrMS() - interval:
TargetManager.GetTargetManager().BroadcastDownstream(buffer,True,None) # send Connection Data to all downstream things (Oscars & Marvins)
lastUpdate = Time.GetCurrMS()
Configuration.get().RescanTargets()
else:
Sleep.Sleep(0.25)
TargetManager.GetTargetManager().CheckForRemovalOfDynamicMarvins()
|
[
"Util.Time.GetCurrMS",
"Helpers.ThreadManager.GetThreadManager",
"Helpers.TargetManager.GetTargetManager",
"Helpers.Configuration.get",
"Helpers.VersionMgr.ReadVer",
"Util.Sleep.Sleep"
] |
[((2534, 2551), 'Util.Sleep.Sleep', 'Sleep.Sleep', (['(0.25)'], {}), '(0.25)\n', (2545, 2551), False, 'from Util import Sleep\n'), ((1526, 1558), 'Helpers.ThreadManager.GetThreadManager', 'ThreadManager.GetThreadManager', ([], {}), '()\n', (1556, 1558), False, 'from Helpers import ThreadManager\n'), ((1604, 1636), 'Helpers.ThreadManager.GetThreadManager', 'ThreadManager.GetThreadManager', ([], {}), '()\n', (1634, 1636), False, 'from Helpers import ThreadManager\n'), ((2504, 2520), 'Util.Time.GetCurrMS', 'Time.GetCurrMS', ([], {}), '()\n', (2518, 2520), False, 'from Util import Time\n'), ((2911, 2943), 'Helpers.ThreadManager.GetThreadManager', 'ThreadManager.GetThreadManager', ([], {}), '()\n', (2941, 2943), False, 'from Helpers import ThreadManager\n'), ((2987, 3019), 'Helpers.ThreadManager.GetThreadManager', 'ThreadManager.GetThreadManager', ([], {}), '()\n', (3017, 3019), False, 'from Helpers import ThreadManager\n'), ((3132, 3151), 'Helpers.Configuration.get', 'Configuration.get', ([], {}), '()\n', (3149, 3151), False, 'from Helpers import Configuration\n'), ((3408, 3428), 'Helpers.VersionMgr.ReadVer', 'VersionMgr.ReadVer', ([], {}), '()\n', (3426, 3428), False, 'from Helpers import VersionMgr\n'), ((4225, 4241), 'Util.Time.GetCurrMS', 'Time.GetCurrMS', ([], {}), '()\n', (4239, 4241), False, 'from Util import Time\n'), ((4346, 4363), 'Util.Sleep.Sleep', 'Sleep.Sleep', (['(0.25)'], {}), '(0.25)\n', (4357, 4363), False, 'from Util import Sleep\n'), ((1752, 1771), 'Helpers.Configuration.get', 'Configuration.get', ([], {}), '()\n', (1769, 1771), False, 'from Helpers import Configuration\n'), ((2296, 2312), 'Util.Time.GetCurrMS', 'Time.GetCurrMS', ([], {}), '()\n', (2310, 2312), False, 'from Util import Time\n'), ((4013, 4029), 'Util.Time.GetCurrMS', 'Time.GetCurrMS', ([], {}), '()\n', (4027, 4029), False, 'from Util import Time\n'), ((2341, 2373), 'Helpers.TargetManager.GetTargetManager', 'TargetManager.GetTargetManager', ([], {}), '()\n', (2371, 2373), False, 'from Helpers import TargetManager\n'), ((3484, 3503), 'Helpers.Configuration.get', 'Configuration.get', ([], {}), '()\n', (3501, 3503), False, 'from Helpers import Configuration\n'), ((4058, 4090), 'Helpers.TargetManager.GetTargetManager', 'TargetManager.GetTargetManager', ([], {}), '()\n', (4088, 4090), False, 'from Helpers import TargetManager\n'), ((4275, 4294), 'Helpers.Configuration.get', 'Configuration.get', ([], {}), '()\n', (4292, 4294), False, 'from Helpers import Configuration\n'), ((4380, 4412), 'Helpers.TargetManager.GetTargetManager', 'TargetManager.GetTargetManager', ([], {}), '()\n', (4410, 4412), False, 'from Helpers import TargetManager\n'), ((2052, 2071), 'Helpers.Configuration.get', 'Configuration.get', ([], {}), '()\n', (2069, 2071), False, 'from Helpers import Configuration\n'), ((3559, 3578), 'Helpers.Configuration.get', 'Configuration.get', ([], {}), '()\n', (3576, 3578), False, 'from Helpers import Configuration\n')]
|
import jax
import chex
from typing import Union, Optional
from .decoder import Decoder
from ...utils import ParameterReshaper
class RandomDecoder(Decoder):
def __init__(
self,
num_encoding_dims: int,
placeholder_params: Union[chex.ArrayTree, chex.Array],
rng: chex.PRNGKey = jax.random.PRNGKey(0),
rademacher: bool = False,
identity: bool = False,
n_devices: Optional[int] = None,
):
super().__init__(
num_encoding_dims, placeholder_params, identity, n_devices
)
self.rademacher = rademacher
# Instantiate base reshaper class
self.base_reshaper = ParameterReshaper(
placeholder_params, identity, n_devices
)
# Sample a random matrix - Gaussian or Rademacher (+1/-1)
if not self.rademacher:
self.project_matrix = jax.random.normal(
rng, (self.num_encoding_dims, self.base_reshaper.total_params)
)
else:
self.project_matrix = jax.random.rademacher(
rng, (self.num_encoding_dims, self.base_reshaper.total_params)
)
def reshape(self, x: chex.Array) -> chex.ArrayTree:
"""Perform reshaping for random projection case."""
# 1. Project parameters to raw dimensionality using pre-sampled matrix
project_x = (
x @ self.project_matrix
) # (popsize, num_enc_dim) x (num_enc_dim, num_dims)
# 2. Reshape using base reshaper class
x_reshaped = self.base_reshaper.reshape(project_x)
return x_reshaped
def reshape_single(self, x: chex.Array) -> chex.ArrayTree:
"""Reshape a single flat vector using random projection matrix."""
x_re = x.reshape(1, self.num_encoding_dims)
# 1. Project parameters to raw dimensionality using pre-sampled matrix
project_x = (x_re @ self.project_matrix).squeeze()
# 2. Reshape using base reshaper class
x_reshaped = self.base_reshaper.reshape_single(project_x)
return x_reshaped
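# Illustrative usage sketch (not part of the original module), assuming a tiny
# parameter pytree; names and sizes are hypothetical. Encodings of length
# num_encoding_dims are projected back to the full parameter shapes.
def _example_random_decoder():
    import jax.numpy as jnp

    placeholder = {"w": jnp.zeros((3, 2)), "b": jnp.zeros(3)}  # 9 parameters total
    decoder = RandomDecoder(
        num_encoding_dims=4,
        placeholder_params=placeholder,
        rng=jax.random.PRNGKey(42),
    )
    x = jax.random.normal(jax.random.PRNGKey(0), (8, 4))  # population of 8
    return decoder.reshape(x)  # pytree batched over the population axis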
|
[
"jax.random.PRNGKey",
"jax.random.normal",
"jax.random.rademacher"
] |
[((313, 334), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (331, 334), False, 'import jax\n'), ((878, 964), 'jax.random.normal', 'jax.random.normal', (['rng', '(self.num_encoding_dims, self.base_reshaper.total_params)'], {}), '(rng, (self.num_encoding_dims, self.base_reshaper.\n total_params))\n', (895, 964), False, 'import jax\n'), ((1038, 1128), 'jax.random.rademacher', 'jax.random.rademacher', (['rng', '(self.num_encoding_dims, self.base_reshaper.total_params)'], {}), '(rng, (self.num_encoding_dims, self.base_reshaper.\n total_params))\n', (1059, 1128), False, 'import jax\n')]
|
##############################################################################
# Created by: <NAME>
# Email: <EMAIL>
#
# Note: This code was heavily inspired from https://github.com/junfu1115/DANet
##############################################################################
from __future__ import division
from torch.nn import Module, Conv2d, Parameter, Softmax
import torch
import torch.nn as nn
torch_ver = torch.__version__[:3]
__all__ = ['PAM', 'CAM']
class PAM(Module):
""" Position attention module"""
def __init__(self, in_dim, squeezing=8):
super(PAM, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim // squeezing, kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim // squeezing, kernel_size=1, dilation=2)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out
return out
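# Illustrative shape walk-through for PAM (not part of the original file),
# assuming a hypothetical input with B=2, C=64, H=W=16 and squeezing=8:
#   proj_query:       (2, 256, 8)      # B x (H*W) x C/8
#   proj_key:         (2, 8, 256)      # B x C/8 x (H*W)
#   energy/attention: (2, 256, 256)    # pairwise position affinities
#   out:              (2, 64, 16, 16)  # same shape as the input, scaled by gamma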
class CAM(Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM, self).__init__()
self.chanel_in = in_dim
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
        n = proj_query.shape[-1]
        # Subtract the per-channel mean so the Gram product below behaves like
        # a (scaled) channel covariance. Note that proj_query is a view of x,
        # so the in-place subtraction also centers proj_value further down.
        avg = torch.mean(proj_query, dim=2, keepdim=True).repeat([1, 1, n])
        proj_query -= avg
        proj_key = proj_query.permute(0, 2, 1)
        energy = torch.bmm(1 / n * proj_query, proj_key)
attention = self.softmax(energy)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out
return out
class WAM(nn.Module):
def __init__(self, in_channels, out_channels, squeezing_factor=4, squeezing_factor_pam=8, norm_layer=nn.BatchNorm2d):
super(WAM, self).__init__()
inter_channels = in_channels // squeezing_factor
self.conv5a = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.conv5c = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.sa = PAM(inter_channels, squeezing=squeezing_factor_pam)
self.sc = CAM(inter_channels)
self.conv51 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.conv52 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels),
nn.ReLU())
self.conv8 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))
def forward(self, x):
feat1 = self.conv5a(x)
sa_feat = self.sa(feat1)
sa_conv = self.conv51(sa_feat)
feat2 = self.conv5c(x)
sc_feat = self.sc(feat2)
sc_conv = self.conv52(sc_feat)
feat_sum = sa_conv + sc_conv
sasc_output = self.conv8(feat_sum)
return sasc_output
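# Illustrative smoke test (not part of the original file): WAM should preserve
# the spatial size and map in_channels to out_channels. The channel counts and
# batch shape below are hypothetical.
def _example_wam_shapes():
    module = WAM(in_channels=64, out_channels=19)
    x = torch.randn(2, 64, 32, 32)
    out = module(x)
    assert out.shape == (2, 19, 32, 32)
    return out.shape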
|
[
"torch.mean",
"torch.nn.Dropout2d",
"torch.bmm",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Softmax",
"torch.zeros"
] |
[((659, 734), 'torch.nn.Conv2d', 'Conv2d', ([], {'in_channels': 'in_dim', 'out_channels': '(in_dim // squeezing)', 'kernel_size': '(1)'}), '(in_channels=in_dim, out_channels=in_dim // squeezing, kernel_size=1)\n', (665, 734), False, 'from torch.nn import Module, Conv2d, Parameter, Softmax\n'), ((759, 850), 'torch.nn.Conv2d', 'Conv2d', ([], {'in_channels': 'in_dim', 'out_channels': '(in_dim // squeezing)', 'kernel_size': '(1)', 'dilation': '(2)'}), '(in_channels=in_dim, out_channels=in_dim // squeezing, kernel_size=1,\n dilation=2)\n', (765, 850), False, 'from torch.nn import Module, Conv2d, Parameter, Softmax\n'), ((873, 935), 'torch.nn.Conv2d', 'Conv2d', ([], {'in_channels': 'in_dim', 'out_channels': 'in_dim', 'kernel_size': '(1)'}), '(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n', (879, 935), False, 'from torch.nn import Module, Conv2d, Parameter, Softmax\n'), ((1006, 1021), 'torch.nn.Softmax', 'Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (1013, 1021), False, 'from torch.nn import Module, Conv2d, Parameter, Softmax\n'), ((1484, 1515), 'torch.bmm', 'torch.bmm', (['proj_query', 'proj_key'], {}), '(proj_query, proj_key)\n', (1493, 1515), False, 'import torch\n'), ((2027, 2042), 'torch.nn.Softmax', 'Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (2034, 2042), False, 'from torch.nn import Module, Conv2d, Parameter, Softmax\n'), ((2578, 2617), 'torch.bmm', 'torch.bmm', (['(1 / n * proj_query)', 'proj_key'], {}), '(1 / n * proj_query, proj_key)\n', (2587, 2617), False, 'import torch\n'), ((2717, 2749), 'torch.bmm', 'torch.bmm', (['attention', 'proj_value'], {}), '(attention, proj_value)\n', (2726, 2749), False, 'import torch\n'), ((967, 981), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (978, 981), False, 'import torch\n'), ((1988, 2002), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1999, 2002), False, 'import torch\n'), ((3128, 3192), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'inter_channels', '(3)'], {'padding': '(1)', 'bias': '(False)'}), '(in_channels, inter_channels, 3, padding=1, bias=False)\n', (3137, 3192), True, 'import torch.nn as nn\n'), ((3294, 3303), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3301, 3303), True, 'import torch.nn as nn\n'), ((3342, 3406), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'inter_channels', '(3)'], {'padding': '(1)', 'bias': '(False)'}), '(in_channels, inter_channels, 3, padding=1, bias=False)\n', (3351, 3406), True, 'import torch.nn as nn\n'), ((3508, 3517), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3515, 3517), True, 'import torch.nn as nn\n'), ((3664, 3731), 'torch.nn.Conv2d', 'nn.Conv2d', (['inter_channels', 'inter_channels', '(3)'], {'padding': '(1)', 'bias': '(False)'}), '(inter_channels, inter_channels, 3, padding=1, bias=False)\n', (3673, 3731), True, 'import torch.nn as nn\n'), ((3833, 3842), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3840, 3842), True, 'import torch.nn as nn\n'), ((3880, 3947), 'torch.nn.Conv2d', 'nn.Conv2d', (['inter_channels', 'inter_channels', '(3)'], {'padding': '(1)', 'bias': '(False)'}), '(inter_channels, inter_channels, 3, padding=1, bias=False)\n', (3889, 3947), True, 'import torch.nn as nn\n'), ((4049, 4058), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4056, 4058), True, 'import torch.nn as nn\n'), ((4096, 4120), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)', '(False)'], {}), '(0.1, False)\n', (4108, 4120), True, 'import torch.nn as nn\n'), ((4122, 4164), 'torch.nn.Conv2d', 'nn.Conv2d', (['inter_channels', 'out_channels', '(1)'], {}), '(inter_channels, 
out_channels, 1)\n', (4131, 4164), True, 'import torch.nn as nn\n'), ((2410, 2453), 'torch.mean', 'torch.mean', (['proj_query'], {'dim': '(2)', 'keepdim': '(True)'}), '(proj_query, dim=2, keepdim=True)\n', (2420, 2453), False, 'import torch\n')]
|
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib import layers
# tfd is used by the normal-ogive branch in _create_loss(); TensorFlow
# Probability is assumed here as the provider of the Normal distribution.
import tensorflow_probability as tfp
tfd = tfp.distributions
from memory import DKVMN
from utils import getLogger
# set logger
logger = getLogger('Deep-IRT-model')
def tensor_description(var):
"""Returns a compact and informative string about a tensor.
Args:
var: A tensor variable.
Returns:
a string with type and size, e.g.: (float32 1x8x8x1024).
"""
description = '(' + str(var.dtype.name) + ' '
sizes = var.get_shape()
for i, size in enumerate(sizes):
description += str(size)
if i < len(sizes) - 1:
description += 'x'
description += ')'
return description
class DeepIRTModel(object):
def __init__(self, args, sess, name="KT"):
self.args = args
self.sess = sess
self.name = name
self.create_model()
def create_model(self):
self._create_placeholder()
self._influence()
self._create_loss()
self._create_optimizer()
self._add_summary()
def _create_placeholder(self):
logger.info("Initializing Placeholder")
self.q_data = tf.placeholder(tf.int32, [self.args.batch_size, self.args.seq_len], name='q_data')
self.qa_data = tf.placeholder(tf.int32, [self.args.batch_size, self.args.seq_len], name='qa_data')
self.label = tf.placeholder(tf.float32, [self.args.batch_size, self.args.seq_len], name='label')
def _influence(self):
# Initialize Memory
logger.info("Initializing Key and Value Memory")
with tf.variable_scope("Memory"):
init_key_memory = tf.get_variable(
'key_memory_matrix', [self.args.memory_size, self.args.key_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
init_value_memory = tf.get_variable(
'value_memory_matrix', [self.args.memory_size, self.args.value_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
# Boardcast value-memory matrix to Shape (batch_size, memory_size, memory_value_state_dim)
init_value_memory = tf.tile( # tile the number of value-memory by the number of batch
tf.expand_dims(init_value_memory, 0), # make the batch-axis
tf.stack([self.args.batch_size, 1, 1])
)
logger.debug("Shape of init_value_memory = {}".format(init_value_memory.get_shape()))
logger.debug("Shape of init_key_memory = {}".format(init_key_memory.get_shape()))
# Initialize DKVMN
self.memory = DKVMN(
memory_size=self.args.memory_size,
key_memory_state_dim=self.args.key_memory_state_dim,
value_memory_state_dim=self.args.value_memory_state_dim,
init_key_memory=init_key_memory,
init_value_memory=init_value_memory,
name="DKVMN"
)
# Initialize Embedding
logger.info("Initializing Q and QA Embedding")
with tf.variable_scope('Embedding'):
q_embed_matrix = tf.get_variable(
'q_embed', [self.args.n_questions+1, self.args.key_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
qa_embed_matrix = tf.get_variable(
'qa_embed', [2*self.args.n_questions+1, self.args.value_memory_state_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
# Embedding to Shape (batch size, seq_len, memory_state_dim(d_k or d_v))
logger.info("Initializing Embedding Lookup")
q_embed_data = tf.nn.embedding_lookup(q_embed_matrix, self.q_data)
qa_embed_data = tf.nn.embedding_lookup(qa_embed_matrix, self.qa_data)
logger.debug("Shape of q_embed_data: {}".format(q_embed_data.get_shape()))
logger.debug("Shape of qa_embed_data: {}".format(qa_embed_data.get_shape()))
sliced_q_embed_data = tf.split(
value=q_embed_data, num_or_size_splits=self.args.seq_len, axis=1
)
sliced_qa_embed_data = tf.split(
value=qa_embed_data, num_or_size_splits=self.args.seq_len, axis=1
)
logger.debug("Shape of sliced_q_embed_data[0]: {}".format(sliced_q_embed_data[0].get_shape()))
logger.debug("Shape of sliced_qa_embed_data[0]: {}".format(sliced_qa_embed_data[0].get_shape()))
pred_z_values = list()
student_abilities = list()
question_difficulties = list()
reuse_flag = False
logger.info("Initializing Influence Procedure")
for i in range(self.args.seq_len):
# To reuse linear vectors
if i != 0:
reuse_flag = True
# Get the query and content vector
q = tf.squeeze(sliced_q_embed_data[i], 1)
qa = tf.squeeze(sliced_qa_embed_data[i], 1)
logger.debug("qeury vector q: {}".format(q))
logger.debug("content vector qa: {}".format(qa))
# Attention, correlation_weight: Shape (batch_size, memory_size)
self.correlation_weight = self.memory.attention(embedded_query_vector=q)
logger.debug("correlation_weight: {}".format(self.correlation_weight))
# Read process, read_content: (batch_size, value_memory_state_dim)
self.read_content = self.memory.read(correlation_weight=self.correlation_weight)
logger.debug("read_content: {}".format(self.read_content))
# Write process, new_memory_value: Shape (batch_size, memory_size, value_memory_state_dim)
self.new_memory_value = self.memory.write(self.correlation_weight, qa, reuse=reuse_flag)
logger.debug("new_memory_value: {}".format(self.new_memory_value))
# Build the feature vector -- summary_vector
mastery_level_prior_difficulty = tf.concat([self.read_content, q], 1)
self.summary_vector = layers.fully_connected(
inputs=mastery_level_prior_difficulty,
num_outputs=self.args.summary_vector_output_dim,
scope='SummaryOperation',
reuse=reuse_flag,
activation_fn=tf.nn.tanh
)
logger.debug("summary_vector: {}".format(self.summary_vector))
# Calculate the student ability level from summary vector
student_ability = layers.fully_connected(
inputs=self.summary_vector,
num_outputs=1,
scope='StudentAbilityOutputLayer',
reuse=reuse_flag,
activation_fn=None
)
# Calculate the question difficulty level from the question embedding
question_difficulty = layers.fully_connected(
inputs=q,
num_outputs=1,
scope='QuestionDifficultyOutputLayer',
reuse=reuse_flag,
activation_fn=tf.nn.tanh
)
# Prediction
pred_z_value = 3.0 * student_ability - question_difficulty
pred_z_values.append(pred_z_value)
student_abilities.append(student_ability)
question_difficulties.append(question_difficulty)
self.pred_z_values = tf.reshape(
tf.stack(pred_z_values, axis=1),
[self.args.batch_size, self.args.seq_len]
)
self.student_abilities = tf.reshape(
tf.stack(student_abilities, axis=1),
[self.args.batch_size, self.args.seq_len]
)
self.question_difficulties = tf.reshape(
tf.stack(question_difficulties, axis=1),
[self.args.batch_size, self.args.seq_len]
)
logger.debug("Shape of pred_z_values: {}".format(self.pred_z_values))
logger.debug("Shape of student_abilities: {}".format(self.student_abilities))
logger.debug("Shape of question_difficulties: {}".format(self.question_difficulties))
def _create_loss(self):
logger.info("Initializing Loss Function")
# convert into 1D
label_1d = tf.reshape(self.label, [-1])
pred_z_values_1d = tf.reshape(self.pred_z_values, [-1])
student_abilities_1d = tf.reshape(self.student_abilities, [-1])
question_difficulties_1d = tf.reshape(self.question_difficulties, [-1])
# find the label index that is not masking
index = tf.where(tf.not_equal(label_1d, tf.constant(-1., dtype=tf.float32)))
# masking
filtered_label = tf.gather(label_1d, index)
filtered_z_values = tf.gather(pred_z_values_1d, index)
filtered_student_abilities = tf.gather(student_abilities_1d, index)
filtered_question_difficulties = tf.gather(question_difficulties_1d, index)
logger.debug("Shape of filtered_label: {}".format(filtered_label))
logger.debug("Shape of filtered_z_values: {}".format(filtered_z_values))
logger.debug("Shape of filtered_student_abilities: {}".format(filtered_student_abilities))
logger.debug("Shape of filtered_question_difficulties: {}".format(filtered_question_difficulties))
if self.args.use_ogive_model:
# make prediction using normal ogive model
dist = tfd.Normal(loc=0.0, scale=1.0)
self.pred = dist.cdf(pred_z_values_1d)
filtered_pred = dist.cdf(filtered_z_values)
else:
self.pred = tf.math.sigmoid(pred_z_values_1d)
filtered_pred = tf.math.sigmoid(filtered_z_values)
# convert the prediction probability to logit, i.e., log(p/(1-p))
epsilon = 1e-6
clipped_filtered_pred = tf.clip_by_value(filtered_pred, epsilon, 1.-epsilon)
filtered_logits = tf.log(clipped_filtered_pred/(1-clipped_filtered_pred))
# cross entropy loss
cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=filtered_logits,
labels=filtered_label
)
)
self.loss = cross_entropy
def _create_optimizer(self):
with tf.variable_scope('Optimizer'):
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate)
gvs = self.optimizer.compute_gradients(self.loss)
clipped_gvs = [(tf.clip_by_norm(grad, self.args.max_grad_norm), var) for grad, var in gvs]
self.train_op = self.optimizer.apply_gradients(clipped_gvs)
def _add_summary(self):
tf.summary.scalar('Loss', self.loss)
self.tensorboard_writer = tf.summary.FileWriter(
logdir=self.args.tensorboard_dir,
graph=self.sess.graph
)
model_vars = tf.trainable_variables()
total_size = 0
total_bytes = 0
model_msg = ""
for var in model_vars:
# if var.num_elements() is None or [] assume size 0.
var_size = var.get_shape().num_elements() or 0
var_bytes = var_size * var.dtype.size
total_size += var_size
total_bytes += var_bytes
model_msg += ' '.join(
[var.name,
tensor_description(var),
'[%d, bytes: %d]' % (var_size, var_bytes)]
)
model_msg += '\n'
model_msg += 'Total size of variables: %d \n' % total_size
model_msg += 'Total bytes of variables: %d \n' % total_bytes
logger.info(model_msg)
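# Illustrative sketch (not part of the original model): the item-response
# prediction used above, p = sigmoid(3.0 * ability - difficulty), computed with
# plain numpy for hypothetical ability/difficulty values.
def _example_irt_prediction(ability=0.4, difficulty=0.1):
    z = 3.0 * ability - difficulty  # mirrors pred_z_value in _influence()
    return 1.0 / (1.0 + np.exp(-z))  # sigmoid; roughly 0.75 for the defaults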
|
[
"tensorflow.clip_by_value",
"tensorflow.trainable_variables",
"tensorflow.reshape",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.split",
"utils.getLogger",
"memory.DKVMN",
"tensorflow.gather",
"tensorflow.math.sigmoid",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.squeeze",
"tensorflow.clip_by_norm",
"tensorflow.truncated_normal_initializer",
"tensorflow.summary.scalar",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.nn.embedding_lookup",
"tensorflow.constant",
"tensorflow.log",
"tensorflow.expand_dims",
"tensorflow.train.AdamOptimizer"
] |
[((209, 236), 'utils.getLogger', 'getLogger', (['"""Deep-IRT-model"""'], {}), "('Deep-IRT-model')\n", (218, 236), False, 'from utils import getLogger\n'), ((1139, 1226), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.args.batch_size, self.args.seq_len]'], {'name': '"""q_data"""'}), "(tf.int32, [self.args.batch_size, self.args.seq_len], name=\n 'q_data')\n", (1153, 1226), True, 'import tensorflow as tf\n'), ((1245, 1333), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.args.batch_size, self.args.seq_len]'], {'name': '"""qa_data"""'}), "(tf.int32, [self.args.batch_size, self.args.seq_len], name=\n 'qa_data')\n", (1259, 1333), True, 'import tensorflow as tf\n'), ((1350, 1438), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.args.batch_size, self.args.seq_len]'], {'name': '"""label"""'}), "(tf.float32, [self.args.batch_size, self.args.seq_len], name=\n 'label')\n", (1364, 1438), True, 'import tensorflow as tf\n'), ((2609, 2857), 'memory.DKVMN', 'DKVMN', ([], {'memory_size': 'self.args.memory_size', 'key_memory_state_dim': 'self.args.key_memory_state_dim', 'value_memory_state_dim': 'self.args.value_memory_state_dim', 'init_key_memory': 'init_key_memory', 'init_value_memory': 'init_value_memory', 'name': '"""DKVMN"""'}), "(memory_size=self.args.memory_size, key_memory_state_dim=self.args.\n key_memory_state_dim, value_memory_state_dim=self.args.\n value_memory_state_dim, init_key_memory=init_key_memory,\n init_value_memory=init_value_memory, name='DKVMN')\n", (2614, 2857), False, 'from memory import DKVMN\n'), ((3659, 3710), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['q_embed_matrix', 'self.q_data'], {}), '(q_embed_matrix, self.q_data)\n', (3681, 3710), True, 'import tensorflow as tf\n'), ((3735, 3788), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['qa_embed_matrix', 'self.qa_data'], {}), '(qa_embed_matrix, self.qa_data)\n', (3757, 3788), True, 'import tensorflow as tf\n'), ((3989, 4063), 'tensorflow.split', 'tf.split', ([], {'value': 'q_embed_data', 'num_or_size_splits': 'self.args.seq_len', 'axis': '(1)'}), '(value=q_embed_data, num_or_size_splits=self.args.seq_len, axis=1)\n', (3997, 4063), True, 'import tensorflow as tf\n'), ((4117, 4192), 'tensorflow.split', 'tf.split', ([], {'value': 'qa_embed_data', 'num_or_size_splits': 'self.args.seq_len', 'axis': '(1)'}), '(value=qa_embed_data, num_or_size_splits=self.args.seq_len, axis=1)\n', (4125, 4192), True, 'import tensorflow as tf\n'), ((8160, 8188), 'tensorflow.reshape', 'tf.reshape', (['self.label', '[-1]'], {}), '(self.label, [-1])\n', (8170, 8188), True, 'import tensorflow as tf\n'), ((8216, 8252), 'tensorflow.reshape', 'tf.reshape', (['self.pred_z_values', '[-1]'], {}), '(self.pred_z_values, [-1])\n', (8226, 8252), True, 'import tensorflow as tf\n'), ((8284, 8324), 'tensorflow.reshape', 'tf.reshape', (['self.student_abilities', '[-1]'], {}), '(self.student_abilities, [-1])\n', (8294, 8324), True, 'import tensorflow as tf\n'), ((8360, 8404), 'tensorflow.reshape', 'tf.reshape', (['self.question_difficulties', '[-1]'], {}), '(self.question_difficulties, [-1])\n', (8370, 8404), True, 'import tensorflow as tf\n'), ((8586, 8612), 'tensorflow.gather', 'tf.gather', (['label_1d', 'index'], {}), '(label_1d, index)\n', (8595, 8612), True, 'import tensorflow as tf\n'), ((8641, 8675), 'tensorflow.gather', 'tf.gather', (['pred_z_values_1d', 'index'], {}), '(pred_z_values_1d, index)\n', (8650, 8675), True, 'import tensorflow as tf\n'), ((8713, 8751), 
'tensorflow.gather', 'tf.gather', (['student_abilities_1d', 'index'], {}), '(student_abilities_1d, index)\n', (8722, 8751), True, 'import tensorflow as tf\n'), ((8793, 8835), 'tensorflow.gather', 'tf.gather', (['question_difficulties_1d', 'index'], {}), '(question_difficulties_1d, index)\n', (8802, 8835), True, 'import tensorflow as tf\n'), ((9714, 9769), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['filtered_pred', 'epsilon', '(1.0 - epsilon)'], {}), '(filtered_pred, epsilon, 1.0 - epsilon)\n', (9730, 9769), True, 'import tensorflow as tf\n'), ((9793, 9852), 'tensorflow.log', 'tf.log', (['(clipped_filtered_pred / (1 - clipped_filtered_pred))'], {}), '(clipped_filtered_pred / (1 - clipped_filtered_pred))\n', (9799, 9852), True, 'import tensorflow as tf\n'), ((10562, 10598), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'self.loss'], {}), "('Loss', self.loss)\n", (10579, 10598), True, 'import tensorflow as tf\n'), ((10633, 10711), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', ([], {'logdir': 'self.args.tensorboard_dir', 'graph': 'self.sess.graph'}), '(logdir=self.args.tensorboard_dir, graph=self.sess.graph)\n', (10654, 10711), True, 'import tensorflow as tf\n'), ((10768, 10792), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10790, 10792), True, 'import tensorflow as tf\n'), ((1559, 1586), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Memory"""'], {}), "('Memory')\n", (1576, 1586), True, 'import tensorflow as tf\n'), ((2251, 2287), 'tensorflow.expand_dims', 'tf.expand_dims', (['init_value_memory', '(0)'], {}), '(init_value_memory, 0)\n', (2265, 2287), True, 'import tensorflow as tf\n'), ((2324, 2362), 'tensorflow.stack', 'tf.stack', (['[self.args.batch_size, 1, 1]'], {}), '([self.args.batch_size, 1, 1])\n', (2332, 2362), True, 'import tensorflow as tf\n'), ((3027, 3057), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Embedding"""'], {}), "('Embedding')\n", (3044, 3057), True, 'import tensorflow as tf\n'), ((4822, 4859), 'tensorflow.squeeze', 'tf.squeeze', (['sliced_q_embed_data[i]', '(1)'], {}), '(sliced_q_embed_data[i], 1)\n', (4832, 4859), True, 'import tensorflow as tf\n'), ((4877, 4915), 'tensorflow.squeeze', 'tf.squeeze', (['sliced_qa_embed_data[i]', '(1)'], {}), '(sliced_qa_embed_data[i], 1)\n', (4887, 4915), True, 'import tensorflow as tf\n'), ((5935, 5971), 'tensorflow.concat', 'tf.concat', (['[self.read_content, q]', '(1)'], {}), '([self.read_content, q], 1)\n', (5944, 5971), True, 'import tensorflow as tf\n'), ((6007, 6197), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', ([], {'inputs': 'mastery_level_prior_difficulty', 'num_outputs': 'self.args.summary_vector_output_dim', 'scope': '"""SummaryOperation"""', 'reuse': 'reuse_flag', 'activation_fn': 'tf.nn.tanh'}), "(inputs=mastery_level_prior_difficulty, num_outputs=\n self.args.summary_vector_output_dim, scope='SummaryOperation', reuse=\n reuse_flag, activation_fn=tf.nn.tanh)\n", (6029, 6197), False, 'from tensorflow.contrib import layers\n'), ((6458, 6601), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', ([], {'inputs': 'self.summary_vector', 'num_outputs': '(1)', 'scope': '"""StudentAbilityOutputLayer"""', 'reuse': 'reuse_flag', 'activation_fn': 'None'}), "(inputs=self.summary_vector, num_outputs=1, scope=\n 'StudentAbilityOutputLayer', reuse=reuse_flag, activation_fn=None)\n", (6480, 6601), False, 'from tensorflow.contrib import layers\n'), ((6808, 6948), 
'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', ([], {'inputs': 'q', 'num_outputs': '(1)', 'scope': '"""QuestionDifficultyOutputLayer"""', 'reuse': 'reuse_flag', 'activation_fn': 'tf.nn.tanh'}), "(inputs=q, num_outputs=1, scope=\n 'QuestionDifficultyOutputLayer', reuse=reuse_flag, activation_fn=tf.nn.tanh\n )\n", (6830, 6948), False, 'from tensorflow.contrib import layers\n'), ((7355, 7386), 'tensorflow.stack', 'tf.stack', (['pred_z_values'], {'axis': '(1)'}), '(pred_z_values, axis=1)\n', (7363, 7386), True, 'import tensorflow as tf\n'), ((7510, 7545), 'tensorflow.stack', 'tf.stack', (['student_abilities'], {'axis': '(1)'}), '(student_abilities, axis=1)\n', (7518, 7545), True, 'import tensorflow as tf\n'), ((7672, 7711), 'tensorflow.stack', 'tf.stack', (['question_difficulties'], {'axis': '(1)'}), '(question_difficulties, axis=1)\n', (7680, 7711), True, 'import tensorflow as tf\n'), ((9487, 9520), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['pred_z_values_1d'], {}), '(pred_z_values_1d)\n', (9502, 9520), True, 'import tensorflow as tf\n'), ((9549, 9583), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['filtered_z_values'], {}), '(filtered_z_values)\n', (9564, 9583), True, 'import tensorflow as tf\n'), ((9931, 10022), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'filtered_logits', 'labels': 'filtered_label'}), '(logits=filtered_logits, labels=\n filtered_label)\n', (9970, 10022), True, 'import tensorflow as tf\n'), ((10165, 10195), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Optimizer"""'], {}), "('Optimizer')\n", (10182, 10195), True, 'import tensorflow as tf\n'), ((10226, 10287), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.args.learning_rate'}), '(learning_rate=self.args.learning_rate)\n', (10248, 10287), True, 'import tensorflow as tf\n'), ((8505, 8540), 'tensorflow.constant', 'tf.constant', (['(-1.0)'], {'dtype': 'tf.float32'}), '(-1.0, dtype=tf.float32)\n', (8516, 8540), True, 'import tensorflow as tf\n'), ((1757, 1800), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1788, 1800), True, 'import tensorflow as tf\n'), ((1978, 2021), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (2009, 2021), True, 'import tensorflow as tf\n'), ((3219, 3262), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (3250, 3262), True, 'import tensorflow as tf\n'), ((3443, 3486), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (3474, 3486), True, 'import tensorflow as tf\n'), ((10378, 10424), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['grad', 'self.args.max_grad_norm'], {}), '(grad, self.args.max_grad_norm)\n', (10393, 10424), True, 'import tensorflow as tf\n')]
|
import requests
from pages.book_pages import AllBooksPage
# For extracting data from single page of a Website
page_content = requests.get("http://books.toscrape.com/index.html").content
page = AllBooksPage(page_content)
books = page.books
# For extracting data from the remaining pages of the website
for p_num in range(1, page.totalpages):  # extract data from pages 2..totalpages
url = f"http://books.toscrape.com/catalogue/page-{p_num+1}.html"
page_content = requests.get(url).content
page = AllBooksPage(page_content)
books.extend(page.books)
USER_CHOICE ="""Enter the choice accordingly:-
- "b" for printing BEST BOOKS
- "c" for printing CHEAPEST BOOKS
- "o" for printing ALL BOOKS CONTENT
- "q" for EXIT
Enter your choice:"""
def print_best_books():
best_books = sorted(books, key=lambda x: x.rating * -1)[:10] # Top 10 highest rated books
for book in best_books:
print(book)
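# Note: multiplying the rating by -1 sorts in descending order; an equivalent,
# arguably clearer form is sorted(books, key=lambda x: x.rating, reverse=True).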
def print_cheapest_books():
cheap_books = sorted(books, key=lambda x: x.price)[:10] # Top 10 least price books
for book in cheap_books:
print(book)
def overall_content():
for book in books:
print(book)
user_choices = {
"b": print_best_books,
"c": print_cheapest_books,
"o": overall_content
}
def menu():
user_input = input(USER_CHOICE).strip()
    while user_input != "q":
        if user_input in ("b", "c", "o"):
            user_choices[user_input]()
        else:
            print("<Wrong Input: Please! Enter correct choice>")
        user_input = input(USER_CHOICE).strip()
#Driver Function
if __name__ == "__main__":
menu()
|
[
"pages.book_pages.AllBooksPage",
"requests.get"
] |
[((195, 221), 'pages.book_pages.AllBooksPage', 'AllBooksPage', (['page_content'], {}), '(page_content)\n', (207, 221), False, 'from pages.book_pages import AllBooksPage\n'), ((126, 178), 'requests.get', 'requests.get', (['"""http://books.toscrape.com/index.html"""'], {}), "('http://books.toscrape.com/index.html')\n", (138, 178), False, 'import requests\n'), ((500, 526), 'pages.book_pages.AllBooksPage', 'AllBooksPage', (['page_content'], {}), '(page_content)\n', (512, 526), False, 'from pages.book_pages import AllBooksPage\n'), ((462, 479), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (474, 479), False, 'import requests\n')]
|
# tangoREST.py
#
# Implements objects used to pass state within Tango.
#
import redis
import pickle
import Queue
import logging
from datetime import datetime, timedelta
from config import Config
redisConnection = None
# Pass in an existing connection to redis, sometimes necessary for testing.
def getRedisConnection(connection=None):
global redisConnection
if redisConnection is None:
if connection:
redisConnection = connection
return redisConnection
redisConnection = redis.StrictRedis(
host=Config.REDIS_HOSTNAME, port=Config.REDIS_PORT, db=0)
return redisConnection
class InputFile():
"""
InputFile - Stores pointer to the path on the local machine and the
name of the file on the destination machine
"""
def __init__(self, localFile, destFile):
self.localFile = localFile
self.destFile = destFile
def __repr__(self):
return "InputFile(localFile: %s, destFile: %s)" % (self.localFile,
self.destFile)
class TangoMachine():
"""
TangoMachine - A description of the Autograding Virtual Machine
"""
def __init__(self, name="DefaultTestVM", image=None, vmms=None,
network=None, cores=None, memory=None, fallback_instance_type=None, disk=None,
domain_name=None, ec2_id=None, resume=None, id=None,
instance_id=None):
self.name = name
self.image = image
self.network = network
self.cores = cores
self.memory = memory
self.fallback_instance_type = fallback_instance_type
self.disk = disk
self.vmms = vmms
self.domain_name = domain_name
self.ec2_id = ec2_id
self.resume = resume
self.id = id
self.instance_id = id
# The following attributes can instruct vmms to set the test machine
# aside for further investigation.
self.keepForDebugging = False
self.notes = None
def __repr__(self):
return "TangoMachine(image: %s, vmms: %s, id: %s)" % (self.image, self.vmms, self.id)
class TangoJob():
"""
TangoJob - A job that is to be run on a TangoMachine
"""
def __init__(self, vm=None,
outputFile=None, name=None, limitingKey=None, input=None,
notifyURL=None, timeout=0,
maxOutputFileSize=Config.MAX_OUTPUT_FILE_SIZE,
accessKeyId=None, accessKey=None):
self.assigned = False
self.retries = 0
self.vm = vm
if input is None:
self.input = []
else:
self.input = input
self.outputFile = outputFile
self.name = name
self.limitingKey = limitingKey
self.notifyURL = notifyURL
self.timeout = timeout
self.trace = []
self.maxOutputFileSize = maxOutputFileSize
self._remoteLocation = None
self.accessKeyId = accessKeyId
self.accessKey = accessKey
self.tm = datetime.now()
self.startTime = None
self.endTime = None
def makeAssigned(self):
self.syncRemote()
self.assigned = True
self.updateRemote()
def makeUnassigned(self):
self.syncRemote()
self.assigned = False
self.updateRemote()
def isNotAssigned(self):
self.syncRemote()
return not self.assigned
def appendTrace(self, trace_str):
# trace attached to the object can be retrived and sent to rest api caller
self.syncRemote()
self.trace.append("%s|%s" % (datetime.now().ctime(), trace_str))
self.updateRemote()
def setId(self, new_id):
self.id = new_id
if self._remoteLocation is not None:
dict_hash = self._remoteLocation.split(":")[0]
key = self._remoteLocation.split(":")[1]
dictionary = TangoDictionary(dict_hash)
dictionary.delete(key)
self._remoteLocation = dict_hash + ":" + str(new_id)
self.updateRemote()
# Record in the job object that now is the time the job started.
def recordStartTime(self):
self.syncRemote()
self.startTime = datetime.now()
self.updateRemote()
# Record in the job object that now is the time the job completed.
def recordEndTime(self):
self.syncRemote()
self.endTime = datetime.now()
self.updateRemote()
# Calculate the running time of the job.
# If the job hasn't started (as determined by the presence of the startTime
# field), then return the timedelta value corresponding to 0.
# If the job has started but not finished (as determined by the presence of
# the endTime field), then return the timedelta between startTime and now.
# If the job has finished, then return the timedelta between startTime and
# endTime.
def runningTime(self):
if self.startTime == None:
return timedelta()
if self.endTime == None:
return datetime.now() - self.startTime
return self.endTime - self.startTime
def syncRemote(self):
if Config.USE_REDIS and self._remoteLocation is not None:
dict_hash = self._remoteLocation.split(":")[0]
key = self._remoteLocation.split(":")[1]
dictionary = TangoDictionary(dict_hash)
temp_job = dictionary.get(key)
if temp_job:
self.updateSelf(temp_job)
def updateRemote(self):
if Config.USE_REDIS and self._remoteLocation is not None:
dict_hash = self._remoteLocation.split(":")[0]
key = self._remoteLocation.split(":")[1]
dictionary = TangoDictionary(dict_hash)
dictionary.set(key, self)
def updateSelf(self, other_job):
self.assigned = other_job.assigned
self.retries = other_job.retries
self.vm = other_job.vm
self.input = other_job.input
self.outputFile = other_job.outputFile
self.name = other_job.name
self.limitingKey = other_job.limitingKey
self.notifyURL = other_job.notifyURL
self.timeout = other_job.timeout
self.trace = other_job.trace
self.maxOutputFileSize = other_job.maxOutputFileSize
self.startTime = other_job.startTime
self.endTime = other_job.endTime
def TangoIntValue(object_name, obj):
if Config.USE_REDIS:
return TangoRemoteIntValue(object_name, obj)
else:
return TangoNativeIntValue(object_name, obj)
class TangoRemoteIntValue():
def __init__(self, name, value, namespace="intvalue"):
"""The default connection parameters are: host='localhost', port=6379, db=0"""
self.__db = getRedisConnection()
self.key = '%s:%s' % (namespace, name)
cur_val = self.__db.get(self.key)
if cur_val is None:
self.set(value)
def increment(self):
return self.__db.incr(self.key)
def get(self):
return int(self.__db.get(self.key))
def set(self, val):
return self.__db.set(self.key, val)
class TangoNativeIntValue():
def __init__(self, name, value, namespace="intvalue"):
self.key = '%s:%s' % (namespace, name)
self.val = value
def increment(self):
self.val = self.val + 1
return self.val
def get(self):
return self.val
def set(self, val):
self.val = val
return val
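# Illustrative sketch (not part of the original module): both int-value
# backends expose the same increment/get/set interface, so callers can stay
# agnostic of Config.USE_REDIS. The counter name is hypothetical.
def _exampleIntValueUsage():
    counter = TangoIntValue("example-counter", 0)
    counter.increment()
    counter.increment()
    return counter.get()  # 2 with the native backend (Redis persists across runs)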
def TangoQueue(object_name):
if Config.USE_REDIS:
return TangoRemoteQueue(object_name)
else:
return Queue.Queue()
class TangoRemoteQueue():
"""Simple Queue with Redis Backend"""
def __init__(self, name, namespace="queue"):
"""The default connection parameters are: host='localhost', port=6379, db=0"""
self.__db = getRedisConnection()
self.key = '%s:%s' % (namespace, name)
self.name = name
# for debugging. return a readable string representation
def dump(self):
unpickled_obj = self.__db.lrange(self.key, 0, -1)
objs = []
for obj in unpickled_obj:
objs.append(pickle.loads(obj))
return objs
def qsize(self):
"""Return the approximate size of the queue."""
return self.__db.llen(self.key)
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return self.qsize() == 0
def put(self, item):
"""Put item into the queue."""
pickled_item = pickle.dumps(item)
self.__db.rpush(self.key, pickled_item)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args block is true and timeout is None (the default), block
if necessary until an item is available."""
        if block:
            # blpop returns a (key, value) tuple (or None on timeout);
            # unwrap it so only the pickled payload reaches pickle.loads.
            item = self.__db.blpop(self.key, timeout=timeout)
            if item:
                item = item[1]
        else:
            item = self.__db.lpop(self.key)
        item = pickle.loads(item)
return item
def make_empty(self):
while True:
item = self.__db.lpop(self.key)
if item is None:
break
def get_nowait(self):
"""Equivalent to get(False)."""
return self.get(False)
def __getstate__(self):
ret = {}
ret['key'] = self.key
return ret
def __setstate__(self, dict):
self.__db = getRedisConnection()
self.__dict__.update(dict)
# This is an abstract class that decides on
# if we should initiate a TangoRemoteDictionary or TangoNativeDictionary
# Since there are no abstract classes in Python, we use a simple method
def TangoDictionary(object_name):
if Config.USE_REDIS:
return TangoRemoteDictionary(object_name)
else:
return TangoNativeDictionary()
# Dictionary that maintains a separate dictionary D.
# Suppose the original dictionary contains mappings k --> v.
# Then a wrapping dictionary D will still contain mappings k --> v,
# but also maintains a side dictionary D' with mappings f(v) --> (k, v).
# This dictionary D' is stored as the "wrapped" field.
# f should not change over the relevant lifetime of the value.
class WrappingDictionary():
def __init__(self, object_name, dictionary, f):
self.wrapped = TangoDictionary(object_name)
self.f = f
self.dictionary = dictionary
def set(self, id, obj):
self.wrapped.set(self.f(obj), id)
return self.dictionary.set(id, obj)
def get(self, id):
return self.dictionary.get(id)
def getWrapped(self, k):
id = self.wrapped.get(k)
val = self.dictionary.get(id)
if id is None or val is None:
return None
else:
return (id, val)
def keys(self):
return self.dictionary.keys()
def values(self):
return self.dictionary.values()
def delete(self, id):
self.wrapped.delete(self.f(self.dictionary.get(id)))
return self.dictionary.delete(id)
def _clean(self):
self.wrapped._clean()
return self.dictionary._clean()
def iteritems(self):
return self.dictionary.iteritems();
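# Illustrative usage sketch for WrappingDictionary (assumes the stored objects
# expose a unique `name` attribute, as the job objects above do):
#
#   jobs = WrappingDictionary("jobNames", TangoDictionary("jobs"), lambda j: j.name)
#   jobs.set(5, job)              # stores 5 -> job and also job.name -> 5
#   jobs.getWrapped(job.name)     # -> (5, job), or None if the name is unknown
#   jobs.delete(5)                # removes both the forward and reverse entries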
class TangoRemoteDictionary():
def __init__(self, object_name):
self.r = getRedisConnection()
self.hash_name = object_name
self.log = logging.getLogger("TangoRemoteDictionary")
def set(self, id, obj):
pickled_obj = pickle.dumps(obj)
if hasattr(obj, '_remoteLocation'):
obj._remoteLocation = self.hash_name + ":" + str(id)
self.r.hset(self.hash_name, str(id), pickled_obj)
return str(id)
def get(self, id):
if str(id) in self.r.hkeys(self.hash_name):
unpickled_obj = self.r.hget(self.hash_name, str(id))
obj = pickle.loads(unpickled_obj)
return obj
else:
return None
def keys(self):
return self.r.hkeys(self.hash_name)
def values(self):
vals = self.r.hvals(self.hash_name)
valslist = []
for val in vals:
valslist.append(pickle.loads(val))
return valslist
def delete(self, id):
self._remoteLocation = None
self.r.hdel(self.hash_name, id)
def _clean(self):
# only for testing
self.r.delete(self.hash_name)
def iteritems(self):
# find all non-empty spots in the job id spectrum (actual jobs) and sort
# by the time of creation to prevent starvation of jobs with larger ids
return iter(sorted([(i, self.get(i)) for i in xrange(1,Config.MAX_JOBID+1)
if self.get(i) != None], key=lambda x: x[1].tm))
class TangoNativeDictionary():
def __init__(self):
self.dict = {}
def set(self, id, obj):
self.dict[str(id)] = obj
def get(self, id):
if str(id) in self.dict.keys():
return self.dict[str(id)]
else:
return None
def keys(self):
return self.dict.keys()
def values(self):
return self.dict.values()
def delete(self, id):
if str(id) in self.dict.keys():
del self.dict[str(id)]
def iteritems(self):
return iter(sorted([(i, self.get(i)) for i in xrange(1,Config.MAX_JOBID+1)
if self.get(i) != None], key=lambda x: x[1].tm))
def _clean(self):
# only for testing
return
|
[
"pickle.loads",
"Queue.Queue",
"datetime.timedelta",
"redis.StrictRedis",
"datetime.datetime.now",
"logging.getLogger",
"pickle.dumps"
] |
[((522, 597), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'Config.REDIS_HOSTNAME', 'port': 'Config.REDIS_PORT', 'db': '(0)'}), '(host=Config.REDIS_HOSTNAME, port=Config.REDIS_PORT, db=0)\n', (539, 597), False, 'import redis\n'), ((3048, 3062), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3060, 3062), False, 'from datetime import datetime, timedelta\n'), ((4235, 4249), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4247, 4249), False, 'from datetime import datetime, timedelta\n'), ((4428, 4442), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4440, 4442), False, 'from datetime import datetime, timedelta\n'), ((7623, 7636), 'Queue.Queue', 'Queue.Queue', ([], {}), '()\n', (7634, 7636), False, 'import Queue\n'), ((8541, 8559), 'pickle.dumps', 'pickle.dumps', (['item'], {}), '(item)\n', (8553, 8559), False, 'import pickle\n'), ((9043, 9061), 'pickle.loads', 'pickle.loads', (['item'], {}), '(item)\n', (9055, 9061), False, 'import pickle\n'), ((11402, 11444), 'logging.getLogger', 'logging.getLogger', (['"""TangoRemoteDictionary"""'], {}), "('TangoRemoteDictionary')\n", (11419, 11444), False, 'import logging\n'), ((11496, 11513), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (11508, 11513), False, 'import pickle\n'), ((5003, 5014), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (5012, 5014), False, 'from datetime import datetime, timedelta\n'), ((11865, 11892), 'pickle.loads', 'pickle.loads', (['unpickled_obj'], {}), '(unpickled_obj)\n', (11877, 11892), False, 'import pickle\n'), ((5067, 5081), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5079, 5081), False, 'from datetime import datetime, timedelta\n'), ((8175, 8192), 'pickle.loads', 'pickle.loads', (['obj'], {}), '(obj)\n', (8187, 8192), False, 'import pickle\n'), ((12161, 12178), 'pickle.loads', 'pickle.loads', (['val'], {}), '(val)\n', (12173, 12178), False, 'import pickle\n'), ((3623, 3637), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3635, 3637), False, 'from datetime import datetime, timedelta\n')]
|
# -*- coding: utf-8 -*-
from django.db import models
from model_utils import Choices
class Persona(models.Model):
    nombres = models.CharField(max_length=100, blank=True, null=True)
    apellidos = models.CharField(max_length=100, blank=True, null=True)
    razon_social = models.CharField(max_length=100, blank=True, null=True)
    dni = models.CharField(max_length=10, blank=True, null=True, unique=True)
    cuit = models.CharField(max_length=15, blank=True, null=True)
    domicilio = models.ForeignKey("Domicilio")
    telefono = models.CharField(max_length=20)
    def __unicode__(self):
        return "%s, %s (%s)" % (self.apellidos, self.nombres, self.dni)
class Titulo(models.Model):
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
class Profesional(models.Model):
    nombres = models.CharField(max_length=100)
    apellidos = models.CharField(max_length=100)
    dni = models.CharField('DNI',
                           max_length=10,
                           blank=True,
                           null=True)
    titulo = models.ForeignKey(Titulo, blank=True, null=True)
    matricula = models.CharField('Número Matrícula',
                                 max_length=50,
                                 blank=True,
                                 null=True)
    telefono = models.CharField('Teléfono',
                                max_length=20,
                                blank=True,
                                null=True)
    def __unicode__(self):
        return "%s, %s" % (self.apellidos, self.nombres)
    class Meta:
        verbose_name_plural = 'Profesionales'
class Domicilio(models.Model):
    direccion = models.CharField(max_length=80)
    descripcion = models.TextField(blank=True, null=True)
    localidad = models.ForeignKey('Localidad')
    def __unicode__(self):
        return self.direccion
class Localidad(models.Model):
    nombre = models.CharField(max_length=50)
    codigo_postal = models.CharField(max_length=15)
    def __unicode__(self):
        return "%s (%s)" % (self.nombre, self.codigo_postal)
    class Meta:
        verbose_name_plural = 'Localidades'
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.ForeignKey"
] |
[((132, 187), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (148, 187), False, 'from django.db import models\n'), ((204, 259), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (220, 259), False, 'from django.db import models\n'), ((279, 334), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (295, 334), False, 'from django.db import models\n'), ((346, 413), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)', 'null': '(True)', 'unique': '(True)'}), '(max_length=10, blank=True, null=True, unique=True)\n', (362, 413), False, 'from django.db import models\n'), ((425, 479), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'blank': '(True)', 'null': '(True)'}), '(max_length=15, blank=True, null=True)\n', (441, 479), False, 'from django.db import models\n'), ((497, 527), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Domicilio"""'], {}), "('Domicilio')\n", (514, 527), False, 'from django.db import models\n'), ((543, 574), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (559, 574), False, 'from django.db import models\n'), ((718, 750), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (734, 750), False, 'from django.db import models\n'), ((855, 887), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (871, 887), False, 'from django.db import models\n'), ((904, 936), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (920, 936), False, 'from django.db import models\n'), ((947, 1008), 'django.db.models.CharField', 'models.CharField', (['"""DNI"""'], {'max_length': '(10)', 'blank': '(True)', 'null': '(True)'}), "('DNI', max_length=10, blank=True, null=True)\n", (963, 1008), False, 'from django.db import models\n'), ((1103, 1151), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Titulo'], {'blank': '(True)', 'null': '(True)'}), '(Titulo, blank=True, null=True)\n', (1120, 1151), False, 'from django.db import models\n'), ((1168, 1242), 'django.db.models.CharField', 'models.CharField', (['"""Número Matrícula"""'], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), "('Número Matrícula', max_length=50, blank=True, null=True)\n", (1184, 1242), False, 'from django.db import models\n'), ((1357, 1423), 'django.db.models.CharField', 'models.CharField', (['"""Teléfono"""'], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), "('Teléfono', max_length=20, blank=True, null=True)\n", (1373, 1423), False, 'from django.db import models\n'), ((1717, 1748), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)'}), '(max_length=80)\n', (1733, 1748), False, 'from django.db import models\n'), ((1767, 1806), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1783, 1806), False, 'from django.db import models\n'), ((1823, 1853), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Localidad"""'], {}), "('Localidad')\n", (1840, 1853), False, 'from django.db import 
models\n'), ((1958, 1989), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1974, 1989), False, 'from django.db import models\n'), ((2010, 2041), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (2026, 2041), False, 'from django.db import models\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2016 SWITCH http://www.switch.ch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import re
import json
__version__ = '1.7.0'
# default ceph values
CEPH_ADM_COMMAND = '/usr/sbin/cephadm'
CEPH_COMMAND = '/usr/bin/ceph'
# nagios exit code
STATUS_OK = 0
STATUS_WARNING = 1
STATUS_ERROR = 2
STATUS_UNKNOWN = 3
def main():
# parse args
parser = argparse.ArgumentParser(description="'ceph health' nagios plugin.")
parser.add_argument('-e','--exe', help='ceph executable [%s]' % CEPH_COMMAND)
parser.add_argument('-A','--admexe', help='cephadm executable [%s]' % CEPH_ADM_COMMAND)
parser.add_argument('--cluster', help='ceph cluster name')
parser.add_argument('-c','--conf', help='alternative ceph conf file')
parser.add_argument('-m','--monaddress', help='ceph monitor address[:port]')
parser.add_argument('-i','--id', help='ceph client id')
parser.add_argument('-n','--name', help='ceph client name')
parser.add_argument('-k','--keyring', help='ceph client keyring file')
parser.add_argument('--check', help='regexp of which check(s) to check (luminous+) '
"Can be inverted, e.g. '^((?!(PG_DEGRADED|OBJECT_MISPLACED)$).)*$'")
parser.add_argument('-w','--whitelist', help='whitelist regexp for ceph health warnings')
parser.add_argument('-d','--detail', help="exec 'ceph health detail'", action='store_true')
parser.add_argument('-V','--version', help='show version and exit', action='store_true')
parser.add_argument('-a','--cephadm', help='uses cephadm to execute the command', action='store_true')
parser.add_argument('-s','--skip-muted', help='skip muted checks', action='store_true')
args = parser.parse_args()
# validate args
cephadm_exec = args.admexe if args.admexe else CEPH_ADM_COMMAND
ceph_exec = args.exe if args.exe else CEPH_COMMAND
if args.cephadm:
if not os.path.exists(cephadm_exec):
print("ERROR: cephadm executable '%s' doesn't exist" % cephadm_exec)
return STATUS_UNKNOWN
else:
if not os.path.exists(ceph_exec):
print("ERROR: ceph executable '%s' doesn't exist" % ceph_exec)
return STATUS_UNKNOWN
if args.version:
print('version %s' % __version__)
return STATUS_OK
if args.conf and not os.path.exists(args.conf):
print("ERROR: ceph conf file '%s' doesn't exist" % args.conf)
return STATUS_UNKNOWN
if args.keyring and not os.path.exists(args.keyring):
print("ERROR: keyring file '%s' doesn't exist" % args.keyring)
return STATUS_UNKNOWN
# build command
ceph_health = [ceph_exec]
if args.cephadm:
# Prepend the command with the cephadm binary and the shell command
ceph_health = [cephadm_exec, 'shell'] + ceph_health
if args.monaddress:
ceph_health.append('-m')
ceph_health.append(args.monaddress)
if args.cluster:
ceph_health.append('--cluster')
ceph_health.append(args.cluster)
if args.conf:
ceph_health.append('-c')
ceph_health.append(args.conf)
if args.id:
ceph_health.append('--id')
ceph_health.append(args.id)
if args.name:
ceph_health.append('--name')
ceph_health.append(args.name)
if args.keyring:
ceph_health.append('--keyring')
ceph_health.append(args.keyring)
ceph_health.append('health')
if args.detail:
ceph_health.append('detail')
ceph_health.append('--format')
ceph_health.append('json')
#print(ceph_health)
# exec command
p = subprocess.Popen(ceph_health,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output, err = p.communicate()
try:
output = json.loads(output)
except ValueError:
output = dict()
# parse output
# print "output:", output
#print "err:", err
if output:
ret = STATUS_OK
msg = ""
extended = []
if 'checks' in output:
#luminous
for check,status in output['checks'].items():
# skip check if not selected
if args.check and not re.search(args.check, check):
continue
if args.skip_muted and ('muted' in status and status['muted']):
continue
check_detail = "%s( %s )" % (check, status['summary']['message'])
if status["severity"] == "HEALTH_ERR":
extended.append(msg)
msg = "CRITICAL: %s" % check_detail
ret = STATUS_ERROR
continue
if args.whitelist and re.search(args.whitelist,status['summary']['message']):
continue
check_msg = "WARNING: %s" % check_detail
if not msg:
msg = check_msg
ret = STATUS_WARNING
else:
extended.append(check_msg)
else:
#pre-luminous
for status in output["summary"]:
if status != "HEALTH_OK":
if status == "HEALTH_ERROR":
msg = "CRITICAL: %s" % status['summary']
ret = STATUS_ERROR
continue
if args.whitelist and re.search(args.whitelist,status['summary']):
continue
if not msg:
msg = "WARNING: %s" % status['summary']
ret = STATUS_WARNING
else:
extended.append("WARNING: %s" % status['summary'])
if msg:
print(msg)
else:
print("HEALTH OK")
if extended: print('\n'.join(extended))
return ret
elif err:
# read only first line of error
one_line = err.split('\n')[0]
if '-1 ' in one_line:
idx = one_line.rfind('-1 ')
print('ERROR: %s: %s' % (ceph_exec, one_line[idx+len('-1 '):]))
else:
print(one_line)
return STATUS_UNKNOWN
if __name__ == "__main__":
sys.exit(main())
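# Example invocations (illustrative; the file name check_ceph_health.py is a
# placeholder for however this script is installed):
#
#   ./check_ceph_health.py -e /usr/bin/ceph -d
#   ./check_ceph_health.py --check '^((?!(PG_DEGRADED|OBJECT_MISPLACED)$).)*$'
#
# The exit code follows the nagios convention defined above:
# 0 OK, 1 WARNING, 2 CRITICAL/ERROR, 3 UNKNOWN.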
|
[
"subprocess.Popen",
"argparse.ArgumentParser",
"json.loads",
"os.path.exists",
"re.search"
] |
[((1026, 1093), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\'ceph health\' nagios plugin."""'}), '(description="\'ceph health\' nagios plugin.")\n', (1049, 1093), False, 'import argparse\n'), ((4262, 4339), 'subprocess.Popen', 'subprocess.Popen', (['ceph_health'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(ceph_health, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (4278, 4339), False, 'import subprocess\n'), ((4398, 4416), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (4408, 4416), False, 'import json\n'), ((2561, 2589), 'os.path.exists', 'os.path.exists', (['cephadm_exec'], {}), '(cephadm_exec)\n', (2575, 2589), False, 'import os\n'), ((2731, 2756), 'os.path.exists', 'os.path.exists', (['ceph_exec'], {}), '(ceph_exec)\n', (2745, 2756), False, 'import os\n'), ((2982, 3007), 'os.path.exists', 'os.path.exists', (['args.conf'], {}), '(args.conf)\n', (2996, 3007), False, 'import os\n'), ((3138, 3166), 'os.path.exists', 'os.path.exists', (['args.keyring'], {}), '(args.keyring)\n', (3152, 3166), False, 'import os\n'), ((5322, 5377), 're.search', 're.search', (['args.whitelist', "status['summary']['message']"], {}), "(args.whitelist, status['summary']['message'])\n", (5331, 5377), False, 'import re\n'), ((4810, 4838), 're.search', 're.search', (['args.check', 'check'], {}), '(args.check, check)\n', (4819, 4838), False, 'import re\n'), ((5989, 6033), 're.search', 're.search', (['args.whitelist', "status['summary']"], {}), "(args.whitelist, status['summary'])\n", (5998, 6033), False, 'import re\n')]
|
from easydict import EasyDict
D = EasyDict()
D.num_gpus = 4
D.batch_size = 24
D.epochs = 80
D.decay_epochs = 20
D.decay_rate = 0.5
D.learning_rate = 1e-3
D.input_dataset = 'ec_pf_tp_AT24_33x33_025' #'multiorigin_cf_tp_AT24_33x33_025'
D.block_type = 'nolocal2d' # nolocal2d conv2d
D.merge_type = 'add' # concat add
D.model_dir = './summary_and_ckpt/'
D.is_test = False
D.is_cross = False
D.sub_dir = 'cross/'
D.data_dir = './datasets/'
D.num_filters = 64
D.cut_dim = 16
D.input_h = 33
D.input_w = 33
D.splited_channel = 50
D.input_channel = 50
D.out_channel = 1
D.res_dense_block = 4
D.dense_block = 3
D.in_dense_layers = 4
D.enable_function = False
D.model_name_reg = "model.ckpt"
|
[
"easydict.EasyDict"
] |
[((35, 45), 'easydict.EasyDict', 'EasyDict', ([], {}), '()\n', (43, 45), False, 'from easydict import EasyDict\n')]
|
# This binary_search_tree.py is an implementation of binary search tree based on the idea from CLRS, Chapter 12
from tree_visualization import tree_visualize
class Binary_tree:
    def __init__(self):
        self.root = None
    def node(self, key, p=None, left=None, right=None):
        return {
            "key": key,
            "p": p,
            "left": left,
            "right": right
        }
    def tree_insert(self, key):
        new_node = self.node(key)
        p = None
        node = self.root
        while node is not None:
            p = node
            if new_node["key"] < node["key"]:
                node = node["left"]
            else:
                node = node["right"]
        new_node["p"] = p
        if p is None:
            # Tree is empty
            self.root = new_node
        elif new_node["key"] < p["key"]:
            p["left"] = new_node
        else:
            p["right"] = new_node
    def inorder_tree_walk(self, alternative_root="not provided"):
        if alternative_root == "not provided":
            root = self.root
        else:
            root = alternative_root
        if root is not None:
            self.inorder_tree_walk(root["left"])
            print(str(root["key"]) + ", ", end="")
            self.inorder_tree_walk(root["right"])
    def tree_search(self, k):
        node = self.root
        while node is not None and node["key"] != k:
            if k < node["key"]:
                node = node["left"]
            else:
                node = node["right"]
        return node
    def tree_minimum(self, node=None):
        if node is None:
            node = self.root
        while node["left"] is not None:
            node = node["left"]
        return node
    def tree_maximum(self, node=None):
        if node is None:
            node = self.root
        while node["right"] is not None:
            node = node["right"]
        return node
    def tree_successor(self, node=None):
        if node is None:
            node = self.root
        if node["right"] is not None:
            return self.tree_minimum(node["right"])
        p = node["p"]
        while p is not None and p["right"] is not None and node["key"] == p["right"]["key"]:
            node = p
            p = node["p"]
        return p
    def transplant(self, u, v):
        if u["p"] is None:
            self.root = v
        elif u["p"]["left"] is not None and u["key"] == u["p"]["left"]["key"]:
            u["p"]["left"] = v
        else:
            u["p"]["right"] = v
        if v is not None:
            v["p"] = u["p"]
    def tree_delete(self, k):
        z = self.tree_search(k)
        if z is None:
            return z
        if z["left"] is None:
            self.transplant(z, z["right"])
        elif z["right"] is None:
            self.transplant(z, z["left"])
        else:
            y = self.tree_minimum(z["right"])
            if y["p"]["key"] != z["key"]:
                self.transplant(y, y["right"])
                y["right"] = z["right"]
                if y["right"] is not None:
                    y["right"]["p"] = y
            self.transplant(z, y)
            y["left"] = z["left"]
            y["left"]["p"] = y
# Running simple examples
my_tree = Binary_tree()
my_tree.tree_insert(18)
my_tree.tree_insert(14)
my_tree.tree_insert(25)
my_tree.tree_insert(1)
my_tree.tree_insert(21)
my_tree.tree_insert(19)
my_tree.tree_insert(12)
my_tree.tree_insert(23)
my_tree.tree_insert(16)
print("my_tree.root " + str(my_tree.root))
my_tree.inorder_tree_walk()
tree_visualize(my_tree.root)
# tree_visualize(my_tree.root, True)
print("my_tree.tree_search(18)[key]: " + str(my_tree.tree_search(18)["key"]))
print("my_tree.tree_minimum()[key]: " + str(my_tree.tree_minimum()["key"]))
print("my_tree.tree_maximum()[key]: " + str(my_tree.tree_maximum()["key"]))
print("my_tree.tree_successor()[key]: " + str(my_tree.tree_successor()["key"]))
print("my_tree.tree_successor(my_tree.tree_search(1))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(1))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(16))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(16))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(18))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(18))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(12))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(12))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(21))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(21))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(23))[key]: " + str(my_tree.tree_successor(my_tree.tree_search(23))["key"]))
print("my_tree.tree_successor(my_tree.tree_search(25)): " + str(my_tree.tree_successor(my_tree.tree_search(25))))
tree_visualize(my_tree.root)
my_tree.tree_delete(25)
my_tree.tree_delete(14)
my_tree.tree_delete(18)
my_tree.tree_delete(1)
my_tree.tree_insert(18)
tree_visualize(my_tree.root)
|
[
"tree_visualization.tree_visualize"
] |
[((3595, 3623), 'tree_visualization.tree_visualize', 'tree_visualize', (['my_tree.root'], {}), '(my_tree.root)\n', (3609, 3623), False, 'from tree_visualization import tree_visualize\n'), ((4841, 4869), 'tree_visualization.tree_visualize', 'tree_visualize', (['my_tree.root'], {}), '(my_tree.root)\n', (4855, 4869), False, 'from tree_visualization import tree_visualize\n'), ((4989, 5017), 'tree_visualization.tree_visualize', 'tree_visualize', (['my_tree.root'], {}), '(my_tree.root)\n', (5003, 5017), False, 'from tree_visualization import tree_visualize\n')]
|
# Generated by Django 2.2.4 on 2019-11-03 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('recupero', '0002_auto_20191103_1159'),
    ]
    operations = [
        migrations.AddField(
            model_name='tipoprestacion',
            name='anio_update',
            field=models.PositiveIntegerField(default=2019, help_text='Si viene del nomenclador indicar de que versión es'),
        ),
    ]
|
[
"django.db.models.PositiveIntegerField"
] |
[((348, 458), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(2019)', 'help_text': '"""Si viene del nomenclador indicar de que versión es"""'}), "(default=2019, help_text=\n 'Si viene del nomenclador indicar de que versión es')\n", (375, 458), False, 'from django.db import migrations, models\n')]
|
from django.shortcuts import render
def error_404(request, *args, **argv):
    data = {}
    return render(request, 'error_404.html', data)
|
[
"django.shortcuts.render"
] |
[((109, 148), 'django.shortcuts.render', 'render', (['request', '"""error_404.html"""', 'data'], {}), "(request, 'error_404.html', data)\n", (115, 148), False, 'from django.shortcuts import render\n')]
|
import requests, random, time
from bs4 import BeautifulSoup
#These functions are what I should have used in the first place lol
def getter(url): #extracts images from a url and returns all the images as a list
try:
imglist = []
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
mydivs = (soup.find_all("div", {"class": "section-content"}))
bs = BeautifulSoup(str(mydivs), 'html.parser')
images = bs.find_all('img')
for img in images:
#print(img)
if img.has_attr('data-src'):
#print(img['data-src'])
imglist.append(img['data-src'])
else:
#print("fuck")
pass
#print(imglist, " - ---- -- -")
return imglist
except Exception as e:
print(e)
def pager(start, num=3): # this function is useful
nummy = 1
imlist = getter(start+str(nummy))
while len(imlist) < num:
print(1)
imlist.append(getter(start + str(nummy)))
nummy +=1
resultP = imlist[:num]
return resultP
#class horni:
#Go to horny jail
# main = "https://yiff-party.com/"
def randomIMG(): # this function is an abomination and I should have used getter() and pager() instead but I'm too lazy to change it now
try:
listofimg = []
pageNUM = random.randint(5,480)
page = requests.get(f"https://yiff-party.com/page/{pageNUM}/")
soup = BeautifulSoup(page.content, 'html.parser')
mydivs = (soup.find_all("div", {"class": "section-content"}))
bs = BeautifulSoup(str(mydivs), 'html.parser')
images = bs.find_all('img')
for img in images:
#print(img)
if img.has_attr('data-src'):
#print(img['data-src'])
listofimg.append(img['data-src'])
else:
#print("fuck")
pass
result = random.choice(listofimg)
#print(result)
return result
except Exception as e:
print(e)
def newest(cat="main"): # this function is even more of an abomination and I should have used getter() and pager() instead but I'm too lazy to change it now
# It returns the newest image and only the newest image
try:
listofimg = []
if "gay" in cat:
page = requests.get("https://yiff-party.com/genre/male-male/")
elif "lesbian" in cat:
page = requests.get("https://yiff-party.com/genre/female-female/")
elif "straight" in cat:
page = requests.get("https://yiff-party.com/genre/male-female/")
elif "animated" in cat:
page = requests.get("https://yiff-party.com/category/yiff-animated/")
elif "anthro" in cat:
page = requests.get("https://yiff-party.com/genre/anthro/")
elif "feral" in cat:
page = requests.get("https://yiff-party.com/genre/feral/")
else:
page = requests.get("https://yiff-party.com/")
soup = BeautifulSoup(page.content, 'html.parser')
mydivs = (soup.find_all("div", {"class": "section-content"}))
bs = BeautifulSoup(str(mydivs), 'html.parser')
images = bs.find_all('img')
for img in images:
#print(img)
if img.has_attr('data-src'):
#print(img['data-src'])
listofimg.append(img['data-src'])
else:
#print("fuck")
pass
output = listofimg[0]
return output
except Exception as e:
print(e)
def stream(cat="main"):
if "gay" in cat:
url ="https://yiff-party.com/genre/male-male/"
elif "lesbian" in cat:
url = "https://yiff-party.com/genre/female-female/"
elif "straight" in cat:
url = "https://yiff-party.com/genre/male-female/"
elif "animated" in cat:
url = "https://yiff-party.com/category/yiff-animated/"
elif "anthro" in cat:
url = "https://yiff-party.com/genre/anthro/"
elif "feral" in cat:
url = "https://yiff-party.com/genre/feral/page/"
else:
url = "https://yiff-party.com/"
base = getter(url)
del(base[0])
while True:
face = getter(url)
if face == base:
time.sleep(600)
else:
for i in face:
if i in base:
pass
else:
yield i
base = face
time.sleep(600)
def yiff(num, cat="main"):
try:
listofimg = []
if "gay" in cat:
listofimg.append(pager("https://yiff-party.com/genre/male-male/page/", num))
elif "lesbian" in cat:
listofimg.append(pager("https://yiff-party.com/genre/female-female/page/", num))
elif "straight" in cat:
listofimg.append(pager("https://yiff-party.com/genre/male-female/page/", num))
elif "animated" in cat:
listofimg.append(pager("https://yiff-party.com/category/yiff-animated/page/", num))
elif "anthro" in cat:
listofimg.append(pager("https://yiff-party.com/genre/anthro/page/", num))
elif "feral" in cat:
listofimg.append(pager("https://yiff-party.com/genre/feral/page/", num))
else:
listofimg.append(pager("https://yiff-party.com/page/", num))
return(listofimg)
except Exception as e:
print(e)
def help():
print("""Welcome to the horniest python package every written!
This code is designed to help you interact with yiff-party.com without having to without having to write your own code. It can pull your chosen number of the latest images from any of the 6 categories. It can pull a random image from any category and it also provide's a live feature called 'stream' which allows you to iterate over subbmissions as they are uploaded to the website!
Usage:
print(horni.randomIMG())
> result will be a random image url
print(horni.newsest("gay"))
> result will be the newsest image url in the 'gay' category.
You can input any of the six categories or 'main' for the main page which icludes all categories
(gay/lesbian/straight/animated/anthro/feral/main)
for image in horni.yiff(50,"anthro"):
print(image)
>this will return a list of 50 images in the anthro category
for image in horni.stream("main"):
print(image)
>This loop will run forever, printing out the images urls as they are uploaded to the site.
This code was originally written by Glass-Paramedic for qweter1006
:)
""")
|
[
"random.randint",
"random.choice",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup"
] |
[((256, 273), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (268, 273), False, 'import requests, random, time\n'), ((286, 328), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (299, 328), False, 'from bs4 import BeautifulSoup\n'), ((1328, 1350), 'random.randint', 'random.randint', (['(5)', '(480)'], {}), '(5, 480)\n', (1342, 1350), False, 'import requests, random, time\n'), ((1362, 1417), 'requests.get', 'requests.get', (['f"""https://yiff-party.com/page/{pageNUM}/"""'], {}), "(f'https://yiff-party.com/page/{pageNUM}/')\n", (1374, 1417), False, 'import requests, random, time\n'), ((1430, 1472), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (1443, 1472), False, 'from bs4 import BeautifulSoup\n'), ((1866, 1890), 'random.choice', 'random.choice', (['listofimg'], {}), '(listofimg)\n', (1879, 1890), False, 'import requests, random, time\n'), ((2884, 2926), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (2897, 2926), False, 'from bs4 import BeautifulSoup\n'), ((2249, 2304), 'requests.get', 'requests.get', (['"""https://yiff-party.com/genre/male-male/"""'], {}), "('https://yiff-party.com/genre/male-male/')\n", (2261, 2304), False, 'import requests, random, time\n'), ((4061, 4076), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (4071, 4076), False, 'import requests, random, time\n'), ((4216, 4231), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (4226, 4231), False, 'import requests, random, time\n'), ((2347, 2406), 'requests.get', 'requests.get', (['"""https://yiff-party.com/genre/female-female/"""'], {}), "('https://yiff-party.com/genre/female-female/')\n", (2359, 2406), False, 'import requests, random, time\n'), ((2450, 2507), 'requests.get', 'requests.get', (['"""https://yiff-party.com/genre/male-female/"""'], {}), "('https://yiff-party.com/genre/male-female/')\n", (2462, 2507), False, 'import requests, random, time\n'), ((2555, 2617), 'requests.get', 'requests.get', (['"""https://yiff-party.com/category/yiff-animated/"""'], {}), "('https://yiff-party.com/category/yiff-animated/')\n", (2567, 2617), False, 'import requests, random, time\n'), ((2659, 2711), 'requests.get', 'requests.get', (['"""https://yiff-party.com/genre/anthro/"""'], {}), "('https://yiff-party.com/genre/anthro/')\n", (2671, 2711), False, 'import requests, random, time\n'), ((2752, 2803), 'requests.get', 'requests.get', (['"""https://yiff-party.com/genre/feral/"""'], {}), "('https://yiff-party.com/genre/feral/')\n", (2764, 2803), False, 'import requests, random, time\n'), ((2830, 2869), 'requests.get', 'requests.get', (['"""https://yiff-party.com/"""'], {}), "('https://yiff-party.com/')\n", (2842, 2869), False, 'import requests, random, time\n')]
|
from unittest import mock
from pytest import fixture
@fixture
def url(route_url):
    return route_url('configuration')
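# Note: stacked mock.patch decorators are applied bottom-up, so the mocks are
# passed to the test function below in reverse order of the decorators
# (stop_program first, program_status last).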
@mock.patch('senic_hub.backend.commands.supervisor.program_status')
@mock.patch('senic_hub.backend.commands.supervisor.start_program')
@mock.patch('senic_hub.backend.views.config.sleep')
@mock.patch('senic_hub.backend.views.config.stop_program')
def test_setup_create_config_returns_200(
        stop_program_mock, sleep_mock, start_program_mock, program_status_mock,
        url, browser):
    program_status_mock.return_value = 'STOPPED'
    assert browser.post_json(url, {}, status=200)
    stop_program_mock.assert_called_once_with('device_discovery')
    start_program_mock.assert_has_calls([
        mock.call('nuimo_app')
    ])
@mock.patch('senic_hub.backend.views.config.subprocess.Popen')
def test_setup_delete_config_returns_200_and_creates_files(
        subprocess_mock, url, browser):
    assert browser.delete(url, status=200)
    subprocess_mock.assert_called_with(['/usr/bin/senic_hub_factory_reset'])
|
[
"unittest.mock.patch",
"unittest.mock.call"
] |
[((126, 192), 'unittest.mock.patch', 'mock.patch', (['"""senic_hub.backend.commands.supervisor.program_status"""'], {}), "('senic_hub.backend.commands.supervisor.program_status')\n", (136, 192), False, 'from unittest import mock\n'), ((194, 259), 'unittest.mock.patch', 'mock.patch', (['"""senic_hub.backend.commands.supervisor.start_program"""'], {}), "('senic_hub.backend.commands.supervisor.start_program')\n", (204, 259), False, 'from unittest import mock\n'), ((261, 311), 'unittest.mock.patch', 'mock.patch', (['"""senic_hub.backend.views.config.sleep"""'], {}), "('senic_hub.backend.views.config.sleep')\n", (271, 311), False, 'from unittest import mock\n'), ((313, 370), 'unittest.mock.patch', 'mock.patch', (['"""senic_hub.backend.views.config.stop_program"""'], {}), "('senic_hub.backend.views.config.stop_program')\n", (323, 370), False, 'from unittest import mock\n'), ((764, 825), 'unittest.mock.patch', 'mock.patch', (['"""senic_hub.backend.views.config.subprocess.Popen"""'], {}), "('senic_hub.backend.views.config.subprocess.Popen')\n", (774, 825), False, 'from unittest import mock\n'), ((731, 753), 'unittest.mock.call', 'mock.call', (['"""nuimo_app"""'], {}), "('nuimo_app')\n", (740, 753), False, 'from unittest import mock\n')]
|
""" Saved progress for Rigor, allowing users to resume long-running runs that fail part way through """
import rigor.logger
import tempfile
import time
import cPickle as pickle
import os
kPickleProtocol = pickle.HIGHEST_PROTOCOL
class Checkpoint(object):
""" Saved checkpoint results, loaded from a file """
def __init__(self, timestamp, parameters, seen, results):
"""
:param timestamp: when the checkpoint file was first created
:param dict parameters: parameters used in the original run
:param set(int) seen: a set of IDs that have been checkpointed already, to make it easy to skip duplicate evaluations
:param results: the saved results
"""
self.timestamp = timestamp
self.parameters = parameters
self.seen = seen
self.results = results
class NullCheckpointer(object):
"""
Does nothing. Used in place of actual checkpointer to make code simpler in :py:class:`~rigor.Runner`.
"""
def log(self, id, entry, flush=True):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
pass
class Checkpointer(object):
"""
Saves progress of algorithm evaluations in a file, to be loaded later
if there is an error and the evaluation is interrupted
"""
def __init__(self, parameters, checkpoint_file=None, delete_on_success=True):
"""
:param parameters: parameters that were used to generate checkpointed results
:param file checkpoint_file: open file to use for checkpointing, or :py:class:`None` to create a new one
:param delete_on_success: whether to delete the checkpoint file when closed
"""
self._logger = rigor.logger.get_logger('.'.join((__name__, self.__class__.__name__)))
self._parameters = parameters
if not checkpoint_file:
self._file = tempfile.NamedTemporaryFile('wb', prefix='rigor-checkpoint-', delete=False)
self.filename = self._file.name
else:
self._file = checkpoint_file
self.filename = checkpoint_file.name
self._delete = delete_on_success
self._write_header()
self._logger.info("Checkpoint filename is {}".format(self.filename))
def _write_header(self):
""" Writes an identifying header to the checkpoint file """
pickle.dump(time.time(), self._file, kPickleProtocol)
pickle.dump(self._parameters, self._file, kPickleProtocol)
def log(self, id, entry, flush=True):
"""
Logs a checkpoint entry to the file
:param id: The percept ID
:param entry: structured data returned from Algorithm.apply()
:param flush: whether to flush file output with each log entry (safer, but slower if processing each percept is very quick)
"""
pickle.dump((id, entry), self._file, kPickleProtocol)
if flush:
self._file.flush()
def close(self, success):
"""
Closes the checkpoint file.
:param success: whether operation finished successfully
"""
self._file.close()
if self._delete and success:
os.remove(self.filename)
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
self.close(exc_type is None)
@classmethod
def read_header(cls, checkpoint_file):
"""
Loads just the header portion of a checkpoint file.
:param checkpoint_file: file open in :py:const:`rb` mode containing a checkpoint
"""
timestamp = pickle.load(checkpoint_file)
parameters = pickle.load(checkpoint_file)
return timestamp, parameters
@classmethod
def resume(cls, old_file, new_file=None, delete_on_success=True):
"""
Resumes from an existing checkpoint file.
:param file old_file: existing open checkpoint file to resume from
:param file new_file: open new checkpoint file (must be different from the old file)
:param delete_on_success: whether to delete the new checkpoint file when closed, if successful
:return: (Checkpointer object, Checkpoint object)
"""
timestamp, parameters = cls.read_header(old_file)
checkpointer = cls(parameters, new_file, delete_on_success)
entries = list()
seen = set()
while True:
try:
id, entry = pickle.load(old_file)
seen.add(id)
entries.append(entry)
checkpointer.log(id, entry, flush=False)
except EOFError:
break
return checkpointer, Checkpoint(timestamp, parameters, seen, entries)
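# Illustrative usage sketch (the names `params`, `percepts`, and `evaluate` are
# placeholders, not part of this module):
#
#   with Checkpointer(params) as cp:
#       for percept_id, entry in evaluate(percepts):
#           cp.log(percept_id, entry)
#
#   # After an interrupted run, resume from the old checkpoint file and skip
#   # percepts that were already evaluated:
#   with open(old_filename, 'rb') as old_file:
#       checkpointer, checkpoint = Checkpointer.resume(old_file)
#   remaining = [p for p in percepts if p.id not in checkpoint.seen]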
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"cPickle.load",
"time.time",
"cPickle.dump"
] |
[((2207, 2265), 'cPickle.dump', 'pickle.dump', (['self._parameters', 'self._file', 'kPickleProtocol'], {}), '(self._parameters, self._file, kPickleProtocol)\n', (2218, 2265), True, 'import cPickle as pickle\n'), ((2577, 2630), 'cPickle.dump', 'pickle.dump', (['(id, entry)', 'self._file', 'kPickleProtocol'], {}), '((id, entry), self._file, kPickleProtocol)\n', (2588, 2630), True, 'import cPickle as pickle\n'), ((3211, 3239), 'cPickle.load', 'pickle.load', (['checkpoint_file'], {}), '(checkpoint_file)\n', (3222, 3239), True, 'import cPickle as pickle\n'), ((3255, 3283), 'cPickle.load', 'pickle.load', (['checkpoint_file'], {}), '(checkpoint_file)\n', (3266, 3283), True, 'import cPickle as pickle\n'), ((1740, 1815), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""wb"""'], {'prefix': '"""rigor-checkpoint-"""', 'delete': '(False)'}), "('wb', prefix='rigor-checkpoint-', delete=False)\n", (1767, 1815), False, 'import tempfile\n'), ((2163, 2174), 'time.time', 'time.time', ([], {}), '()\n', (2172, 2174), False, 'import time\n'), ((2849, 2873), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (2858, 2873), False, 'import os\n'), ((3946, 3967), 'cPickle.load', 'pickle.load', (['old_file'], {}), '(old_file)\n', (3957, 3967), True, 'import cPickle as pickle\n')]
|
#!/usr/bin/env python3
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import keras
import numpy as np
import tensorflow as tf
from absl import app
from data import simulation_pb2
from bin.load_batch import load_batch
from bin.data_visualization import map_id_to_units_race
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d.axes3d import Axes3D
from bin.util import *
from lib.unit_constants import *
from lib.config import REPLAYS_PARSED_DIR, REPLAY_DIR, REPO_DIR, STANDARD_VERSION
def main():
learning_rates = [0.05]
beta1 = [0.9, 0.7, 0.6, 0.5]
beta2 = [0.95, 0.85, 0.75, 0.65]
epsilon = 1e-06
training_epochs = 50
trackAcc = []
trackAccs = []
trackCost = []
trackCosts = []
for learning_rate in learning_rates:
for b1 in beta1:
for b2 in beta2:
print("Run gradient descent with Learning Rate: %-6s --- Beta1: %-4s --- Beta2: %-5s" % (learning_rate, b1, b2))
trackAcc, trackCost = run_grad_desc(learning_rate, training_epochs, b1, b2, epsilon)
trackAccs.append(trackAcc)
trackCosts.append(trackCost)
create_graphs(trackAccs, trackCosts, learning_rates, training_epochs, beta1, beta2)
def run_grad_desc(learning_rate, training_epochs, b1, b2, eps):
# Graph Input
x = tf.placeholder(tf.float32, [None, 94])
y = tf.placeholder(tf.float32, [None, 3])
# initialize weight and bias
W_1 = tf.Variable(tf.truncated_normal([94, 94]))
W_2 = tf.Variable(tf.truncated_normal([94, 47]))
W_3 = tf.Variable(tf.truncated_normal([47, 3]))
# Construct Model
x_ = tf.matmul(x, W_1)
x_ = tf.matmul(x_, W_2)
logits = tf.matmul(x_, W_3)
pred = tf.nn.softmax(logits)
# minimize error using cross entropy
# cross_entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.contrib.opt.NadamOptimizer(learning_rate, b1, b2, eps).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
trackAcc = []
trackCost = []
with tf.Session() as s:
s.run(init)
xs_train, xs_test, ys_train, ys_test = load(version=['1_3d'], file_version='multiple')
# loop to train for specified number of epochs
for epoch in range(training_epochs):
_, c = s.run([optimizer, cost], feed_dict={x: xs_train, y: ys_train})
acc = s.run(accuracy, feed_dict={x: xs_test, y: ys_test})
# track accuracy to display in graph when algorithm finished
trackCost.append(c)
trackAcc.append(acc*100)
#print('Epoch:', '%04d' % (epoch+1), "completed with an accuracy of:", "{:.3f}".format(acc), "cost=", "{:.9f}".format(c))
# evaluate accuary when all training steps are completed
print ("Accuracy:", accuracy.eval({x: xs_test, y: ys_test}))
trackAcc = np.array(trackAcc)
return trackAcc, trackCost
def create_graphs(trackAcc, trackCost, learning_rate, training_epochs, b1, b2):
# create graph
fig = plt.figure(figsize=plt.figaspect(4.))
# add plot
ax = fig.add_subplot(2,1,1)
# create array that corresponds to the number of training steps as x-axis
# y-axis is the accuracy in %
a = np.arange(1, training_epochs+1)
b = np.arange(1, training_epochs+1)
ax.set_title('Test Accuracy')
i = 0
bx = fig.add_subplot(2,1,2)
bx.set_title('Cost by Epoch')
m = ''
col = ''
sign = ['.', '-', ',', 'o']
cols = ['b','g', 'y', 'r']
for lr in learning_rate:
for n in range(len(learning_rate)):
if n > 3:
m = '^'
break
if lr == learning_rate[n]:
m = sign[n]
for b_ in b1:
for j in range(len(b1)):
if j > 3:
col = 'k'+m
break
if b_ == b1[j]:
col == cols[j]+m
for b_2 in b2:
ax.plot(a, trackAcc[i], col, label=i)
bx.plot(b, trackCost[i], col, label=i)
i += 1
plt.show()
# function to load the csv-data and construct the input array as return
# input array is a vector with one entry per possible unit id
# 94 entries 47 per combat party
def load(version = STANDARD_VERSION, file_version='single'):
match_arr = []
# load file(s) depending on desired input and version number
if file_version == 'multiple':
replay_log_files = []
replay_log_files = build_file_array('logs', version)
i = 0
#print('Looking over', len(replay_log_files), 'files')
while i < len(replay_log_files):
match_arr.append(read_csv(replay_log_files[i]))
i = i + 1
if file_version == 'single':
file_path = os.path.join(REPO_DIR, 'all_csv_from_version_' + version + '.csv')
match_arr = read_summed_up_csv(file_path, 250)
unit_vector_A = np.zeros(47)
unit_vector_B = np.zeros(47)
xs = []
ys = []
#print(match_arr[0], match_arr[3])
n=0
typeerror = 0
for match in match_arr:
# if str(match['winner_code']) == str(2):
# continue
try:
for id in match['team_A']:
id = int(id.replace("'", ""))
if id == 85:
continue
if id == 9:
unit_vector_A[0] += 1
if id == 12 or id == 13 or id == 15 or id == 17:
unit_vector_A[1] += 1
if id == 104:
unit_vector_A[2] += 1
if id == 105:
unit_vector_A[3] += 1
if id == 106:
unit_vector_A[4] += 1
if id == 107:
unit_vector_A[5] += 1
if id == 108:
unit_vector_A[6] += 1
if id == 109:
unit_vector_A[7] += 1
if id == 110:
unit_vector_A[8] += 1
if id == 111:
unit_vector_A[9] += 1
if id == 112:
unit_vector_A[10] += 1
if id == 114:
unit_vector_A[11] += 1
if id == 126:
unit_vector_A[12] += 1
if id == 129:
unit_vector_A[13] += 1
if id == 289:
unit_vector_A[14] += 1
if id == 499:
unit_vector_A[15] += 1
if id == 4:
unit_vector_A[16] += 1
if id == 10:
unit_vector_A[17] += 1
if id == 73:
unit_vector_A[18] += 1
if id == 74:
unit_vector_A[19] += 1
if id == 75:
unit_vector_A[20] += 1
if id == 76:
unit_vector_A[21] += 1
if id == 77:
unit_vector_A[22] += 1
if id == 78:
unit_vector_A[23] += 1
if id == 79:
unit_vector_A[24] += 1
if id == 80:
unit_vector_A[25] += 1
if id == 82:
unit_vector_A[26] += 1
if id == 83:
unit_vector_A[27] += 1
if id == 84:
unit_vector_A[28] += 1
if id == 141:
unit_vector_A[29] += 1
if id == 311:
unit_vector_A[30] += 1
if id == 694:
unit_vector_A[31] += 1
if id == 32 or id == 33:
unit_vector_A[32] += 1
if id == 34 or id == 35:
unit_vector_A[33] += 1
if id == 45:
unit_vector_A[34] += 1
if id == 48:
unit_vector_A[35] += 1
if id == 49:
unit_vector_A[36] += 1
if id == 50:
unit_vector_A[37] += 1
if id == 51:
unit_vector_A[38] += 1
if id == 52:
unit_vector_A[39] += 1
if id == 53 or id == 484:
unit_vector_A[40] += 1
if id == 54:
unit_vector_A[41] += 1
if id == 55:
unit_vector_A[42] += 1
if id == 56:
unit_vector_A[43] += 1
if id == 57:
unit_vector_A[44] += 1
if id == 268:
unit_vector_A[45] += 1
if id == 692:
unit_vector_A[46] += 1
for id in match['team_B']:
id = int(id.replace("'", ""))
if id == 85:
continue
if id == 9:
unit_vector_B[0] += 1
if id == 12 or id == 13 or id == 15 or id == 17:
unit_vector_B[1] += 1
if id == 104:
unit_vector_B[2] += 1
if id == 105:
unit_vector_B[3] += 1
if id == 106:
unit_vector_B[4] += 1
if id == 107:
unit_vector_B[5] += 1
if id == 108:
unit_vector_B[6] += 1
if id == 109:
unit_vector_B[7] += 1
if id == 110:
unit_vector_B[8] += 1
if id == 111:
unit_vector_B[9] += 1
if id == 112:
unit_vector_B[10] += 1
if id == 114:
unit_vector_B[11] += 1
if id == 126:
unit_vector_B[12] += 1
if id == 129:
unit_vector_B[13] += 1
if id == 289:
unit_vector_B[14] += 1
if id == 499:
unit_vector_B[15] += 1
if id == 4:
unit_vector_B[16] += 1
if id == 10:
unit_vector_B[17] += 1
if id == 73:
unit_vector_B[18] += 1
if id == 74:
unit_vector_B[19] += 1
if id == 75:
unit_vector_B[20] += 1
if id == 76:
unit_vector_B[21] += 1
if id == 77:
unit_vector_B[22] += 1
if id == 78:
unit_vector_B[23] += 1
if id == 79:
unit_vector_B[24] += 1
if id == 80:
unit_vector_B[25] += 1
if id == 82:
unit_vector_B[26] += 1
if id == 83:
unit_vector_B[27] += 1
if id == 84:
unit_vector_B[28] += 1
if id == 141:
unit_vector_B[29] += 1
if id == 311:
unit_vector_B[30] += 1
if id == 694:
unit_vector_B[31] += 1
if id == 32 or id == 33:
unit_vector_B[32] += 1
if id == 34 or id == 35:
unit_vector_B[33] += 1
if id == 45:
unit_vector_B[34] += 1
if id == 48:
unit_vector_B[35] += 1
if id == 49:
unit_vector_B[36] += 1
if id == 50:
unit_vector_B[37] += 1
if id == 51:
unit_vector_B[38] += 1
if id == 52:
unit_vector_B[39] += 1
if id == 53 or id == 484:
unit_vector_B[40] += 1
if id == 54:
unit_vector_B[41] += 1
if id == 55:
unit_vector_B[42] += 1
if id == 56:
unit_vector_B[43] += 1
if id == 57:
unit_vector_B[44] += 1
if id == 268:
unit_vector_B[45] += 1
if id == 692:
unit_vector_B[46] += 1
unit_vector = np.append(unit_vector_A, unit_vector_B)
xs.append(unit_vector)
ys.append(int(match['winner_code']))
except TypeError:
print(id)
typeerror += 1
continue
except ZeroDivisionError:
continue
#print(typeerror)
#print(xs[0])
ys = keras.utils.to_categorical(ys, num_classes=3)
split = int(len(xs)*0.1)
# # Make train / test split
xs_train = xs[:-split]
ys_train = ys[:-split]
xs_test = xs[-split:]
ys_test = ys[-split:]
return xs_train, xs_test, ys_train, ys_test
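# Note: the if-chains above implement a fixed mapping from unit type id to one
# of 47 per-team vector slots. A more compact equivalent (sketch only, indices
# taken from the code above) would be a lookup table:
#
#   ID_TO_INDEX = {9: 0, 104: 2, 105: 3, 106: 4, 107: 5}  # ...grouped ids such as 12/13/15/17 all map to 1
#   idx = ID_TO_INDEX.get(unit_id)
#   if idx is not None:
#       unit_vector_A[idx] += 1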
if __name__ == "__main__":
main()
|
[
"tensorflow.nn.softmax",
"matplotlib.pyplot.figaspect",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.cast",
"numpy.arange",
"numpy.array",
"numpy.append",
"tensorflow.contrib.opt.NadamOptimizer",
"os.path.join",
"tensorflow.truncated_normal",
"keras.utils.to_categorical"
] |
[((1941, 1979), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 94]'], {}), '(tf.float32, [None, 94])\n', (1955, 1979), True, 'import tensorflow as tf\n'), ((1988, 2025), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {}), '(tf.float32, [None, 3])\n', (2002, 2025), True, 'import tensorflow as tf\n'), ((2258, 2275), 'tensorflow.matmul', 'tf.matmul', (['x', 'W_1'], {}), '(x, W_1)\n', (2267, 2275), True, 'import tensorflow as tf\n'), ((2285, 2303), 'tensorflow.matmul', 'tf.matmul', (['x_', 'W_2'], {}), '(x_, W_2)\n', (2294, 2303), True, 'import tensorflow as tf\n'), ((2317, 2335), 'tensorflow.matmul', 'tf.matmul', (['x_', 'W_3'], {}), '(x_, W_3)\n', (2326, 2335), True, 'import tensorflow as tf\n'), ((2347, 2368), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2360, 2368), True, 'import tensorflow as tf\n'), ((2782, 2815), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2813, 2815), True, 'import tensorflow as tf\n'), ((4088, 4121), 'numpy.arange', 'np.arange', (['(1)', '(training_epochs + 1)'], {}), '(1, training_epochs + 1)\n', (4097, 4121), True, 'import numpy as np\n'), ((4132, 4165), 'numpy.arange', 'np.arange', (['(1)', '(training_epochs + 1)'], {}), '(1, training_epochs + 1)\n', (4141, 4165), True, 'import numpy as np\n'), ((5055, 5065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5063, 5065), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5924), 'numpy.zeros', 'np.zeros', (['(47)'], {}), '(47)\n', (5920, 5924), True, 'import numpy as np\n'), ((5945, 5957), 'numpy.zeros', 'np.zeros', (['(47)'], {}), '(47)\n', (5953, 5957), True, 'import numpy as np\n'), ((15708, 15753), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['ys'], {'num_classes': '(3)'}), '(ys, num_classes=3)\n', (15734, 15753), False, 'import keras\n'), ((2086, 2115), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[94, 94]'], {}), '([94, 94])\n', (2105, 2115), True, 'import tensorflow as tf\n'), ((2139, 2168), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[94, 47]'], {}), '([94, 47])\n', (2158, 2168), True, 'import tensorflow as tf\n'), ((2192, 2220), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[47, 3]'], {}), '([47, 3])\n', (2211, 2220), True, 'import tensorflow as tf\n'), ((2456, 2523), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'y'}), '(logits=logits, labels=y)\n', (2498, 2523), True, 'import tensorflow as tf\n'), ((2654, 2672), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (2663, 2672), True, 'import tensorflow as tf\n'), ((2674, 2689), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2683, 2689), True, 'import tensorflow as tf\n'), ((2721, 2760), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2728, 2760), True, 'import tensorflow as tf\n'), ((2867, 2879), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2877, 2879), True, 'import tensorflow as tf\n'), ((3692, 3710), 'numpy.array', 'np.array', (['trackAcc'], {}), '(trackAcc)\n', (3700, 3710), True, 'import numpy as np\n'), ((5770, 5836), 'os.path.join', 'os.path.join', (['REPO_DIR', "('all_csv_from_version_' + version + '.csv')"], {}), "(REPO_DIR, 'all_csv_from_version_' + version + '.csv')\n", (5782, 5836), False, 'import os\n'), ((2542, 2599), 
'tensorflow.contrib.opt.NadamOptimizer', 'tf.contrib.opt.NadamOptimizer', (['learning_rate', 'b1', 'b2', 'eps'], {}), '(learning_rate, b1, b2, eps)\n', (2571, 2599), True, 'import tensorflow as tf\n'), ((3882, 3900), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(4.0)'], {}), '(4.0)\n', (3895, 3900), True, 'import matplotlib.pyplot as plt\n'), ((15381, 15420), 'numpy.append', 'np.append', (['unit_vector_A', 'unit_vector_B'], {}), '(unit_vector_A, unit_vector_B)\n', (15390, 15420), True, 'import numpy as np\n')]
|
"""
The PIMA Indians dataset obtained from the UCI Machine Learning Repository
The goal is to predict whether or not a given female patient will contract diabetes
based on features such as BMI, age, and number of pregnancies
It is a binary classification problem
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import randint
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
classification_report,
confusion_matrix,
roc_curve,
roc_auc_score,
)
from sklearn.model_selection import (
train_test_split,
cross_val_score,
GridSearchCV,
RandomizedSearchCV,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
plt.style.use("ggplot")
_df = pd.read_csv("datasets/diabetes.csv")
df = _df.dropna()
X = df.drop("Outcome", axis=1).values
# X = X.reshape(-1, 8)
y = df.Outcome.values
y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.4, random_state=42
)
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("k-NN performance")
# must always be (test, prediction)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# the support columns gives the number of samples of the true response that lie in that class
#### logistic regression ####
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print("logistic regression performance")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# predict_proba returns an array with two columns: each column contains the probabilities for the respective target values.
# we choose the second column, the one with index 1,
# that is, the probabilities of the predicted labels being '1'
y_pred_prob = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
_ = plt.plot([0, 1], [0, 1], "k--")
_ = plt.plot(fpr, tpr)
_ = plt.xlabel("False Positive Rate")
_ = plt.ylabel("True Positive Rate")
_ = plt.title("ROC Curve")
plt.show()
print(f"AUC: {roc_auc_score(y_test, y_pred_prob)}")
cv_auc = cross_val_score(logreg, X, y, cv=5, scoring="roc_auc")
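# cross_val_score with scoring="roc_auc" returns one AUC score per fold (5 values here)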
#### hyperparameter tuning ####
# Setup the hyperparameter grid
c_space = np.logspace(-5, 8, 15)
param_grid = {"C": c_space} # hyperparameter to tune and values to test
logreg = LogisticRegression()
logreg_cv = GridSearchCV(
logreg, param_grid, cv=5
) # instantiate the GridSearchCV object
logreg_cv.fit(X, y) # fits in place
print(
f"""Tuned Logistic Regression Parameters: {logreg_cv.best_params_}
Best score is {logreg_cv.best_score_}"""
)
#### random tuning ####
tree = DecisionTreeClassifier()
param_dist = {
"max_depth": [3, None],
"max_features": randint(1, 9),
"min_samples_leaf": randint(1, 9),
"criterion": ["gini", "entropy"],
}
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
tree_cv.fit(X, y)
print(
f"""Tuned Decision Tree Parameters: {tree_cv.best_params_}
Best score is {tree_cv.best_score_}"""
)
|
[
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"numpy.logspace",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.style.use",
"sklearn.model_selection.RandomizedSearchCV",
"matplotlib.pyplot.show",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.ylabel",
"scipy.stats.randint",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xlabel"
] |
[((771, 794), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (784, 794), True, 'import matplotlib.pyplot as plt\n'), ((802, 838), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/diabetes.csv"""'], {}), "('datasets/diabetes.csv')\n", (813, 838), True, 'import pandas as pd\n'), ((999, 1053), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': '(42)'}), '(X, y, test_size=0.4, random_state=42)\n', (1015, 1053), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((1067, 1102), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(6)'}), '(n_neighbors=6)\n', (1087, 1102), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1440, 1460), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1458, 1460), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1964, 1994), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred_prob'], {}), '(y_test, y_pred_prob)\n', (1973, 1994), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((2000, 2031), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (2008, 2031), True, 'import matplotlib.pyplot as plt\n'), ((2036, 2054), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (2044, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2092), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2069, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2129), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2107, 2129), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2156), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (2143, 2156), True, 'import matplotlib.pyplot as plt\n'), ((2157, 2167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2165, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2285), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['logreg', 'X', 'y'], {'cv': '(5)', 'scoring': '"""roc_auc"""'}), "(logreg, X, y, cv=5, scoring='roc_auc')\n", (2246, 2285), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((2361, 2383), 'numpy.logspace', 'np.logspace', (['(-5)', '(8)', '(15)'], {}), '(-5, 8, 15)\n', (2372, 2383), True, 'import numpy as np\n'), ((2466, 2486), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2484, 2486), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2499, 2537), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['logreg', 'param_grid'], {'cv': '(5)'}), '(logreg, param_grid, cv=5)\n', (2511, 2537), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((2774, 2798), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (2796, 2798), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2968, 3010), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['tree', 'param_dist'], {'cv': '(5)'}), '(tree, param_dist, cv=5)\n', (2986, 3010), False, 'from sklearn.model_selection import train_test_split, 
cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((1225, 1257), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1241, 1257), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((1265, 1302), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1286, 1302), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((1569, 1601), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1585, 1601), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((1609, 1646), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1630, 1646), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((2863, 2876), 'scipy.stats.randint', 'randint', (['(1)', '(9)'], {}), '(1, 9)\n', (2870, 2876), False, 'from scipy.stats import randint\n'), ((2902, 2915), 'scipy.stats.randint', 'randint', (['(1)', '(9)'], {}), '(1, 9)\n', (2909, 2915), False, 'from scipy.stats import randint\n'), ((2183, 2217), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_prob'], {}), '(y_test, y_pred_prob)\n', (2196, 2217), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n')]
|
import collections
from typing import Any, Optional
from plz.cli.composition_operation import CompositionOperation, \
create_path_string_prefix
from plz.cli.configuration import Configuration
from plz.cli.log import log_info
from plz.cli.operation import on_exception_reraise
ExecutionStatus = collections.namedtuple('ExecutionStatus',
['running', 'success', 'code'])
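# ExecutionStatus mirrors the controller's status payload: `running` and `success`
# are flags and `code` carries the exit status reported under 'exit_status'.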
class ShowStatusOperation(CompositionOperation):
"""Output the status of an execution"""
@classmethod
def name(cls):
return 'status'
@classmethod
def prepare_argument_parser(cls, parser, args):
cls.maybe_add_execution_id_arg(parser, args)
def __init__(self,
configuration: Configuration,
execution_id: Optional[str] = None):
super().__init__(configuration)
self.execution_id = execution_id
@on_exception_reraise('Retrieving the status failed.')
def get_status(self, atomic_execution_id: Optional[str] = None):
if atomic_execution_id is None:
atomic_execution_id = self.get_execution_id()
status = self.controller.get_status(atomic_execution_id)
return ExecutionStatus(running=status['running'],
success=status['success'],
code=status['exit_status'])
def run_atomic(self, atomic_execution_id: str,
composition_path: [(str, Any)]):
status = self.get_status(atomic_execution_id)
string_prefix = create_path_string_prefix(composition_path)
log_info(f'{string_prefix}Status:')
print('Running:', status.running)
print('Success:', status.success)
print('Exit Status:', status.code)
|
[
"plz.cli.log.log_info",
"plz.cli.composition_operation.create_path_string_prefix",
"plz.cli.operation.on_exception_reraise",
"collections.namedtuple"
] |
[((300, 373), 'collections.namedtuple', 'collections.namedtuple', (['"""ExecutionStatus"""', "['running', 'success', 'code']"], {}), "('ExecutionStatus', ['running', 'success', 'code'])\n", (322, 373), False, 'import collections\n'), ((906, 959), 'plz.cli.operation.on_exception_reraise', 'on_exception_reraise', (['"""Retrieving the status failed."""'], {}), "('Retrieving the status failed.')\n", (926, 959), False, 'from plz.cli.operation import on_exception_reraise\n'), ((1549, 1592), 'plz.cli.composition_operation.create_path_string_prefix', 'create_path_string_prefix', (['composition_path'], {}), '(composition_path)\n', (1574, 1592), False, 'from plz.cli.composition_operation import CompositionOperation, create_path_string_prefix\n'), ((1601, 1636), 'plz.cli.log.log_info', 'log_info', (['f"""{string_prefix}Status:"""'], {}), "(f'{string_prefix}Status:')\n", (1609, 1636), False, 'from plz.cli.log import log_info\n')]
|
from ldif import LDIFParser
class ParseLDIF(LDIFParser):
def __init__(self, input_file, processing_object):
LDIFParser.__init__(self, input_file)
self.processing_object = processing_object
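    # handle() below is the LDIFParser callback invoked once per entry during parse().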
    def handle(self, dn, entry):
self.processing_object.process_entry(dn, entry)
|
[
"ldif.LDIFParser.__init__"
] |
[((123, 160), 'ldif.LDIFParser.__init__', 'LDIFParser.__init__', (['self', 'input_file'], {}), '(self, input_file)\n', (142, 160), False, 'from ldif import LDIFParser\n')]
|
import json
import pdb
from functools import reduce
from collections import OrderedDict, Counter
import random
import re
def replace_num_or_special(word):
    if re.match(r'\d+', word):
return 'NUMBER'
elif re.match('[a-zA-Z]+', word):
return word
else:
return 'SPECIAL'
building = 'ebu3b'
with open('metadata/{0}_sentence_dict_justseparate.json'.format(building),
'r') as fp:
sentence_dict = json.load(fp)
srcids = list(sentence_dict.keys())
for srcid, sentence in sentence_dict.items():
sentence_dict[srcid] = list(map(replace_num_or_special, sentence))
adder = lambda x,y: x + y
total_words = set(reduce(adder, sentence_dict.values()))
word_counter = Counter(reduce(adder, sentence_dict.values()))
with open('model/{0}_word_clustering_justseparate.json'.format(building),
'r') as fp:
cluster_dict = json.load(fp)
# Learning Sample Selection
sample_srcids = set()
length_counter = lambda x: len(x[1])
ander = lambda x, y: x and y
n = 100
sample_cnt = 0
shuffle_flag = False
sorted_cluster_dict = OrderedDict(
sorted(cluster_dict.items(), key=length_counter, reverse=True))
#n = len(sorted_cluster_dict) #TODO: Remove if not working well
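# Round-robin over the clusters (largest first): each pass takes one not-yet-sampled
# srcid from every cluster, repeating until n samples have been collected.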
while len(sample_srcids) < n:
cluster_dict_items = list(sorted_cluster_dict.items())
if shuffle_flag:
random.shuffle(cluster_dict_items)
for cluster_num, srcid_list in cluster_dict_items:
valid_srcid_list = set(srcid_list)\
.intersection(set(srcids))\
.difference(set(sample_srcids))
if len(valid_srcid_list) > 0:
sample_srcids.add(\
random.choice(list(valid_srcid_list)))
if len(sample_srcids) >= n:
break
sample_sentence_dict = dict((srcid, sentence_dict[srcid])
for srcid in sample_srcids)
pdb.set_trace()
|
[
"re.match",
"random.shuffle",
"json.load",
"pdb.set_trace"
] |
[((1859, 1874), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1872, 1874), False, 'import pdb\n'), ((164, 186), 're.match', 're.match', (['"""\\\\d+"""', 'word'], {}), "('\\\\d+', word)\n", (172, 186), False, 'import re\n'), ((442, 455), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (451, 455), False, 'import json\n'), ((874, 887), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (883, 887), False, 'import json\n'), ((220, 247), 're.match', 're.match', (['"""[a-zA-Z]+"""', 'word'], {}), "('[a-zA-Z]+', word)\n", (228, 247), False, 'import re\n'), ((1334, 1368), 'random.shuffle', 'random.shuffle', (['cluster_dict_items'], {}), '(cluster_dict_items)\n', (1348, 1368), False, 'import random\n')]
|
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
import psutil
async def help(client, msg, args):
client.select_lang(msg, "all")
await msg.reply(msg.lang["help"]["ok"])
async def start(client, msg, args):
client.select_lang(msg, "all")
await msg.reply(msg.lang["start"]["ok"])
# Choice of languages
# - Callback handler for the response buttons
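# setlang() runs when a user taps a button sent by getlangs(); args[0] is the
# language code embedded in the button's callback_data ("setlang <code>").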
async def setlang(client, callback, args):
msg = callback.message
client.select_lang(msg, "all")
if msg.chat.type in ("group", "supergroup"):
info = await client.get_chat_member(msg.chat.id, callback.from_user.id)
if info.status not in ("administrator", "creator"):
await client.answer_callback_query(
callback.id,
msg.lang["setlang"]["not_admin"],
show_alert=True
)
return
if not args[0] in client.langs:
await client.edit_message_text(
message_id=msg.message_id,
chat_id=msg.chat.id,
text=msg.lang["setlang"]["not_found"]
)
return
client.db.set_lang(msg.chat.id, args[0])
client.select_lang(msg, "all")
await client.edit_message_text(
message_id=msg.message_id,
chat_id=msg.chat.id,
text=msg.lang["setlang"]["ok"]
)
await client.answer_callback_query(callback.id, "Ok.")
# - Send buttons to choose a language
async def getlangs(client, msg, args):
client.select_lang(msg, "all")
text = msg.lang["setlang"]["select"] + "\n\n"
buttons = []
for lang in client.langs.keys():
buttons.append(
[
InlineKeyboardButton(
client.langs[lang]["name"]+" - "+lang,
callback_data="setlang " + lang
)
]
)
await msg.reply(text, reply_markup=InlineKeyboardMarkup(buttons))
# Link to the bot's updates channel
async def channel(client, msg, args):
client.select_lang(msg, "all")
if "channel" in client.conf:
await msg.reply(
msg.lang["channel"]["ok"].format(uri=client.conf["channel"])
)
# Stats of the server machine (CPU, memory, disk)
async def status(client, msg, args):
cpu = psutil.cpu_freq()
cpu_str: str = f"{int(cpu.current)}/{int(cpu.max)}MHZ ({psutil.cpu_percent()}%)"
mem = psutil.virtual_memory()
mem_str: str = f"{mem.used // 1048576}/{mem.total // 1048576}MiB"
mem_str += f" ({int((mem.used / mem.total) * 100)}%)"
disk = psutil.disk_usage(".")
disk_str: str = f"{disk.used // (2**30)}/{disk.total // (2**30)}GiB"
disk_str += f" ({int(disk.percent)}%)"
await msg.reply(
"Server status\n\n" +
f"Memory: {mem_str}\n" +
f"CPU[min={int(cpu.min)}MHZ]: {cpu_str}\n" +
f"Disk: {disk_str}"
)
|
[
"psutil.virtual_memory",
"psutil.cpu_freq",
"psutil.disk_usage",
"pyrogram.types.InlineKeyboardButton",
"pyrogram.types.InlineKeyboardMarkup",
"psutil.cpu_percent"
] |
[((2197, 2214), 'psutil.cpu_freq', 'psutil.cpu_freq', ([], {}), '()\n', (2212, 2214), False, 'import psutil\n'), ((2310, 2333), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (2331, 2333), False, 'import psutil\n'), ((2473, 2495), 'psutil.disk_usage', 'psutil.disk_usage', (['"""."""'], {}), "('.')\n", (2490, 2495), False, 'import psutil\n'), ((2275, 2295), 'psutil.cpu_percent', 'psutil.cpu_percent', ([], {}), '()\n', (2293, 2295), False, 'import psutil\n'), ((1630, 1730), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (["(client.langs[lang]['name'] + ' - ' + lang)"], {'callback_data': "('setlang ' + lang)"}), "(client.langs[lang]['name'] + ' - ' + lang,\n callback_data='setlang ' + lang)\n", (1650, 1730), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n'), ((1844, 1873), 'pyrogram.types.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['buttons'], {}), '(buttons)\n', (1864, 1873), False, 'from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n')]
|
from iota import Iota
from iota.crypto.addresses import AddressGenerator
seed = b'<KEY>'
# generator = AddressGenerator(seed)
generator =\
AddressGenerator(
seed=seed,
security_level=3,
)
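# security_level may be 1-3 in IOTA; level 3 yields the strongest (and longest)
# signatures at the cost of larger transaction bundles.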
# Generate a list of addresses:
# addresses = generator.get_addresses(index=0, count=5)
# Note: the documentation was wrong; use `start` instead of `index`.
addresses = generator.get_addresses(start=0, count=5)
print(addresses)
print('='*20)
# Generating the addresses in reverse order (step=-1) is also possible,
# but is left commented out; the call below regenerates the same five addresses:
# addresses = generator.get_addresses(start=42, count=10, step=-1)
addresses = generator.get_addresses(start=0, count=5)
print(addresses)
|
[
"iota.crypto.addresses.AddressGenerator"
] |
[((146, 191), 'iota.crypto.addresses.AddressGenerator', 'AddressGenerator', ([], {'seed': 'seed', 'security_level': '(3)'}), '(seed=seed, security_level=3)\n', (162, 191), False, 'from iota.crypto.addresses import AddressGenerator\n')]
|
"""
Collects indicators from the World Bank. Currently, we collect indicators from the following URLs:
- http://datatopics.worldbank.org/world-development-indicators/themes/economy.html#featured-indicators_1
- http://datatopics.worldbank.org/world-development-indicators/themes/states-and-markets.html#featured-indicators_1
- http://datatopics.worldbank.org/world-development-indicators/themes/global-links.html#featured-indicators_1
- http://datatopics.worldbank.org/world-development-indicators/themes/people.html#featured-indicators_1
We use pandas-datareader, a Python package that provides access to economic databases,
because it makes collecting these indicators straightforward: each one is queried by its unique code.
Orion currently collects the following country-level indicators:
* GDP (current US$)
* Research and development expenditure (% of GDP)
* Government expenditure on education, total (% of GDP)
* Ratio of female to male labour force participation rate (%) (modelled ILO estimate)
Users can filter indicators by start and end year as well as country.
"""
from pandas_datareader import wb
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from orion.core.orms.mag_orm import (
WorldBankFemaleLaborForce,
WorldBankGovEducation,
WorldBankResearchDevelopment,
WorldBankGDP,
)
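# A minimal sketch of the raw pandas-datareader call that execute() wraps below.
# The indicator code "NY.GDP.MKTP.CD" (GDP, current US$), the country list and the
# year range are illustrative assumptions, not values used by the pipeline.
def _example_gdp_download():
    return wb.download(
        indicator="NY.GDP.MKTP.CD",
        country=["GB", "US"],
        start=2010,
        end=2019,
    )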
class WBIndicatorOperator(BaseOperator):
"""Fetches indicators from the World Bank."""
@apply_defaults
def __init__(
self,
db_config,
table_name,
indicator,
start_year,
end_year,
country,
*args,
**kwargs
):
super().__init__(**kwargs)
self.db_config = db_config
self.indicator = indicator
self.start_year = start_year
self.end_year = end_year
self.country = country
self.table_name = table_name
self.tables = {
"wb_gdp": WorldBankGDP,
"wb_edu_expenditure": WorldBankGovEducation,
"wb_rnd_expenditure": WorldBankResearchDevelopment,
"wb_female_workforce": WorldBankFemaleLaborForce,
}
def execute(self, context):
# Connect to postgresql db
engine = create_engine(self.db_config)
Session = sessionmaker(engine)
s = Session()
# Fetch WB Indicator
ind = wb.download(
indicator=self.indicator,
country=self.country,
start=self.start_year,
end=self.end_year,
)
# Store in DB
for (area, year), row in ind.iterrows():
s.add(
self.tables[self.table_name](
country=area, year=year, indicator=row[self.indicator]
)
)
s.commit()
|
[
"sqlalchemy.create_engine",
"sqlalchemy.orm.sessionmaker",
"pandas_datareader.wb.download"
] |
[((2302, 2331), 'sqlalchemy.create_engine', 'create_engine', (['self.db_config'], {}), '(self.db_config)\n', (2315, 2331), False, 'from sqlalchemy import create_engine\n'), ((2350, 2370), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['engine'], {}), '(engine)\n', (2362, 2370), False, 'from sqlalchemy.orm import sessionmaker\n'), ((2437, 2543), 'pandas_datareader.wb.download', 'wb.download', ([], {'indicator': 'self.indicator', 'country': 'self.country', 'start': 'self.start_year', 'end': 'self.end_year'}), '(indicator=self.indicator, country=self.country, start=self.\n start_year, end=self.end_year)\n', (2448, 2543), False, 'from pandas_datareader import wb\n')]
|
"""
Some analysis of informational content of TLDR-Auth and TLDR-PR
"""
import os
import csv
from collections import Counter, defaultdict
INFILE = 'tldr_analyze_nuggets/tldr_auth_pr_gold_nuggets_2020-03-31.csv'
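# Each CSV row is expected to carry an 'auth_pr' column ('auth_gold' or 'pr_gold'),
# a 'paper_id', and six binary nugget columns (area_field_topic, problem_motivation,
# mode_of_contrib, details_descrip, results_findings, value_signif).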
# Q1: How many nuggets do TLDRs contain?
# A: Interesting, both author and PR have nearly identical distributions:
# From most to least common: 3 nuggets -> 2 nuggets -> 4 nuggets -> 1 nugget -> ...
# Auth proportions: (34%) (26%) (18%) (11%)
# PR proportions: (32%) (30%) (26%) ( 9%)
author_num_nuggets_to_count = {i: 0 for i in range(0,7)}
pr_num_nuggets_to_count = {i: 0 for i in range(0,7)}
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
num_nuggets = sum(map(int, [row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif']]))
if row['auth_pr'] == 'auth_gold':
author_num_nuggets_to_count[num_nuggets] += 1
if row['auth_pr'] == 'pr_gold':
pr_num_nuggets_to_count[num_nuggets] += 1
print({k: f'{100*v/76:.2f}' for k, v in author_num_nuggets_to_count.items()})
print({k: f'{100*v/76:.2f}' for k, v in pr_num_nuggets_to_count.items()})
# Q2: What are the most common TLDR templates?
# A: Interesting, the top 2 templates (total 42 occurrences) are same between Authors and PRs.
# a) (area_field_topic, mode_of_contrib, details_descrip)
# b) (area_field_topic, mode_of_contrib)
# After that, next 3 starts deviating a bit, but still with the same base:
# authors = (area_field_topic, mode_of_contrib, results_findings)
# (area_field_topic, problem_motivation, mode_of_contrib)
# (area_field_topic, mode_of_contrib, details_descrip, value_signif)
# pr = (area_field_topic, problem_motivation, mode_of_contrib, details_descrip)
# = (area_field_topic, details_descrip)
# = (area_field_topic, mode_of_contrib, results_findings) # same as top 3rd in Auth
author_template_to_count = Counter()
pr_template_to_count = Counter()
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
template = (row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif'])
if row['auth_pr'] == 'auth_gold':
author_template_to_count[template] += 1
if row['auth_pr'] == 'pr_gold':
pr_template_to_count[template] += 1
print(author_template_to_count.most_common())
print(pr_template_to_count.most_common())
# Q3: How often does 'area_field_topic' and 'mode_of_contrib' co-occur?
# n_auth = 48/76 = 63%
# n_pr = 54/76 = 71%
n_auth = 0
n_pr = 0
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
if row['area_field_topic'] == '1' and row['mode_of_contrib'] == '1':
if row['auth_pr'] == 'auth_gold':
n_auth += 1
if row['auth_pr'] == 'pr_gold':
n_pr += 1
# Q4: Find examples with exactly the same nuggets but different styles
#
# H1-IBSgMz
# B16yEqkCZ
# SySpa-Z0Z
# rJegl2C9K7
# HJWpQCa7z
# rkgpCoRctm
# rkxkHnA5tX
# B1e9csRcFm
# r1kj4ACp-
# Hk91SGWR-
# r1GaAjRcF7
# SkGMOi05FQ
#
pid_to_templates = defaultdict(set)
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
template = (row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif'])
pid_to_templates[row['paper_id']].add(template)
for pid, templates in pid_to_templates.items():
if len(templates) == 1:
print(pid)
|
[
"collections.defaultdict",
"collections.Counter",
"csv.DictReader"
] |
[((2150, 2159), 'collections.Counter', 'Counter', ([], {}), '()\n', (2157, 2159), False, 'from collections import Counter, defaultdict\n'), ((2183, 2192), 'collections.Counter', 'Counter', ([], {}), '()\n', (2190, 2192), False, 'from collections import Counter, defaultdict\n'), ((3414, 3430), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3425, 3430), False, 'from collections import Counter, defaultdict\n'), ((736, 756), 'csv.DictReader', 'csv.DictReader', (['f_in'], {}), '(f_in)\n', (750, 756), False, 'import csv\n'), ((2233, 2253), 'csv.DictReader', 'csv.DictReader', (['f_in'], {}), '(f_in)\n', (2247, 2253), False, 'import csv\n'), ((2902, 2922), 'csv.DictReader', 'csv.DictReader', (['f_in'], {}), '(f_in)\n', (2916, 2922), False, 'import csv\n'), ((3471, 3491), 'csv.DictReader', 'csv.DictReader', (['f_in'], {}), '(f_in)\n', (3485, 3491), False, 'import csv\n')]
|
import argparse
import bot3
import datetime
import praw3 as praw
import random
import sqlite3
import string
import subprocess
import sys
import time
import tkinter
import traceback
import types
from voussoirkit import betterhelp
from voussoirkit import mutables
from voussoirkit import operatornotify
from voussoirkit import pipeable
from voussoirkit import sqlhelpers
from voussoirkit import vlogging
log = vlogging.getLogger(__name__, 'sb')
USERAGENT = '''
/u/GoldenSights SubredditBirthdays data collection:
Gathering the creation dates of subreddits for visualization.
More at https://github.com/voussoir/reddit/tree/master/SubredditBirthdays
'''.replace('\n', ' ').strip()
LOWERBOUND_STR = '2qh0j'
LOWERBOUND_INT = 4594339
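# The same lower bound in base36 and decimal form (b36('2qh0j') == 4594339);
# random id scans never go below it.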
FORMAT_MEMBER = '{idstr:>5s}, {human}, {nsfw}, {name:<25s} {subscribers:>10,}'
FORMAT_MESSAGE_NEW = 'New: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers}'
FORMAT_MESSAGE_UPDATE = 'Upd: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers} ({subscriber_diff})'
RANKS_UP_TO = 20000
# For the files sorted by subscriber count, display ranks up to this many.
GOODCHARS = string.ascii_letters + string.digits + '_'
DB_INIT = '''
BEGIN;
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS subreddits(
idint INT,
idstr TEXT,
created INT,
human TEXT,
name TEXT,
nsfw INT,
subscribers INT,
jumble INT,
subreddit_type INT,
submission_type INT,
last_scanned INT
);
CREATE INDEX IF NOT EXISTS index_subreddits_idstr ON subreddits(idstr);
CREATE INDEX IF NOT EXISTS index_subreddits_name ON subreddits(name);
CREATE INDEX IF NOT EXISTS index_subreddits_created ON subreddits(created);
CREATE INDEX IF NOT EXISTS index_subreddits_subscribers ON subreddits(subscribers);
--CREATE INDEX IF NOT EXISTS index_subreddits_idint ON subreddits(idint);
--CREATE INDEX IF NOT EXISTS index_subreddits_last_scanned ON subreddits(last_scanned);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS suspicious(
idint INT,
idstr TEXT,
name TEXT,
subscribers INT,
noticed INT
);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS popular(
idstr TEXT,
last_seen INT
);
CREATE INDEX IF NOT EXISTS index_popular_idstr on popular(idstr);
CREATE INDEX IF NOT EXISTS index_popular_last_seen on popular(last_seen);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS jumble(
idstr TEXT,
last_seen INT
);
CREATE INDEX IF NOT EXISTS index_jumble_idstr on jumble(idstr);
CREATE INDEX IF NOT EXISTS index_jumble_last_seen on jumble(last_seen);
--------------------------------------------------------------------------------
COMMIT;
'''
sql = sqlite3.connect('D:\\git\\reddit\\subredditbirthdays\\sb.db')
sqlhelpers.executescript(conn=sql, script=DB_INIT)
cur = sql.cursor()
# These numbers are used for interpreting the tuples that come from SELECT
SQL_SUBREDDIT_COLUMNS = [
'idint',
'idstr',
'created',
'human',
'name',
'nsfw',
'subscribers',
'subreddit_type',
'submission_type',
'last_scanned',
]
SQL_SUSPICIOUS_COLUMNS = [
'idint',
'idstr',
'name',
'subscribers',
'noticed',
]
SQL_SUBREDDIT = {key: index for (index, key) in enumerate(SQL_SUBREDDIT_COLUMNS)}
noinfolist = []
monthnumbers = {
'Jan': '01',
'Feb': '02',
'Mar': '03',
'Apr': '04',
'May': '05',
'Jun': '06',
'Jul': '07',
'Aug': '08',
'Sep': '09',
'Oct': '10',
'Nov': '11',
'Dec': '12',
}
SUBREDDIT_TYPE = {
'public': 0,
'restricted': 1,
'private': 2,
'archived': 3,
None: 4,
'employees_only': 5,
'gold_restricted': 6,
'gold_only': 7,
'user': 8,
}
SUBMISSION_TYPE = {
'any': 0,
'link': 1,
'self': 2,
None: 3,
}
SUBREDDIT_TYPE_REVERSE = {v: k for (k, v) in SUBREDDIT_TYPE.items()}
SUBMISSION_TYPE_REVERSE = {v: k for (k, v) in SUBMISSION_TYPE.items()}
SUBMISSION_OBJ = praw.objects.Submission
SUBREDDIT_OBJ = praw.objects.Subreddit
COMMENT_OBJ = praw.objects.Comment
r = None
def login():
global r
print('Logging in.')
r = praw.Reddit(USERAGENT)
bot3.login(r)
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
'''Converts an integer to a base36 string.'''
    if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
if type(i) == int:
return base36encode(i)
if type(i) == str:
return base36decode(i)
def chunklist(inputlist, chunksize):
if len(inputlist) < chunksize:
return [inputlist]
else:
outputlist = []
while len(inputlist) > 0:
outputlist.append(inputlist[:chunksize])
inputlist = inputlist[chunksize:]
return outputlist
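# completesweep() re-scans every subreddit already in the database (optionally ordered,
# or selected by a custom query) in batches of 100 ids, committing once at the end.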
def completesweep(sleepy=0, orderby='subscribers desc', query=None):
cur = sql.cursor()
if query is None:
if orderby is None:
cur.execute('SELECT idstr FROM subreddits WHERE created > 0')
else:
cur.execute('SELECT idstr FROM subreddits WHERE created > 0 ORDER BY %s' % orderby)
elif query == 'restricted':
cur.execute('SELECT idstr FROM subreddits WHERE created > 0 AND subreddit_type != 0 ORDER BY subscribers DESC')
else:
cur.execute(query)
try:
while True:
hundred = (cur.fetchone() for x in range(100))
hundred = (row for row in hundred if row is not None)
hundred = [idstr for (idstr,) in hundred]
if len(hundred) == 0:
break
for retry in range(20):
try:
processmega(hundred, commit=False)
break
except Exception:
traceback.print_exc()
time.sleep(sleepy)
except KeyboardInterrupt:
pass
except Exception:
traceback.print_exc()
sql.commit()
def fetchgenerator(cur):
while True:
fetch = cur.fetchone()
if fetch is None:
break
yield fetch
def get_jumble_subreddits():
cur.execute('SELECT idstr FROM jumble')
fetch = [x[0] for x in cur.fetchall()]
fetch = ['\'%s\'' % x for x in fetch]
fetch = '(' + ','.join(fetch) + ')'
query = 'SELECT * FROM subreddits WHERE idstr IN %s' % fetch
cur.execute(query)
subreddits = cur.fetchall()
#subreddits = []
#for subreddit in fetch:
# cur.execute('SELECT * FROM subreddits WHERE idstr == ?', [subreddit])
# subreddits.append(cur.fetchone())
return subreddits
def get_newest_sub():
brandnewest = list(r.get_new_subreddits(limit=1))[0]
return brandnewest.id
def get_now():
return datetime.datetime.now(datetime.timezone.utc).timestamp()
def humanize(timestamp):
day = datetime.datetime.utcfromtimestamp(timestamp)
human = datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
return human
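# modernize() fills the gap between the newest subreddit stored locally and the
# newest subreddit on reddit, optionally capped at `limit` new ids per call.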
def modernize(limit=None):
cur.execute('SELECT * FROM subreddits ORDER BY created DESC LIMIT 1')
finalitem = cur.fetchone()
print('Current final item:')
print(finalitem[SQL_SUBREDDIT['idstr']], finalitem[SQL_SUBREDDIT['human']], finalitem[SQL_SUBREDDIT['name']])
finalid = finalitem[SQL_SUBREDDIT['idint']]
print('Newest item:')
newestid = get_newest_sub()
print(newestid)
newestid = b36(newestid)
if limit is not None:
newestid = min(newestid, finalid+limit-1)
modernlist = [b36(x) for x in range(finalid, newestid+1)]
if len(modernlist) > 0:
processmega(modernlist, commit=False)
sql.commit()
def modernize_forever(limit=10000):
while True:
try:
modernize(limit=limit)
except Exception:
log.warning(traceback.format_exc())
time.sleep(300)
def modsfromid(subid):
if 't5_' not in subid:
subid = 't5_' + subid
subreddit = r.get_info(thing_id=subid)
mods = list(subreddit.get_moderators())
for m in mods:
print(m)
return mods
def normalize_subreddit_object(thing):
'''
Given a string, Subreddit, Submission, or Comment object, return
a Subreddit object.
'''
if isinstance(thing, SUBREDDIT_OBJ):
return thing
if isinstance(thing, str):
return r.get_subreddit(thing)
if isinstance(thing, (SUBMISSION_OBJ, COMMENT_OBJ)):
return thing.subreddit
raise ValueError('Dont know how to normalize', type(thing))
def process(
subreddit,
commit=True,
):
'''
Retrieve the API info for the subreddit and save it to the database
subreddit:
The subreddit(s) to process. Can be an individual or list of:
strings or Subreddit, Submission, or Comment objects.
'''
subreddits = []
processed_subreddits = []
if isinstance(subreddit, (tuple, list, set, types.GeneratorType)):
subreddits = iter(subreddit)
else:
subreddits = [subreddit]
for subreddit in subreddits:
subreddit = normalize_subreddit_object(subreddit)
processed_subreddits.append(subreddit)
created = subreddit.created_utc
created_human = humanize(subreddit.created_utc)
idstr = subreddit.id
is_nsfw = int(subreddit.over18 or 0)
name = subreddit.display_name
subscribers = subreddit.subscribers or 0
subreddit_type = SUBREDDIT_TYPE[subreddit.subreddit_type]
submission_type = SUBMISSION_TYPE[subreddit.submission_type]
now = int(get_now())
cur.execute('SELECT * FROM subreddits WHERE idstr == ?', [idstr])
f = cur.fetchone()
if f is None:
message = FORMAT_MESSAGE_NEW.format(
idstr=idstr,
human=created_human,
nsfw=is_nsfw,
name=name,
subscribers=subscribers,
)
print(message)
data = {
'idint': b36(idstr),
'idstr': idstr,
'created': created,
'human': created_human,
'nsfw': is_nsfw,
'name': name,
'subscribers': subscribers,
'subreddit_type': subreddit_type,
'submission_type': submission_type,
'last_scanned': now,
}
(qmarks, bindings) = sqlhelpers.insert_filler(SQL_SUBREDDIT_COLUMNS, data)
query = 'INSERT INTO subreddits VALUES(%s)' % qmarks
cur.execute(query, bindings)
else:
old_subscribers = f[SQL_SUBREDDIT['subscribers']]
subscriber_diff = subscribers - old_subscribers
if subscribers == 0 and old_subscribers > 2 and subreddit_type != SUBREDDIT_TYPE['private']:
print('SUSPICIOUS %s' % name)
data = {
'idint': b36(idstr),
'idstr': idstr,
'name': name,
'subscribers': old_subscribers,
'noticed': int(get_now()),
}
(qmarks, bindings) = sqlhelpers.insert_filler(SQL_SUSPICIOUS_COLUMNS, data)
query = 'INSERT INTO suspicious VALUES(%s)' % qmarks
cur.execute(query, bindings)
message = FORMAT_MESSAGE_UPDATE.format(
idstr=idstr,
human=created_human,
nsfw=is_nsfw,
name=name,
subscribers=subscribers,
subscriber_diff=subscriber_diff
)
print(message)
data = {
'idstr': idstr,
'subscribers': subscribers,
'subreddit_type': subreddit_type,
'submission_type': submission_type,
'last_scanned': now,
}
(query, bindings) = sqlhelpers.update_filler(data, where_key='idstr')
query = 'UPDATE subreddits %s' % query
cur.execute(query, bindings)
#cur.execute('''
# UPDATE subreddits SET
# subscribers = @subscribers,
# subreddit_type = @subreddit_type,
# submission_type = @submission_type,
# last_scanned = @last_scanned
# WHERE idstr == @idstr
# ''', data)
processed_subreddits.append(subreddit)
if commit:
sql.commit()
return processed_subreddits
def process_input():
while True:
x = input('p> ')
try:
process(x)
except KeyboardInterrupt:
break
except Exception:
traceback.print_exc()
def processmega(srinput, isrealname=False, chunksize=100, docrash=False, commit=True):
'''
`srinput` can be a list of subreddit IDs or fullnames, or display names
if `isrealname` is also True.
isrealname:
Interpret `srinput` as a list of actual subreddit names, not IDs.
chunksize:
The number of fullnames to get from api/info at once.
docrash:
If False, ignore HTTPExceptions and keep moving forward.
'''
global noinfolist
if type(srinput) == str:
srinput = srinput.replace(' ', '')
srinput = srinput.split(',')
if isrealname:
for subname in srinput:
process(subname)
return
processed_subreddits = []
remaining = len(srinput)
for x in range(len(srinput)):
if 't5_' not in srinput[x]:
srinput[x] = 't5_' + srinput[x]
srinput = chunklist(srinput, chunksize)
for subset in srinput:
try:
print(subset[0] + ' - ' + subset[-1], remaining)
subreddits = r.get_info(thing_id=subset)
try:
for sub in subreddits:
processed_subreddits.extend(process(sub, commit=commit))
except TypeError:
traceback.print_exc()
noinfolist = subset[:]
if len(noinfolist) == 1:
print('Received no info. See variable `noinfolist`')
else:
#for item in noinfolist:
# processmega([item])
pass
remaining -= len(subset)
except praw.errors.HTTPException as e:
traceback.print_exc()
print(vars(e))
if docrash:
raise
return processed_subreddits
def processrand(count, doublecheck=False, sleepy=0):
'''
Gets random IDs between a known lower bound and the newest collection, and
pass them into processmega().
count:
How many you want
doublecheck:
Should it reroll duplicates before running
sleepy:
Used to sleep longer than the required 2 seconds
'''
lower = LOWERBOUND_INT
cur.execute('SELECT * FROM subreddits ORDER BY idstr DESC LIMIT 1')
upper = cur.fetchone()[SQL_SUBREDDIT['idstr']]
print('<' + b36(lower) + ',', upper + '>', end=', ')
upper = b36(upper)
totalpossible = upper - lower
print(totalpossible, 'possible')
rands = set()
for x in range(count):
rand = random.randint(lower, upper)
rand = b36(rand)
if doublecheck:
while rand in rands:
rand = random.randint(lower, upper)
rand = b36(rand)
rands.add(rand)
processmega(rands)
def show():
file_all_time = open('show\\all-time.txt', 'w')
file_all_name = open('show\\all-name.txt', 'w')
file_all_subscribers = open('show\\all-subscribers.txt', 'w')
file_dirty_time = open('show\\dirty-time.txt', 'w')
file_dirty_name = open('show\\dirty-name.txt', 'w')
file_dirty_subscribers = open('show\\dirty-subscribers.txt', 'w')
file_jumble_sfw = open('show\\jumble.txt', 'w')
file_jumble_nsfw = open('show\\jumble-nsfw.txt', 'w')
file_duplicates = open('show\\duplicates.txt', 'w')
file_missing = open('show\\missing.txt', 'w')
file_stats = open('show\\statistics.txt', 'w')
file_readme = open('README.md', 'r')
cur.execute('SELECT COUNT(idstr) FROM subreddits WHERE created != 0')
itemcount_valid = cur.fetchone()[0]
itemcount_nsfw = 0
name_lengths = {}
print(itemcount_valid, 'subreddits')
print('Writing time files.')
cur.execute('SELECT * FROM subreddits WHERE created !=0 ORDER BY created ASC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
print(itemf, file=file_all_time)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_time)
itemcount_nsfw += 1
file_all_time.close()
file_dirty_time.close()
print('Writing name files and duplicates.')
previousitem = None
inprogress = False
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY LOWER(name) ASC')
for item in fetchgenerator(cur):
if previousitem is not None and item[SQL_SUBREDDIT['name']] == previousitem[SQL_SUBREDDIT['name']]:
print(memberformat(previousitem), file=file_duplicates)
inprogress = True
elif inprogress:
print(memberformat(previousitem), file=file_duplicates)
inprogress = False
previousitem = item
name_length = len(item[SQL_SUBREDDIT['name']])
name_lengths[name_length] = name_lengths.get(name_length, 0) + 1
itemf = memberformat(item)
print(itemf, file=file_all_name)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_name)
file_duplicates.close()
file_all_name.close()
file_dirty_name.close()
name_lengths = {'%02d'%k: v for (k, v) in name_lengths.items()}
print('Writing subscriber files.')
ranks = {'all': 1, 'nsfw': 1}
def write_with_rank(itemf, ranktype, filehandle):
index = ranks[ranktype]
if index <= RANKS_UP_TO:
itemf += '{:>9,}'.format(index)
print(itemf, file=filehandle)
ranks[ranktype] += 1
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY subscribers DESC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
write_with_rank(itemf, 'all', file_all_subscribers)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
write_with_rank(itemf, 'nsfw', file_dirty_subscribers)
file_all_subscribers.close()
file_dirty_subscribers.close()
print('Writing jumble.')
for item in get_jumble_subreddits():
itemf = memberformat(item)
if int(item[SQL_SUBREDDIT['nsfw']]) == 0:
print(itemf, file=file_jumble_sfw)
else:
print(itemf, file=file_jumble_nsfw)
file_jumble_sfw.close()
file_jumble_nsfw.close()
print('Writing missing.')
cur.execute('SELECT * FROM subreddits WHERE created == 0 ORDER BY idstr ASC')
for item in fetchgenerator(cur):
print(item[SQL_SUBREDDIT['idstr']], file=file_missing)
file_missing.close()
print('Writing statistics.')
headline = 'Collected {0:,} subreddits\n'.format(itemcount_valid)
statisticoutput = headline + '\n\n'
statisticoutput += ' SFW: {0:,}\n'.format(itemcount_valid - itemcount_nsfw)
statisticoutput += 'NSFW: {0:,}\n\n\n'.format(itemcount_nsfw)
statisticoutput += 'Subreddit type:\n'
subreddit_types = list(SUBREDDIT_TYPE_REVERSE.keys())
subreddit_types.sort()
subreddit_types = [SUBREDDIT_TYPE_REVERSE[k] for k in subreddit_types]
for subreddit_type in subreddit_types:
index = SUBREDDIT_TYPE[subreddit_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND subreddit_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(subreddit_type), count)
statisticoutput += '\n'
statisticoutput += 'Submission type (None means approved submitters only or inaccessible):\n'
submission_types = list(SUBMISSION_TYPE_REVERSE.keys())
submission_types.sort()
submission_types = [SUBMISSION_TYPE_REVERSE[k] for k in submission_types]
for submission_type in submission_types:
index = SUBMISSION_TYPE[submission_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND submission_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(submission_type), count)
statisticoutput += '\n\n'
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY created DESC limit 20000')
last20k = cur.fetchall()
timediff = last20k[0][SQL_SUBREDDIT['created']] - last20k[-1][SQL_SUBREDDIT['created']]
statisticoutput += 'Over the last 20,000 subreddits:\n'
statisticoutput += '%.2f subs are created each hour\n' % (20000 / (timediff/3600))
statisticoutput += '%.2f subs are created each day\n\n\n' % (20000 / (timediff/86400))
################################
# Breakdown by time period
# hour of day, day of week, day of month, month of year, month-year, year
def datetimedict(statsdict, strf):
statsdict[strf] = statsdict.get(strf, 0) + 1
hoddict = {}
dowdict = {}
domdict = {}
moydict = {}
myrdict = {}
yerdict = {}
print(' performing time breakdown')
cur.execute('SELECT * FROM subreddits WHERE created != 0')
for item in fetchgenerator(cur):
dt = datetime.datetime.utcfromtimestamp(item[SQL_SUBREDDIT['created']])
datetimedict(hoddict, dt.strftime('%H')) # 01
datetimedict(dowdict, dt.strftime('%A')) # Monday
datetimedict(domdict, dt.strftime('%d')) # 01
datetimedict(moydict, dt.strftime('%B')) # January
datetimedict(myrdict, dt.strftime('%b%Y')) # Jan2015
datetimedict(yerdict, dt.strftime('%Y')) # 2015
print(' forming columns')
plotnum = 0
mapping = [
{'label': 'hour of day', 'specialsort': None, 'dict': hoddict},
{'label': 'day of week', 'specialsort': 'day', 'dict': dowdict},
{'label': 'day of month', 'specialsort': None, 'dict': domdict},
{'label': 'month of year', 'specialsort': 'month', 'dict': moydict},
{'label': 'year', 'specialsort': None, 'dict': yerdict},
{'label': 'month-year', 'specialsort': 'monthyear', 'dict': myrdict},
{'label': 'name length', 'specialsort': None, 'dict': name_lengths},
]
for (index, collection) in enumerate(mapping):
d = collection['dict']
dkeys_primary = list(d.keys())
dkeys_primary.sort(key=d.get)
dkeys_secondary = specialsort(dkeys_primary, collection['specialsort'])
dvals = [d[x] for x in dkeys_secondary]
statisticoutput += collection['label'] + '\n'
for (keyindex, key) in enumerate(dkeys_primary):
val = d[key]
val = '{0:,}'.format(val)
spacer = 34 - (len(key) + len(val))
spacer = '.' * spacer
statisticoutput += key + spacer + val
statisticoutput += ' ' * 8
key = dkeys_secondary[keyindex]
val = d[key]
val = '{0:,}'.format(val)
spacer = 34 - (len(key) + len(val))
spacer = '.' * spacer
statisticoutput += key + spacer + val
statisticoutput += '\n'
statisticoutput += '\n'
if d is name_lengths:
upperlabel = 'Name Lengths'
else:
upperlabel = 'Subreddits created - %s' % collection['label']
plotbars(
filename=upperlabel,
upperlabel=upperlabel,
inputdata=[dkeys_secondary, dvals],
colormid='#43443a',
forcezero=True,
)
plotnum += 1
if d is myrdict:
# In addition to the total month graph, plot the last 15 months
plotbars(
filename=upperlabel + ' short',
upperlabel=upperlabel + ' short',
inputdata=[dkeys_secondary[-15:], dvals[-15:]],
colorbg='#272822',
colorfg='#000',
colormid='#43443a',
forcezero=True,
)
plotnum += 1
#
# Breakdown by time period
################################
print(statisticoutput, file=file_stats)
file_stats.close()
print('Updating Readme')
readmelines = file_readme.readlines()
file_readme.close()
readmelines[3] = '#####' + headline
readmelines[5] = '#####[Today\'s jumble](http://reddit.com/r/%s)\n' % jumble(nsfw=False)
file_readme = open('README.md', 'w')
file_readme.write(''.join(readmelines))
file_readme.close()
time.sleep(2)
subprocess.call('PNGCREATOR.bat', shell=True, cwd='spooky')
print()
def memberformat(member):
member = FORMAT_MEMBER.format(
idstr=member[SQL_SUBREDDIT['idstr']],
human=member[SQL_SUBREDDIT['human']],
nsfw=member[SQL_SUBREDDIT['nsfw']],
name=member[SQL_SUBREDDIT['name']],
subscribers=member[SQL_SUBREDDIT['subscribers']],
)
return member
def dictadding(targetdict, item):
if item not in targetdict:
targetdict[item] = 1
else:
targetdict[item] = targetdict[item] + 1
return targetdict
def specialsort(inlist, mode=None):
if mode == 'month':
return [
'January',
'February',
'March', 'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December'
]
if mode == 'day':
return [
'Sunday',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday'
]
if mode == 'monthyear':
td = {}
for item in inlist:
nitem = item
nitem = item.replace(item[:3], monthnumbers[item[:3]])
nitem = nitem[3:] + nitem[:3]
td[item] = nitem
tdkeys = list(td.keys())
#print(td)
tdkeys.sort(key=td.get)
#print(tdkeys)
return tdkeys
if mode is None:
return sorted(inlist)
def search(
query='',
casesense=False,
filterout=[],
subscribers=0,
nsfwmode=2,
doreturn=False,
sort=None,
):
'''
Search for a subreddit by name
*str query = The search query
"query" = results where "query" is in the name
"*query" = results where "query" is at the end of the name
"query*" = results where "query" is at the beginning of the name
"*query*" = results where "query" is in the middle of the name
bool casesense = is the search case sensitive
list filterout = [list, of, words] to omit from search. Follows casesense
int subscribers = minimum number of subscribers
int nsfwmode =
0 - Clean only
1 - Dirty only
2 - All
int sort = The integer representing the sql column to sort by. Defaults
to no sort.
'''
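    # Illustrative calls (assuming the table is populated):
    #   search('python*', subscribers=1000)  -> names starting with "python"
    #   search('*game*', nsfwmode=0, sort=SQL_SUBREDDIT['subscribers'])
    #       -> SFW names with "game" in the interior, sorted by subscribers (descending)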
querys = ''.join([c for c in query if c in GOODCHARS])
queryx = '%%{term}%%'.format(term=querys)
if '!' in query:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ?', [querys])
return cur.fetchone()
if nsfwmode in [0, 1]:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?', [queryx, subscribers, nsfwmode])
else:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?', [queryx, subscribers])
results = []
if casesense is False:
querys = querys.lower()
filterout = [x.lower() for x in filterout]
if '*' in query:
positional = True
front = query[-1] == '*'
back = query[0] == '*'
if front and back:
mid = True
front = False
back = False
else:
mid = False
else:
positional = False
lenq = len(querys)
for item in fetchgenerator(cur):
name = item[SQL_SUBREDDIT['name']]
if casesense is False:
name = name.lower()
if querys not in name:
#print('%s not in %s' % (querys, name))
continue
if (positional and front) and (name[:lenq] != querys):
#print('%s not front %s (%s)' % (querys, name, name[:lenq]))
continue
if (positional and back) and (name[-lenq:] != querys):
#print('%s not back %s (%s)' % (querys, name, name[-lenq:]))
continue
if (positional and mid) and (querys not in name[1:-1]):
#print('%s not mid %s (%s)' % (querys, name, name[1:-1]))
continue
if any(filters in name for filters in filterout):
#print('%s not filter %s' % (querys, name))
continue
results.append(item)
if len(results) == 0:
if doreturn:
return []
else:
return
if sort is not None:
is_numeric = isinstance(results[0][sort], int)
if is_numeric:
results.sort(key=lambda x: x[sort], reverse=True)
else:
results.sort(key=lambda x: x[sort].lower())
if doreturn is True:
return results
else:
for item in results:
print(item)
def findwrong():
cur.execute('SELECT * FROM subreddits WHERE name != ?', ['?'])
fetch = cur.fetchall()
fetch.sort(key=lambda x: x[SQL_SUBREDDIT['idstr']])
#sorted by ID
fetch = fetch[25:]
pos = 0
wrongs = []
while pos < len(fetch)-5:
if fetch[pos][1] > fetch[pos+1][1]:
wrongs.append(str(fetch[pos-1]))
wrongs.append(str(fetch[pos]))
wrongs.append(str(fetch[pos+1]) + "\n")
pos += 1
for wrong in wrongs:
print(wrong)
def processjumble(count, nsfw=False):
for x in range(count):
sub = r.get_random_subreddit(nsfw=nsfw)
process(sub, commit=False)
last_seen = int(get_now())
cur.execute('SELECT * FROM jumble WHERE idstr == ?', [sub.id])
if cur.fetchone() is None:
cur.execute('INSERT INTO jumble VALUES(?, ?)', [sub.id, last_seen])
else:
cur.execute(
'UPDATE jumble SET last_seen = ? WHERE idstr == ?',
[sub.id, last_seen]
)
sql.commit()
def processpopular(count, sort='hot'):
subreddit = r.get_subreddit('popular')
if sort == 'hot':
submissions = subreddit.get_hot(limit=count)
elif sort == 'new':
submissions = subreddit.get_new(limit=count)
else:
raise ValueError(sort)
submissions = list(submissions)
subreddit_ids = list({submission.subreddit_id for submission in submissions})
subreddits = processmega(subreddit_ids, commit=False)
last_seen = int(get_now())
for subreddit in subreddits:
cur.execute('SELECT * FROM popular WHERE idstr == ?', [subreddit.id])
if cur.fetchone() is None:
cur.execute('INSERT INTO popular VALUES(?, ?)', [subreddit.id, last_seen])
else:
cur.execute(
'UPDATE popular SET last_seen = ? WHERE idstr == ?',
[last_seen, subreddit.id]
)
sql.commit()
def jumble(count=20, nsfw=False):
subreddits = get_jumble_subreddits()
if nsfw is not None:
subreddits = [x for x in subreddits if x[SQL_SUBREDDIT['nsfw']] == int(bool(nsfw))]
random.shuffle(subreddits)
subreddits = subreddits[:count]
subreddits = [f[:-1] for f in subreddits]
jumble_string = [x[SQL_SUBREDDIT['name']] for x in subreddits]
jumble_string = '+'.join(jumble_string)
output = [jumble_string, subreddits]
return jumble_string
def rounded(x, rounding=100):
return int(round(x/rounding)) * rounding
def plotbars(
filename,
inputdata,
upperlabel='Subreddits created',
colorbg="#fff",
colorfg="#000",
colormid="#888",
forcezero=False,
):
'''
Create postscript vectors of data
filename = Name of the file without extension
inputdata = A list of two lists. First list has the x axis labels, second list
has the y axis data. x label 14 coresponds to y datum 14, etc.
'''
print(' Printing', filename)
    t = tkinter.Tk()
canvas = tkinter.Canvas(t, width=3840, height=2160, bg=colorbg)
canvas.pack()
#Y axis
canvas.create_line(430, 250, 430, 1755, width=10, fill=colorfg)
#X axis
canvas.create_line(430, 1750, 3590, 1750, width=10, fill=colorfg)
dkeys = inputdata[0]
dvals = inputdata[1]
entrycount = len(dkeys)
availablespace = 3140
availableheight= 1490
entrywidth = availablespace / entrycount
#print(dkeys, dvals, "Width:", entrywidth)
smallest = min(dvals)
bottom = int(smallest*0.75) - 5
bottom = 0 if bottom < 8 else rounded(bottom, 10)
if forcezero:
bottom = 0
largest = max(dvals)
top = int(largest + (largest / 5))
top = rounded(top, 10)
print(bottom, top)
span = top - bottom
perpixel = span / availableheight
curx = 445
cury = 1735
labelx = 420
labely = 255
#canvas.create_text(labelx, labely, text=str(top), font=("Consolas", 72), anchor="e")
labelspan = 130
canvas.create_text(175, 100, text=upperlabel, font=("Consolas", 72), anchor="w", fill=colorfg)
for x in range(12):
value = int(top -((labely - 245) * perpixel))
value = rounded(value, 10)
value = '{0:,}'.format(value)
canvas.create_text(labelx, labely, text=value, font=("Consolas", 72), anchor="e", fill=colorfg)
canvas.create_line(430, labely, 3590, labely, width=2, fill=colormid)
labely += labelspan
for entrypos in range(entrycount):
entry = dkeys[entrypos]
entryvalue = dvals[entrypos]
entryx0 = curx + 10
entryx1 = entryx0 + (entrywidth-10)
curx += entrywidth
entryy0 = cury
entryy1 = entryvalue - bottom
entryy1 = entryy1/perpixel
#entryy1 -= bottom
#entryy1 /= perpixel
entryy1 = entryy0 - entryy1
#print(perpixel, entryy1)
#print(entry, entryx0,entryy0, entryx1, entryy1)
canvas.create_rectangle(entryx0, entryy0, entryx1, entryy1, fill=colorfg, outline=colorfg)
font0x = entryx0 + (entrywidth / 2)
font0y = entryy1 - 5
font1y = 1760
entryvalue = round(entryvalue)
fontsize0 = len(str(entryvalue))
fontsize0 = round(entrywidth / fontsize0) + 3
fontsize0 = 100 if fontsize0 > 100 else fontsize0
fontsize1 = len(str(entry))
fontsize1 = round(1.5 * entrywidth / fontsize1) + 5
fontsize1 = 60 if fontsize1 > 60 else fontsize1
canvas.create_text(font0x, font0y, text=entryvalue, font=("Consolas", fontsize0), anchor="s", fill=colorfg)
canvas.create_text(font0x, font1y, text=entry, font=("Consolas", fontsize1), anchor="n", fill=colorfg)
canvas.update()
print(' Done')
canvas.postscript(file=f'spooky\\{filename}.ps', width=3840, height=2160)
t.geometry("1x1+1+1")
t.update()
t.destroy()
def _idle():
while True:
try:
modernize()
processpopular(100, 'new')
processjumble(30, nsfw=False)
processjumble(30, nsfw=True)
print('Great job!')
except Exception:
traceback.print_exc()
time.sleep(180)
# Command line #####################################################################################
DOCSTRING = '''
Subreddit Birthdays
===================
{modernize_forever}
{modernize_once}
'''
SUB_DOCSTRINGS = dict(
modernize_forever='''
modernize_forever:
Gather new subreddits forever.
''',
modernize_once='''
modernize_once:
Gather new subreddits once.
''',
)
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
NOTIFY_EVERY_LINE = mutables.Boolean(False)
@pipeable.ctrlc_return1
def modernize_once_argparse(args):
login()
modernize(limit=args.limit)
return 0
@pipeable.ctrlc_return1
def modernize_forever_argparse(args):
login()
NOTIFY_EVERY_LINE.set(True)
modernize_forever()
return 0
@operatornotify.main_decorator(subject='sb', notify_every_line=NOTIFY_EVERY_LINE)
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser(description=DOCSTRING)
subparsers = parser.add_subparsers()
p_modernize_once = subparsers.add_parser('modernize_once', aliases=['modernize-once'])
p_modernize_once.add_argument('--limit', default=None)
p_modernize_once.set_defaults(func=modernize_once_argparse)
p_modernize_forever = subparsers.add_parser('modernize_forever', aliases=['modernize-forever'])
p_modernize_forever.set_defaults(func=modernize_forever_argparse)
return betterhelp.subparser_main(argv, parser, DOCSTRING, SUB_DOCSTRINGS)
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
|
[
"argparse.ArgumentParser",
"voussoirkit.vlogging.getLogger",
"random.shuffle",
"bot3.login",
"voussoirkit.betterhelp.add_previews",
"praw3.Reddit",
"traceback.print_exc",
"random.randint",
"voussoirkit.betterhelp.subparser_main",
"datetime.datetime.utcfromtimestamp",
"traceback.format_exc",
"datetime.datetime.now",
"tkinter.Tk",
"voussoirkit.operatornotify.main_decorator",
"datetime.datetime.strftime",
"voussoirkit.mutables.Boolean",
"time.sleep",
"sqlite3.connect",
"subprocess.call",
"tkinter.Canvas",
"voussoirkit.sqlhelpers.update_filler",
"voussoirkit.sqlhelpers.insert_filler",
"voussoirkit.sqlhelpers.executescript"
] |
[((410, 444), 'voussoirkit.vlogging.getLogger', 'vlogging.getLogger', (['__name__', '"""sb"""'], {}), "(__name__, 'sb')\n", (428, 444), False, 'from voussoirkit import vlogging\n'), ((2853, 2914), 'sqlite3.connect', 'sqlite3.connect', (['"""D:\\\\git\\\\reddit\\\\subredditbirthdays\\\\sb.db"""'], {}), "('D:\\\\git\\\\reddit\\\\subredditbirthdays\\\\sb.db')\n", (2868, 2914), False, 'import sqlite3\n'), ((2915, 2965), 'voussoirkit.sqlhelpers.executescript', 'sqlhelpers.executescript', ([], {'conn': 'sql', 'script': 'DB_INIT'}), '(conn=sql, script=DB_INIT)\n', (2939, 2965), False, 'from voussoirkit import sqlhelpers\n'), ((36497, 36547), 'voussoirkit.betterhelp.add_previews', 'betterhelp.add_previews', (['DOCSTRING', 'SUB_DOCSTRINGS'], {}), '(DOCSTRING, SUB_DOCSTRINGS)\n', (36520, 36547), False, 'from voussoirkit import betterhelp\n'), ((36568, 36591), 'voussoirkit.mutables.Boolean', 'mutables.Boolean', (['(False)'], {}), '(False)\n', (36584, 36591), False, 'from voussoirkit import mutables\n'), ((36855, 36940), 'voussoirkit.operatornotify.main_decorator', 'operatornotify.main_decorator', ([], {'subject': '"""sb"""', 'notify_every_line': 'NOTIFY_EVERY_LINE'}), "(subject='sb', notify_every_line=NOTIFY_EVERY_LINE\n )\n", (36884, 36940), False, 'from voussoirkit import operatornotify\n'), ((4277, 4299), 'praw3.Reddit', 'praw.Reddit', (['USERAGENT'], {}), '(USERAGENT)\n', (4288, 4299), True, 'import praw3 as praw\n'), ((4304, 4317), 'bot3.login', 'bot3.login', (['r'], {}), '(r)\n', (4314, 4317), False, 'import bot3\n'), ((7321, 7366), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['timestamp'], {}), '(timestamp)\n', (7355, 7366), False, 'import datetime\n'), ((7379, 7435), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['day', '"""%b %d %Y %H:%M:%S UTC"""'], {}), "(day, '%b %d %Y %H:%M:%S UTC')\n", (7405, 7435), False, 'import datetime\n'), ((25172, 25185), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (25182, 25185), False, 'import time\n'), ((25190, 25249), 'subprocess.call', 'subprocess.call', (['"""PNGCREATOR.bat"""'], {'shell': '(True)', 'cwd': '"""spooky"""'}), "('PNGCREATOR.bat', shell=True, cwd='spooky')\n", (25205, 25249), False, 'import subprocess\n'), ((32050, 32076), 'random.shuffle', 'random.shuffle', (['subreddits'], {}), '(subreddits)\n', (32064, 32076), False, 'import random\n'), ((32908, 32920), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (32918, 32920), False, 'import tkinter\n'), ((32935, 32989), 'tkinter.Canvas', 'tkinter.Canvas', (['t'], {'width': '(3840)', 'height': '(2160)', 'bg': 'colorbg'}), '(t, width=3840, height=2160, bg=colorbg)\n', (32949, 32989), False, 'import tkinter\n'), ((36990, 37036), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DOCSTRING'}), '(description=DOCSTRING)\n', (37013, 37036), False, 'import argparse\n'), ((37476, 37542), 'voussoirkit.betterhelp.subparser_main', 'betterhelp.subparser_main', (['argv', 'parser', 'DOCSTRING', 'SUB_DOCSTRINGS'], {}), '(argv, parser, DOCSTRING, SUB_DOCSTRINGS)\n', (37501, 37542), False, 'from voussoirkit import betterhelp\n'), ((8306, 8321), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (8316, 8321), False, 'import time\n'), ((15670, 15698), 'random.randint', 'random.randint', (['lower', 'upper'], {}), '(lower, upper)\n', (15684, 15698), False, 'import random\n'), ((21907, 21973), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (["item[SQL_SUBREDDIT['created']]"], {}), 
"(item[SQL_SUBREDDIT['created']])\n", (21941, 21973), False, 'import datetime\n'), ((36086, 36101), 'time.sleep', 'time.sleep', (['(180)'], {}), '(180)\n', (36096, 36101), False, 'import time\n'), ((6314, 6332), 'time.sleep', 'time.sleep', (['sleepy'], {}), '(sleepy)\n', (6324, 6332), False, 'import time\n'), ((6406, 6427), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6425, 6427), False, 'import traceback\n'), ((7228, 7272), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (7249, 7272), False, 'import datetime\n'), ((10877, 10930), 'voussoirkit.sqlhelpers.insert_filler', 'sqlhelpers.insert_filler', (['SQL_SUBREDDIT_COLUMNS', 'data'], {}), '(SQL_SUBREDDIT_COLUMNS, data)\n', (10901, 10930), False, 'from voussoirkit import sqlhelpers\n'), ((12373, 12422), 'voussoirkit.sqlhelpers.update_filler', 'sqlhelpers.update_filler', (['data'], {'where_key': '"""idstr"""'}), "(data, where_key='idstr')\n", (12397, 12422), False, 'from voussoirkit import sqlhelpers\n'), ((11615, 11669), 'voussoirkit.sqlhelpers.insert_filler', 'sqlhelpers.insert_filler', (['SQL_SUSPICIOUS_COLUMNS', 'data'], {}), '(SQL_SUSPICIOUS_COLUMNS, data)\n', (11639, 11669), False, 'from voussoirkit import sqlhelpers\n'), ((13150, 13171), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13169, 13171), False, 'import traceback\n'), ((14821, 14842), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14840, 14842), False, 'import traceback\n'), ((15804, 15832), 'random.randint', 'random.randint', (['lower', 'upper'], {}), '(lower, upper)\n', (15818, 15832), False, 'import random\n'), ((36056, 36077), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (36075, 36077), False, 'import traceback\n'), ((8274, 8296), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8294, 8296), False, 'import traceback\n'), ((14412, 14433), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14431, 14433), False, 'import traceback\n'), ((6280, 6301), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6299, 6301), False, 'import traceback\n')]
|
from cleantxt import text
from tqdm import tqdm
import argparse
import os
def rule(s):
try:
k, v = map(str, s.split(','))
return k, v
except:
raise argparse.ArgumentTypeError("Escape Rule must be key,value ")
def main():
parser = argparse.ArgumentParser(
prog="cleantxt cleaning text from noise commande line interface",
description="Arguments for cleantxt to clean document from noise (cleantxt)",
usage=""" cleantxt --doc=[path_to_doc]
--out=[path_out_file]
--f=[0]
--t=[100]
--do_lower=True
--white_space=True
--punctuation=True
--duplicated_chars=True
--alpha_num=True
--accent=True
--escape key,value ə,a œ,oe""",
allow_abbrev=False
)
parser.add_argument(
"--doc",
type=str,
help="path of document to clean it",
required=True
)
parser.add_argument(
"--out",
default="out.txt",
type=str,
help="path of clean document (default out.txt)",
required=False
)
parser.add_argument(
"--f",
default=0,
type=int,
help="index of starting document (default 0)",
required=False
)
parser.add_argument(
"--t",
default=None,
type=int,
help="index of end of document (default None) meaning the end of document",
required=False
)
parser.add_argument(
"--escape",
default=False,
type=rule,
help="Custom escape rules list with tuple k,v space k1,v1 ...",
required=False,
nargs='+'
)
parser.add_argument(
"--do_lower",
default=True,
type=bool,
help="Lower case all text (default True)",
required=False
)
parser.add_argument(
"--white_space",
default=True,
type=bool,
help="Escape more then one spaces (default True)",
required=False
)
parser.add_argument(
"--punctuation",
default=False,
type=bool,
help="Escape punctuation (default False)",
required=False
)
parser.add_argument(
"--duplicated_chars",
default=False,
type=bool,
help="Escape duplicated chars more then two time (default False)",
required=False
)
parser.add_argument(
"--alpha_num",
default=True,
type=bool,
help="Escape non alpha numeric chars (default True)",
required=False
)
parser.add_argument(
"--accent",
default=False,
type=bool,
help="Escape accents (default False)",
required=False
)
args = parser.parse_args()
if args.t:
if args.f > args.t:
raise Exception("--f must be lower then --t")
if not os.path.exists(args.doc):
raise FileNotFoundError(
            'document does not exist: {}'.format(args.doc)
)
if os.path.splitext(args.doc)[1] not in ['.txt', '.tab']:
raise Exception(
            'file not accepted, please choose a (txt) or (tab) file'
)
file = open(args.doc, mode='r', encoding='utf8')
data = file.readlines()
file.close()
if args.t:
data_process = data[args.f:args.t]
else:
data_process = data
if args.escape:
escape = args.escape
else:
escape = None
with open(args.out, mode='w+', encoding='utf8') as out_file:
for x in tqdm(data_process, desc='clean document with cleantxt cli'):
out_file.write(
text.clean_text(
x,
whitespace=args.white_space,
punctuation=args.punctuation,
duplicated=args.duplicated_chars,
alphnum=args.alpha_num,
accent=args.accent,
others=escape
) + '\n'
)
if __name__ == '__main__':
main()
|
[
"tqdm.tqdm",
"argparse.ArgumentParser",
"cleantxt.text.clean_text",
"os.path.exists",
"os.path.splitext",
"argparse.ArgumentTypeError"
] |
[((270, 894), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""cleantxt cleaning text from noise commande line interface"""', 'description': '"""Arguments for cleantxt to clean document from noise (cleantxt)"""', 'usage': '""" cleantxt --doc=[path_to_doc] \n --out=[path_out_file]\n --f=[0] \n --t=[100] \n --do_lower=True\n --white_space=True \n --punctuation=True \n --duplicated_chars=True\n --alpha_num=True \n --accent=True \n --escape key,value ə,a œ,oe"""', 'allow_abbrev': '(False)'}), '(prog=\n \'cleantxt cleaning text from noise commande line interface\',\n description=\n \'Arguments for cleantxt to clean document from noise (cleantxt)\', usage\n =\n """ cleantxt --doc=[path_to_doc] \n --out=[path_out_file]\n --f=[0] \n --t=[100] \n --do_lower=True\n --white_space=True \n --punctuation=True \n --duplicated_chars=True\n --alpha_num=True \n --accent=True \n --escape key,value ə,a œ,oe"""\n , allow_abbrev=False)\n', (293, 894), False, 'import argparse\n'), ((2991, 3015), 'os.path.exists', 'os.path.exists', (['args.doc'], {}), '(args.doc)\n', (3005, 3015), False, 'import os\n'), ((3640, 3699), 'tqdm.tqdm', 'tqdm', (['data_process'], {'desc': '"""clean document with cleantxt cli"""'}), "(data_process, desc='clean document with cleantxt cli')\n", (3644, 3699), False, 'from tqdm import tqdm\n'), ((182, 242), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Escape Rule must be key,value """'], {}), "('Escape Rule must be key,value ')\n", (208, 242), False, 'import argparse\n'), ((3123, 3149), 'os.path.splitext', 'os.path.splitext', (['args.doc'], {}), '(args.doc)\n', (3139, 3149), False, 'import os\n'), ((3745, 3924), 'cleantxt.text.clean_text', 'text.clean_text', (['x'], {'whitespace': 'args.white_space', 'punctuation': 'args.punctuation', 'duplicated': 'args.duplicated_chars', 'alphnum': 'args.alpha_num', 'accent': 'args.accent', 'others': 'escape'}), '(x, whitespace=args.white_space, punctuation=args.\n punctuation, duplicated=args.duplicated_chars, alphnum=args.alpha_num,\n accent=args.accent, others=escape)\n', (3760, 3924), False, 'from cleantxt import text\n')]
|
import utils as util
import tensorflow as tf
import numpy as np
def forecast_model(series, time,forecastDays):
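    # time-based split of the series: indices [0, 2555) for training,
    # [2555, 3285) for validation, and the remainder held out for testing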
split_time=2555
time_train=time[:split_time]
x_train=series[:split_time]
split_time_test=3285
time_valid=time[split_time:split_time_test]
x_valid=series[split_time:split_time_test]
time_test=time[split_time_test:]
x_test=series[split_time_test:]
window_size=30
batch_size=32
shuffle_buffer_size=1000
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
train_set = util.windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size)
valid_set=util.windowed_dataset(x_valid,window_size,batch_size,shuffle_buffer_size)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv1D(filters=60, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
tf.keras.layers.LSTM(60, return_sequences=True),
tf.keras.layers.LSTM(60, return_sequences=True),
tf.keras.layers.Dense(30, activation="relu"),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 400)
])
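    # architecture: a causal Conv1D front-end feeding two stacked LSTMs and dense
    # layers; the final Lambda rescales outputs by 400 to roughly match the
    # magnitude of the raw series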
optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set,validation_data=(valid_set),epochs=5)
rnn_forecast = util.model_forecast(model, series[..., np.newaxis], window_size)
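    # keep only the last time step of each forecast window, aligned with the
    # validation/test portion of the series (from split_time onward)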
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
mae=tf.keras.metrics.mean_absolute_error(x_test, rnn_forecast[:365]).numpy()
accuracy=100-mae
return (accuracy,mae,rnn_forecast[:forecastDays])
|
[
"tensorflow.random.set_seed",
"tensorflow.keras.metrics.mean_absolute_error",
"numpy.random.seed",
"utils.windowed_dataset",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.losses.Huber",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Lambda",
"utils.model_forecast"
] |
[((461, 493), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (491, 493), True, 'import tensorflow as tf\n'), ((498, 520), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(51)'], {}), '(51)\n', (516, 520), True, 'import tensorflow as tf\n'), ((525, 543), 'numpy.random.seed', 'np.random.seed', (['(51)'], {}), '(51)\n', (539, 543), True, 'import numpy as np\n'), ((560, 662), 'utils.windowed_dataset', 'util.windowed_dataset', (['x_train'], {'window_size': '(60)', 'batch_size': '(100)', 'shuffle_buffer': 'shuffle_buffer_size'}), '(x_train, window_size=60, batch_size=100,\n shuffle_buffer=shuffle_buffer_size)\n', (581, 662), True, 'import utils as util\n'), ((673, 749), 'utils.windowed_dataset', 'util.windowed_dataset', (['x_valid', 'window_size', 'batch_size', 'shuffle_buffer_size'], {}), '(x_valid, window_size, batch_size, shuffle_buffer_size)\n', (694, 749), True, 'import utils as util\n'), ((1292, 1339), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(1e-05)', 'momentum': '(0.9)'}), '(lr=1e-05, momentum=0.9)\n', (1315, 1339), True, 'import tensorflow as tf\n'), ((1549, 1613), 'utils.model_forecast', 'util.model_forecast', (['model', 'series[..., np.newaxis]', 'window_size'], {}), '(model, series[..., np.newaxis], window_size)\n', (1568, 1613), True, 'import utils as util\n'), ((792, 917), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': '(60)', 'kernel_size': '(5)', 'strides': '(1)', 'padding': '"""causal"""', 'activation': '"""relu"""', 'input_shape': '[None, 1]'}), "(filters=60, kernel_size=5, strides=1, padding=\n 'causal', activation='relu', input_shape=[None, 1])\n", (814, 917), True, 'import tensorflow as tf\n'), ((990, 1037), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(60)'], {'return_sequences': '(True)'}), '(60, return_sequences=True)\n', (1010, 1037), True, 'import tensorflow as tf\n'), ((1043, 1090), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(60)'], {'return_sequences': '(True)'}), '(60, return_sequences=True)\n', (1063, 1090), True, 'import tensorflow as tf\n'), ((1096, 1140), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (1117, 1140), True, 'import tensorflow as tf\n'), ((1146, 1190), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (1167, 1190), True, 'import tensorflow as tf\n'), ((1196, 1220), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (1217, 1220), True, 'import tensorflow as tf\n'), ((1226, 1267), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x * 400)'], {}), '(lambda x: x * 400)\n', (1248, 1267), True, 'import tensorflow as tf\n'), ((1362, 1385), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {}), '()\n', (1383, 1385), True, 'import tensorflow as tf\n'), ((1690, 1754), 'tensorflow.keras.metrics.mean_absolute_error', 'tf.keras.metrics.mean_absolute_error', (['x_test', 'rnn_forecast[:365]'], {}), '(x_test, rnn_forecast[:365])\n', (1726, 1754), True, 'import tensorflow as tf\n')]
|
"""BaseHomMag class code
DOCSTRINGS V4 READY
"""
from magpylib._src.input_checks import check_format_input_scalar
from magpylib._src.input_checks import check_format_input_vector
class BaseHomMag:
"""provides the magnetization attribute for homogeneously magnetized magnets"""
def __init__(self, magnetization):
self.magnetization = magnetization
@property
def magnetization(self):
"""Object magnetization attribute getter and setter."""
return self._magnetization
@magnetization.setter
def magnetization(self, mag):
"""Set magnetization vector, array_like, shape (3,), unit [mT]."""
self._magnetization = check_format_input_vector(
mag,
dims=(1,),
shape_m1=3,
sig_name="magnetization",
sig_type="array_like (list, tuple, ndarray) with shape (3,)",
allow_None=True,
)
class BaseCurrent:
"""provides scalar current attribute"""
def __init__(self, current):
self.current = current
@property
def current(self):
"""Object current attribute getter and setter."""
return self._current
@current.setter
def current(self, current):
"""Set current value, scalar, unit [A]."""
# input type and init check
self._current = check_format_input_scalar(
current,
sig_name="current",
sig_type="`None` or a number (int, float)",
allow_None=True,
)
|
[
"magpylib._src.input_checks.check_format_input_vector",
"magpylib._src.input_checks.check_format_input_scalar"
] |
[((676, 844), 'magpylib._src.input_checks.check_format_input_vector', 'check_format_input_vector', (['mag'], {'dims': '(1,)', 'shape_m1': '(3)', 'sig_name': '"""magnetization"""', 'sig_type': '"""array_like (list, tuple, ndarray) with shape (3,)"""', 'allow_None': '(True)'}), "(mag, dims=(1,), shape_m1=3, sig_name=\n 'magnetization', sig_type=\n 'array_like (list, tuple, ndarray) with shape (3,)', allow_None=True)\n", (701, 844), False, 'from magpylib._src.input_checks import check_format_input_vector\n'), ((1337, 1457), 'magpylib._src.input_checks.check_format_input_scalar', 'check_format_input_scalar', (['current'], {'sig_name': '"""current"""', 'sig_type': '"""`None` or a number (int, float)"""', 'allow_None': '(True)'}), "(current, sig_name='current', sig_type=\n '`None` or a number (int, float)', allow_None=True)\n", (1362, 1457), False, 'from magpylib._src.input_checks import check_format_input_scalar\n')]
|
# coding: utf-8
"""
Views handling the legacy import to EDD.
"""
import json
import logging
import uuid
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.views import generic
from requests import codes
from edd.notify.backend import RedisBroker
from .. import models as edd_models
from .. import tasks
from ..importer import parser, table
from .study import StudyObjectMixin
logger = logging.getLogger(__name__)
# /study/<study_id>/import/
class ImportTableView(StudyObjectMixin, generic.DetailView):
def delete(self, request, *args, **kwargs):
study = self.object = self.get_object()
if not study.user_can_write(request.user):
# TODO: uncovered code
return HttpResponse(status=codes.forbidden)
# END uncovered code
# Note: we validate the input UUID to avoid exposing the capability to delete any
# arbitrary cache entry from redis. As a stopgap, we'll allow any authenticated user to
# delete the temporary cache for the import. we should revisit this when re-casting
# imports as REST resources. Low risk ATM for a user to delete someone else's WIP import,
# since they'd have to both catch it before it's processed AND have its UUID.
import_id = request.body.decode("utf-8")
try:
uuid.UUID(import_id)
except ValueError:
return HttpResponse(
f'Invalid import id "{import_id}"', status=codes.bad_request
)
try:
broker = table.ImportBroker()
broker.clear_pages(import_id)
return HttpResponse(status=codes.ok)
# TODO: uncovered code
except Exception as e:
logger.exception(f"Import delete failed: {e}")
# return error synchronously so it can be displayed right away in context.
# no need for a separate notification here
messages.error(request, str(e))
# END uncovered code
def get(self, request, *args, **kwargs):
# TODO: uncovered code
study = self.object = self.get_object()
user_can_write = study.user_can_write(request.user)
# FIXME protocol display on import page should be an autocomplete
protocols = edd_models.Protocol.objects.order_by("name")
return render(
request,
"main/import.html",
context={
"study": study,
"protocols": protocols,
"writable": user_can_write,
"import_id": uuid.uuid4(),
"page_size_limit": settings.EDD_IMPORT_PAGE_SIZE,
"page_count_limit": settings.EDD_IMPORT_PAGE_LIMIT,
},
)
# END uncovered code
def _parse_payload(self, request):
# init storage for task and parse request body
broker = table.ImportBroker()
payload = json.loads(request.body)
# check requested import parameters are acceptable
import_id = payload["importId"]
series = payload["series"]
pages = payload["totalPages"]
broker.check_bounds(import_id, series, pages)
# store the series of points for the task to read later
count = broker.add_page(import_id, json.dumps(series))
# only on the first page, store the import context
if payload["page"] == 1:
del payload["series"]
# include an update record from the original request
update = edd_models.Update.load_request_update(request)
payload.update(update_id=update.id)
broker.set_context(import_id, json.dumps(payload))
return import_id, count == pages
def post(self, request, *args, **kwargs):
study = self.object = self.get_object()
try:
import_id, done = self._parse_payload(request)
if done:
# once all pages are parsed, submit task and send notification
logger.debug(f"Submitting Celery task for import {import_id}")
result = tasks.import_table_task.delay(
study.pk, request.user.pk, import_id
)
RedisBroker(request.user).notify(
_(
"Data is submitted for import. You may continue to use EDD, "
"another message will appear once the import is complete."
),
uuid=result.id,
)
return JsonResponse(data={}, status=codes.accepted)
# TODO: uncovered code
except table.ImportTooLargeException as e:
return HttpResponse(str(e), status=codes.request_entity_too_large)
except table.ImportBoundsException as e:
return HttpResponse(str(e), status=codes.bad_request)
except table.ImportException as e:
return HttpResponse(str(e), status=codes.server_error)
except RuntimeError as e:
logger.exception(f"Data import failed: {e}")
# return error synchronously so it can be displayed right away in context.
# no need for a separate notification here
messages.error(request, e)
# END uncovered
# /utilities/parsefile/
# To reach this function, files are sent from the client by the Utl.FileDropZone class (in Utl.ts).
def utilities_parse_import_file(request):
"""
Attempt to process posted data as either a TSV or CSV file or Excel spreadsheet and extract a
table of data automatically.
"""
file = request.FILES.get("file")
import_mode = request.POST.get("import_mode", parser.ImportModeFlags.STANDARD)
parse_fn = parser.find_parser(import_mode, file.content_type)
if parse_fn:
try:
result = parse_fn(file)
return JsonResponse(
{"file_type": result.file_type, "file_data": result.parsed_data}
)
# TODO: uncovered code
except Exception as e:
logger.exception(f"Import file parse failed: {e}")
return JsonResponse({"python_error": str(e)}, status=codes.server_error)
# END uncovered
return JsonResponse(
{
"python_error": _(
"The uploaded file could not be interpreted as either an Excel "
"spreadsheet or an XML file. Please check that the contents are "
"formatted correctly. (Word documents are not allowed!)"
)
},
status=codes.server_error,
)
|
[
"uuid.uuid4",
"json.loads",
"django.http.HttpResponse",
"django.contrib.messages.error",
"edd.notify.backend.RedisBroker",
"json.dumps",
"django.http.JsonResponse",
"uuid.UUID",
"django.utils.translation.ugettext",
"logging.getLogger"
] |
[((558, 585), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (575, 585), False, 'import logging\n'), ((3062, 3086), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (3072, 3086), False, 'import json\n'), ((878, 914), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': 'codes.forbidden'}), '(status=codes.forbidden)\n', (890, 914), False, 'from django.http import HttpResponse, JsonResponse\n'), ((1486, 1506), 'uuid.UUID', 'uuid.UUID', (['import_id'], {}), '(import_id)\n', (1495, 1506), False, 'import uuid\n'), ((1775, 1804), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': 'codes.ok'}), '(status=codes.ok)\n', (1787, 1804), False, 'from django.http import HttpResponse, JsonResponse\n'), ((3420, 3438), 'json.dumps', 'json.dumps', (['series'], {}), '(series)\n', (3430, 3438), False, 'import json\n'), ((4666, 4710), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': '{}', 'status': 'codes.accepted'}), '(data={}, status=codes.accepted)\n', (4678, 4710), False, 'from django.http import HttpResponse, JsonResponse\n'), ((5981, 6059), 'django.http.JsonResponse', 'JsonResponse', (["{'file_type': result.file_type, 'file_data': result.parsed_data}"], {}), "({'file_type': result.file_type, 'file_data': result.parsed_data})\n", (5993, 6059), False, 'from django.http import HttpResponse, JsonResponse\n'), ((6387, 6577), 'django.utils.translation.ugettext', '_', (['"""The uploaded file could not be interpreted as either an Excel spreadsheet or an XML file. Please check that the contents are formatted correctly. (Word documents are not allowed!)"""'], {}), "('The uploaded file could not be interpreted as either an Excel spreadsheet or an XML file. Please check that the contents are formatted correctly. (Word documents are not allowed!)'\n )\n", (6388, 6577), True, 'from django.utils.translation import ugettext as _\n'), ((1553, 1627), 'django.http.HttpResponse', 'HttpResponse', (['f"""Invalid import id "{import_id}\\""""'], {'status': 'codes.bad_request'}), '(f\'Invalid import id "{import_id}"\', status=codes.bad_request)\n', (1565, 1627), False, 'from django.http import HttpResponse, JsonResponse\n'), ((3789, 3808), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3799, 3808), False, 'import json\n'), ((5343, 5369), 'django.contrib.messages.error', 'messages.error', (['request', 'e'], {}), '(request, e)\n', (5357, 5369), False, 'from django.contrib import messages\n'), ((2709, 2721), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2719, 2721), False, 'import uuid\n'), ((4398, 4523), 'django.utils.translation.ugettext', '_', (['"""Data is submitted for import. You may continue to use EDD, another message will appear once the import is complete."""'], {}), "('Data is submitted for import. You may continue to use EDD, another message will appear once the import is complete.'\n )\n", (4399, 4523), True, 'from django.utils.translation import ugettext as _\n'), ((4344, 4369), 'edd.notify.backend.RedisBroker', 'RedisBroker', (['request.user'], {}), '(request.user)\n', (4355, 4369), False, 'from edd.notify.backend import RedisBroker\n')]
|
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
import mock
import os
import experimental_framework.common as common
from experimental_framework import APEX_LAKE_ROOT
from experimental_framework.api import FrameworkApi
from experimental_framework.benchmarking_unit import BenchmarkingUnit
import experimental_framework.benchmarks.\
instantiation_validation_benchmark as iv
from six.moves import map
from six.moves import range
class DummyBenchmarkingUnit(BenchmarkingUnit):
def __init__(self):
BenchmarkingUnit.__init__(self)
@staticmethod
def get_available_test_cases():
return ['BenchA', 'BenchB']
@staticmethod
def get_required_benchmarks(required_benchmarks):
common.BASE_DIR = "base_dir/"
return [iv.InstantiationValidationBenchmark('benchmark', dict())]
class DummyBenchmarkingUnit2(BenchmarkingUnit):
counter_init = 0
counter_finalize = 0
counter_run = 0
def __init__(self, base_heat_template, credentials,
heat_template_parameters, iterations, test_cases):
DummyBenchmarkingUnit.counter_init = 0
DummyBenchmarkingUnit.counter_finalize = 0
DummyBenchmarkingUnit.counter_run = 0
def initialize(self):
DummyBenchmarkingUnit2.counter_init += 1
def run_benchmarks(self):
DummyBenchmarkingUnit2.counter_run += 1
def finalize(self):
DummyBenchmarkingUnit2.counter_finalize += 1
class TestGeneratesTemplate(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch('experimental_framework.common.init')
def test_init_for_success(self, mock_init):
FrameworkApi.init()
mock_init.assert_called_once_with(api=True)
# @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
# 'get_available_test_cases',
# side_effect=DummyBenchmarkingUnit.get_available_test_cases)
# def test_get_available_test_cases_for_success(self, mock_bench):
# expected = ['BenchA', 'BenchB']
# output = FrameworkApi.get_available_test_cases()
# self.assertEqual(expected, output)
@mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
'get_required_benchmarks',
side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
def test_get_test_case_features_for_success(self, mock_get_req_bench):
expected = dict()
expected['description'] = 'Instantiation Validation Benchmark'
expected['parameters'] = [
iv.THROUGHPUT,
iv.VLAN_SENDER,
iv.VLAN_RECEIVER]
expected['allowed_values'] = dict()
expected['allowed_values'][iv.THROUGHPUT] = \
list(map(str, list(range(0, 100))))
expected['allowed_values'][iv.VLAN_SENDER] = \
list(map(str, list(range(-1, 4096))))
expected['allowed_values'][iv.VLAN_RECEIVER] = \
list(map(str, list(range(-1, 4096))))
expected['default_values'] = dict()
expected['default_values'][iv.THROUGHPUT] = '1'
expected['default_values'][iv.VLAN_SENDER] = '-1'
expected['default_values'][iv.VLAN_RECEIVER] = '-1'
test_case = 'instantiation_validation_benchmark.' \
'InstantiationValidationBenchmark'
output = FrameworkApi.get_test_case_features(test_case)
self.assertEqual(expected, output)
def test__get_test_case_features__for_failure(self):
self.assertRaises(
ValueError, FrameworkApi.get_test_case_features, 111)
@mock.patch('experimental_framework.common.init')
@mock.patch('experimental_framework.common.LOG')
@mock.patch('experimental_framework.common.get_credentials')
@mock.patch('experimental_framework.heat_template_generation.'
'generates_templates')
@mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit',
side_effect=DummyBenchmarkingUnit2)
def test_execute_framework_for_success(self, mock_b_unit, mock_heat,
mock_credentials, mock_log,
mock_common_init):
common.TEMPLATE_DIR = os.path.join(APEX_LAKE_ROOT,
'tests/data/generated_templates/')
test_cases = dict()
iterations = 1
heat_template = 'VTC_base_single_vm_wait.tmp'
heat_template_parameters = dict()
deployment_configuration = ''
openstack_credentials = dict()
openstack_credentials['ip_controller'] = ''
openstack_credentials['heat_url'] = ''
openstack_credentials['user'] = ''
openstack_credentials['password'] = ''
openstack_credentials['auth_uri'] = ''
openstack_credentials['project'] = ''
FrameworkApi.execute_framework(
test_cases, iterations, heat_template,
heat_template_parameters, deployment_configuration,
openstack_credentials)
|
[
"six.moves.range",
"experimental_framework.api.FrameworkApi.get_test_case_features",
"mock.patch",
"experimental_framework.benchmarking_unit.BenchmarkingUnit.__init__",
"experimental_framework.api.FrameworkApi.init",
"experimental_framework.api.FrameworkApi.execute_framework",
"os.path.join"
] |
[((2190, 2238), 'mock.patch', 'mock.patch', (['"""experimental_framework.common.init"""'], {}), "('experimental_framework.common.init')\n", (2200, 2238), False, 'import mock\n'), ((2794, 2958), 'mock.patch', 'mock.patch', (['"""experimental_framework.benchmarking_unit.BenchmarkingUnit.get_required_benchmarks"""'], {'side_effect': 'DummyBenchmarkingUnit.get_required_benchmarks'}), "(\n 'experimental_framework.benchmarking_unit.BenchmarkingUnit.get_required_benchmarks'\n , side_effect=DummyBenchmarkingUnit.get_required_benchmarks)\n", (2804, 2958), False, 'import mock\n'), ((4232, 4280), 'mock.patch', 'mock.patch', (['"""experimental_framework.common.init"""'], {}), "('experimental_framework.common.init')\n", (4242, 4280), False, 'import mock\n'), ((4286, 4333), 'mock.patch', 'mock.patch', (['"""experimental_framework.common.LOG"""'], {}), "('experimental_framework.common.LOG')\n", (4296, 4333), False, 'import mock\n'), ((4339, 4398), 'mock.patch', 'mock.patch', (['"""experimental_framework.common.get_credentials"""'], {}), "('experimental_framework.common.get_credentials')\n", (4349, 4398), False, 'import mock\n'), ((4404, 4490), 'mock.patch', 'mock.patch', (['"""experimental_framework.heat_template_generation.generates_templates"""'], {}), "(\n 'experimental_framework.heat_template_generation.generates_templates')\n", (4414, 4490), False, 'import mock\n'), ((4510, 4621), 'mock.patch', 'mock.patch', (['"""experimental_framework.benchmarking_unit.BenchmarkingUnit"""'], {'side_effect': 'DummyBenchmarkingUnit2'}), "('experimental_framework.benchmarking_unit.BenchmarkingUnit',\n side_effect=DummyBenchmarkingUnit2)\n", (4520, 4621), False, 'import mock\n'), ((1134, 1165), 'experimental_framework.benchmarking_unit.BenchmarkingUnit.__init__', 'BenchmarkingUnit.__init__', (['self'], {}), '(self)\n', (1159, 1165), False, 'from experimental_framework.benchmarking_unit import BenchmarkingUnit\n'), ((2295, 2314), 'experimental_framework.api.FrameworkApi.init', 'FrameworkApi.init', ([], {}), '()\n', (2312, 2314), False, 'from experimental_framework.api import FrameworkApi\n'), ((3985, 4031), 'experimental_framework.api.FrameworkApi.get_test_case_features', 'FrameworkApi.get_test_case_features', (['test_case'], {}), '(test_case)\n', (4020, 4031), False, 'from experimental_framework.api import FrameworkApi\n'), ((4870, 4933), 'os.path.join', 'os.path.join', (['APEX_LAKE_ROOT', '"""tests/data/generated_templates/"""'], {}), "(APEX_LAKE_ROOT, 'tests/data/generated_templates/')\n", (4882, 4933), False, 'import os\n'), ((5492, 5640), 'experimental_framework.api.FrameworkApi.execute_framework', 'FrameworkApi.execute_framework', (['test_cases', 'iterations', 'heat_template', 'heat_template_parameters', 'deployment_configuration', 'openstack_credentials'], {}), '(test_cases, iterations, heat_template,\n heat_template_parameters, deployment_configuration, openstack_credentials)\n', (5522, 5640), False, 'from experimental_framework.api import FrameworkApi\n'), ((3405, 3418), 'six.moves.range', 'range', (['(0)', '(100)'], {}), '(0, 100)\n', (3410, 3418), False, 'from six.moves import range\n'), ((3508, 3523), 'six.moves.range', 'range', (['(-1)', '(4096)'], {}), '(-1, 4096)\n', (3513, 3523), False, 'from six.moves import range\n'), ((3615, 3630), 'six.moves.range', 'range', (['(-1)', '(4096)'], {}), '(-1, 4096)\n', (3620, 3630), False, 'from six.moves import range\n')]
|
import pyglet
from emulator import emulator
def start(dt):
pyglet.clock.schedule_interval(emulator.main, 1/1000)
#need this for pyglet
def update(dt):
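    # advance the CPU one cycle per tick; once the ROM reaches opcode 0x1210,
    # stop this callback and schedule start() to run 3 seconds later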
if emulator.cpu.opcode != 0x1210:
emulator.cpu.cycle()
else:
pyglet.clock.unschedule(update)
pyglet.clock.schedule_once(start, 3)
if __name__ == '__main__':
template = pyglet.gl.Config(double_buffer=True)
emulator = emulator(640, 320, config=template, caption="Chip-8 emulator")
emulator.loadROM('IBM.ch8')
pyglet.clock.schedule(update)
pyglet.app.run()
|
[
"emulator.emulator",
"pyglet.app.run",
"pyglet.clock.schedule_once",
"pyglet.gl.Config",
"pyglet.clock.schedule",
"emulator.emulator.loadROM",
"pyglet.clock.unschedule",
"emulator.emulator.cpu.cycle",
"pyglet.clock.schedule_interval"
] |
[((64, 119), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['emulator.main', '(1 / 1000)'], {}), '(emulator.main, 1 / 1000)\n', (94, 119), False, 'import pyglet\n'), ((362, 398), 'pyglet.gl.Config', 'pyglet.gl.Config', ([], {'double_buffer': '(True)'}), '(double_buffer=True)\n', (378, 398), False, 'import pyglet\n'), ((414, 476), 'emulator.emulator', 'emulator', (['(640)', '(320)'], {'config': 'template', 'caption': '"""Chip-8 emulator"""'}), "(640, 320, config=template, caption='Chip-8 emulator')\n", (422, 476), False, 'from emulator import emulator\n'), ((481, 508), 'emulator.emulator.loadROM', 'emulator.loadROM', (['"""IBM.ch8"""'], {}), "('IBM.ch8')\n", (497, 508), False, 'from emulator import emulator\n'), ((513, 542), 'pyglet.clock.schedule', 'pyglet.clock.schedule', (['update'], {}), '(update)\n', (534, 542), False, 'import pyglet\n'), ((547, 563), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (561, 563), False, 'import pyglet\n'), ((203, 223), 'emulator.emulator.cpu.cycle', 'emulator.cpu.cycle', ([], {}), '()\n', (221, 223), False, 'from emulator import emulator\n'), ((242, 273), 'pyglet.clock.unschedule', 'pyglet.clock.unschedule', (['update'], {}), '(update)\n', (265, 273), False, 'import pyglet\n'), ((282, 318), 'pyglet.clock.schedule_once', 'pyglet.clock.schedule_once', (['start', '(3)'], {}), '(start, 3)\n', (308, 318), False, 'import pyglet\n')]
|
from django.contrib import admin
import clone.models as mod
admin.site.register(mod.lineChart)
admin.site.register(mod.donutChart)
|
[
"django.contrib.admin.site.register"
] |
[((61, 95), 'django.contrib.admin.site.register', 'admin.site.register', (['mod.lineChart'], {}), '(mod.lineChart)\n', (80, 95), False, 'from django.contrib import admin\n'), ((96, 131), 'django.contrib.admin.site.register', 'admin.site.register', (['mod.donutChart'], {}), '(mod.donutChart)\n', (115, 131), False, 'from django.contrib import admin\n')]
|
# encoding=utf-8
"""
Created on 21:29 2018/11/12
@author: <NAME>
"""
import numpy as np
import scipy.io
import scipy.linalg
import sklearn.metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
def kernel(ker, X1, X2, gamma):
K = None
if not ker or ker == 'primal':
K = X1
elif ker == 'linear':
if X2 is not None:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T, np.asarray(X2).T)
else:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T)
elif ker == 'rbf':
if X2 is not None:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, np.asarray(X2).T, gamma)
else:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, None, gamma)
return K
class TCA:
def __init__(self, kernel_type='primal', dim=30, lamb=1, gamma=1):
'''
Init func
:param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
:param dim: dimension after transfer
:param lamb: lambda value in equation
:param gamma: kernel bandwidth for rbf kernel
'''
self.kernel_type = kernel_type
self.dim = dim
self.lamb = lamb
self.gamma = gamma
def fit(self, Xs, Xt):
'''
Transform Xs and Xt
:param Xs: ns * n_feature, source feature
:param Xt: nt * n_feature, target feature
:return: Xs_new and Xt_new after TCA
'''
X = np.hstack((Xs.T, Xt.T))
X /= np.linalg.norm(X, axis=0)
m, n = X.shape
ns, nt = len(Xs), len(Xt)
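        # build the MMD matrix M = e e^T (e marks source vs. target samples)
        # and the centering matrix H = I - (1/n) * 11^T used in the TCA objective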
e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
M = e * e.T
M = M / np.linalg.norm(M, 'fro')
H = np.eye(n) - 1 / n * np.ones((n, n))
K = kernel(self.kernel_type, X, None, gamma=self.gamma)
n_eye = m if self.kernel_type == 'primal' else n
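        # generalized eigenproblem a*w = lambda*b*w; the dim smallest
        # eigenvalues give the columns of the projection matrix A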
a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
w, V = scipy.linalg.eig(a, b)
ind = np.argsort(w)
A = V[:, ind[:self.dim]]
Z = np.dot(A.T, K)
Z /= np.linalg.norm(Z, axis=0)
Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
return Xs_new, Xt_new
def fit_predict(self, Xs, Ys, Xt, Yt):
'''
Transform Xs and Xt, then make predictions on target using 1NN
:param Xs: ns * n_feature, source feature
:param Ys: ns * 1, source label
:param Xt: nt * n_feature, target feature
:param Yt: nt * 1, target label
:return: Accuracy and predicted_labels on the target domain
'''
Xs_new, Xt_new = self.fit(Xs, Xt)
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, Ys.ravel())
y_pred = clf.predict(Xt_new)
acc = sklearn.metrics.accuracy_score(Yt, y_pred)
return acc, y_pred
def fit_new(self, Xs, Xt, Xt2):
'''
Map Xt2 to the latent space created from Xt and Xs
:param Xs : ns * n_feature, source feature
:param Xt : nt * n_feature, target feature
:param Xt2: n_s, n_feature, target feature to be mapped
:return: Xt2_new, mapped Xt2 with projection created by Xs and Xt
'''
        # Compute the projection matrix A from Xs and Xt
X = np.hstack((Xs.T, Xt.T))
X /= np.linalg.norm(X, axis=0)
m, n = X.shape
ns, nt = len(Xs), len(Xt)
e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
M = e * e.T
M = M / np.linalg.norm(M, 'fro')
H = np.eye(n) - 1 / n * np.ones((n, n))
K = kernel(self.kernel_type, X, None, gamma=self.gamma)
n_eye = m if self.kernel_type == 'primal' else n
a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
w, V = scipy.linalg.eig(a, b)
ind = np.argsort(w)
A = V[:, ind[:self.dim]]
# Compute kernel with Xt2 as target and X as source
Xt2 = Xt2.T
K = kernel(self.kernel_type, X1 = Xt2, X2 = X, gamma=self.gamma)
# New target features
Xt2_new = K @ A
return Xt2_new
def fit_predict_new(self, Xt, Xs, Ys, Xt2, Yt2):
'''
        Transform Xt and Xs, get Xs_new
Transform Xt2 with projection matrix created by Xs and Xt, get Xt2_new
Make predictions on Xt2_new using classifier trained on Xs_new
:param Xt: ns * n_feature, target feature
:param Xs: ns * n_feature, source feature
:param Ys: ns * 1, source label
:param Xt2: nt * n_feature, new target feature
:param Yt2: nt * 1, new target label
:return: Accuracy and predicted_labels on the target domain
'''
Xs_new, _ = self.fit(Xs, Xt)
Xt2_new = self.fit_new(Xs, Xt, Xt2)
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, Ys.ravel())
y_pred = clf.predict(Xt2_new)
acc = sklearn.metrics.accuracy_score(Yt2, y_pred)
return acc, y_pred
if __name__ == '__main__':
domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat']
for i in [1]:
for j in [2]:
if i != j:
src, tar = 'data/' + domains[i], 'data/' + domains[j]
src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)
Xs, Ys, Xt, Yt = src_domain['feas'], src_domain['labels'], tar_domain['feas'], tar_domain['labels']
# Split target data
Xt1, Xt2, Yt1, Yt2 = train_test_split(Xt, Yt, train_size=50, stratify=Yt, random_state=42)
# Create latent space and evaluate using Xs and Xt1
tca = TCA(kernel_type='linear', dim=30, lamb=1, gamma=1)
acc1, ypre1 = tca.fit_predict(Xs, Ys, Xt1, Yt1)
# Project and evaluate Xt2 existing projection matrix and classifier
acc2, ypre2 = tca.fit_predict_new(Xt1, Xs, Ys, Xt2, Yt2)
print(f'Accuracy of mapped source and target1 data : {acc1:.3f}') #0.800
print(f'Accuracy of mapped target2 data : {acc2:.3f}') #0.706
|
[
"numpy.eye",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.linalg.norm",
"numpy.dot",
"numpy.linalg.multi_dot"
] |
[((1536, 1559), 'numpy.hstack', 'np.hstack', (['(Xs.T, Xt.T)'], {}), '((Xs.T, Xt.T))\n', (1545, 1559), True, 'import numpy as np\n'), ((1573, 1598), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1587, 1598), True, 'import numpy as np\n'), ((2127, 2140), 'numpy.argsort', 'np.argsort', (['w'], {}), '(w)\n', (2137, 2140), True, 'import numpy as np\n'), ((2186, 2200), 'numpy.dot', 'np.dot', (['A.T', 'K'], {}), '(A.T, K)\n', (2192, 2200), True, 'import numpy as np\n'), ((2214, 2239), 'numpy.linalg.norm', 'np.linalg.norm', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (2228, 2239), True, 'import numpy as np\n'), ((2772, 2807), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (2792, 2807), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3399, 3422), 'numpy.hstack', 'np.hstack', (['(Xs.T, Xt.T)'], {}), '((Xs.T, Xt.T))\n', (3408, 3422), True, 'import numpy as np\n'), ((3436, 3461), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3450, 3461), True, 'import numpy as np\n'), ((3990, 4003), 'numpy.argsort', 'np.argsort', (['w'], {}), '(w)\n', (4000, 4003), True, 'import numpy as np\n'), ((4971, 5006), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (4991, 5006), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1771, 1795), 'numpy.linalg.norm', 'np.linalg.norm', (['M', '"""fro"""'], {}), "(M, 'fro')\n", (1785, 1795), True, 'import numpy as np\n'), ((1808, 1817), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (1814, 1817), True, 'import numpy as np\n'), ((2042, 2074), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, H, K.T]'], {}), '([K, H, K.T])\n', (2061, 2074), True, 'import numpy as np\n'), ((3634, 3658), 'numpy.linalg.norm', 'np.linalg.norm', (['M', '"""fro"""'], {}), "(M, 'fro')\n", (3648, 3658), True, 'import numpy as np\n'), ((3671, 3680), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (3677, 3680), True, 'import numpy as np\n'), ((3905, 3937), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, H, K.T]'], {}), '([K, H, K.T])\n', (3924, 3937), True, 'import numpy as np\n'), ((1828, 1843), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (1835, 1843), True, 'import numpy as np\n'), ((1980, 2012), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, M, K.T]'], {}), '([K, M, K.T])\n', (1999, 2012), True, 'import numpy as np\n'), ((3691, 3706), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (3698, 3706), True, 'import numpy as np\n'), ((3843, 3875), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, M, K.T]'], {}), '([K, M, K.T])\n', (3862, 3875), True, 'import numpy as np\n'), ((5709, 5778), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Xt', 'Yt'], {'train_size': '(50)', 'stratify': 'Yt', 'random_state': '(42)'}), '(Xt, Yt, train_size=50, stratify=Yt, random_state=42)\n', (5725, 5778), False, 'from sklearn.model_selection import train_test_split\n'), ((1688, 1704), 'numpy.ones', 'np.ones', (['(ns, 1)'], {}), '((ns, 1))\n', (1695, 1704), True, 'import numpy as np\n'), ((1716, 1732), 'numpy.ones', 'np.ones', (['(nt, 1)'], {}), '((nt, 1))\n', (1723, 1732), True, 'import numpy as np\n'), ((2027, 2040), 'numpy.eye', 'np.eye', (['n_eye'], {}), '(n_eye)\n', (2033, 2040), True, 'import numpy as np\n'), ((3551, 3567), 'numpy.ones', 'np.ones', (['(ns, 1)'], {}), '((ns, 1))\n', (3558, 3567), True, 'import numpy as 
np\n'), ((3579, 3595), 'numpy.ones', 'np.ones', (['(nt, 1)'], {}), '((nt, 1))\n', (3586, 3595), True, 'import numpy as np\n'), ((3890, 3903), 'numpy.eye', 'np.eye', (['n_eye'], {}), '(n_eye)\n', (3896, 3903), True, 'import numpy as np\n'), ((464, 478), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (474, 478), True, 'import numpy as np\n'), ((482, 496), 'numpy.asarray', 'np.asarray', (['X2'], {}), '(X2)\n', (492, 496), True, 'import numpy as np\n'), ((569, 583), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (579, 583), True, 'import numpy as np\n'), ((689, 703), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (699, 703), True, 'import numpy as np\n'), ((707, 721), 'numpy.asarray', 'np.asarray', (['X2'], {}), '(X2)\n', (717, 721), True, 'import numpy as np\n'), ((798, 812), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (808, 812), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
data = pd.read_csv('data/shuttle-landing-control.csv',names=['auto_control','stability','error','sign','wind','magnitude','visibility'])
## replacing missing values '*' with 0
data = data.replace('*',0)
## |---------- Data Set Properties ------------|
# |----- Categorical Value Map: 2 = True / 1 = False, 0 = Missing value -----|
## Always be aware of a column's data type: the wrong dtype can raise errors or leave values unchanged when conditions or other processing steps are applied.
## Converting data types into a homogeneous integer type
data=data.astype('int')
## Assuming standardization is not needed, since all values are mapped into the same type of category
## Cross-validation is not needed because the dataset is too small
print(data.dtypes)
print(data.describe())
data.loc[data['auto_control']==1,'auto_control'] = False
data.loc[data['auto_control']==2,'auto_control'] = True
data.loc[data['visibility']==1,'visibility'] = False
data.loc[data['visibility']==2,'visibility'] = True
data.loc[data['sign']==1,'sign'] = '-'
data.loc[data['sign']==2,'sign'] = '+'
data.loc[data['wind']==1,'wind'] = 'tail'
data.loc[data['wind']==2,'wind'] = 'head'
data.loc[data['stability']==1,'stability'] = 'stab'
data.loc[data['stability']==2,'stability'] = 'xstab'
print(data)
|
[
"pandas.read_csv"
] |
[((70, 210), 'pandas.read_csv', 'pd.read_csv', (['"""data/shuttle-landing-control.csv"""'], {'names': "['auto_control', 'stability', 'error', 'sign', 'wind', 'magnitude',\n 'visibility']"}), "('data/shuttle-landing-control.csv', names=['auto_control',\n 'stability', 'error', 'sign', 'wind', 'magnitude', 'visibility'])\n", (81, 210), True, 'import pandas as pd\n')]
|
import unittest as ut
import pandas as pd
import pymc3 as pm
from bayesian_inference_confusion_matrix import ConfusionMatrixAnalyser, bayes_laplace_prior
class TestConfusionMatrixAnalyser(ut.TestCase):
def __init__(self, *args, **kwargs):
super(TestConfusionMatrixAnalyser, self).__init__(*args, **kwargs)
input_cm = pd.Series([9, 1, 3, 2], index=['TP', 'FN', 'TN', 'FP'])
# use improper prior to avoid bias / simplifies calculation
self.analyser = ConfusionMatrixAnalyser(input_cm)
self.N = self.analyser.confusion_matrix.values.sum()
sel_n = 100000
inf_n_pp = self.analyser.posterior_predict_confusion_matrices(pp_n=sel_n)
inf_n_pp /= float(sel_n)
self.inf_n_pp = inf_n_pp
def test_theta_and_x_sampling(self):
"""confirm that sampled expected value/variance for theta and X are close to the analytical solution
see https://en.wikipedia.org/wiki/Dirichlet_distribution
and https://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution"""
alpha = self.analyser.confusion_matrix
alpha_0 = float(sum(alpha))
dirichlet_mean = alpha / alpha_0
dcm_mean = self.N * dirichlet_mean
dirichlet_var = dirichlet_mean * (1 - dirichlet_mean) / (alpha_0 + 1)
dcm_var = self.N * (self.N + alpha_0) * dirichlet_var
for i in self.analyser.theta_samples:
self.assertAlmostEqual(dirichlet_mean[i],
self.analyser.theta_samples[i].mean(),
delta=1e-2)
self.assertAlmostEqual(dcm_mean[i],
self.analyser.pp_samples[i].mean(),
delta=5e-2)
self.assertAlmostEqual(dirichlet_var[i],
self.analyser.theta_samples[i].var(),
delta=1e-3)
self.assertAlmostEqual(dcm_var[i],
self.analyser.pp_samples[i].var(),
delta=2e-1)
def test_expected_value(self):
"""confirm that expected value is equal to the metric for the original confusion matrix
(within 1 percentage point)"""
for metric in self.analyser.cm_metrics.index:
self.assertAlmostEqual(self.analyser.cm_metrics[metric],
self.analyser.theta_metrics.mean()[metric],
delta=1e-2)
def test_variance_convergence(self):
"""test that the variance of the posterior predictions of V_i/N converge towards the variance of theta_i"""
theta_var = self.analyser.theta_samples.var()
inf_n_pp_var = self.inf_n_pp.var()
for i in theta_var.index:
self.assertAlmostEqual(theta_var[i], inf_n_pp_var[i], delta=1e-5)
def test_expected_value_pp_theta(self):
"""test that the expected value from the posterior prediction and theta are identical
this only works for very large N"""
for i in self.analyser.theta_samples.columns:
self.assertAlmostEqual(self.analyser.theta_samples.mean()[i],
self.inf_n_pp.mean()[i],
delta=1e-4)
def test_selected_metrics(self):
"""test if metrics are properly calculated, this is only done for a handful"""
self.assertEqual(self.analyser.cm_metrics['ACC'], 12. / 15.)
self.assertEqual(self.analyser.cm_metrics['PREVALENCE'], 10. / 15.)
self.assertEqual(self.analyser.cm_metrics['TPR'], 9. / 10.)
@ut.skip("pyMC test is disabled because it takes 15-90 seconds")
def test_pymc_implementation(self):
"""my analytical implementation and pyMC should yield the same results.
Test expected value and variance for theta"""
# need to use Bayes-Laplace prior: pyMC cannot deal with Haldane prior
analyser_bl = ConfusionMatrixAnalyser(self.analyser.confusion_matrix,
prior=bayes_laplace_prior)
# inference with pyMC
with pm.Model() as multinom_test:
a = pm.Dirichlet('a', a=bayes_laplace_prior.astype(float).values)
data_pred = pm.Multinomial('data_pred',
n=self.N,
p=a,
observed=self.analyser.confusion_matrix)
trace = pm.sample(5000)
# get pymc samples
pymc_trace_samples = pd.DataFrame(trace.get_values('a'),
columns=self.analyser.confusion_matrix.index)
# compare expected value and variance
for i in self.analyser.theta_samples:
self.assertAlmostEqual(pymc_trace_samples[i].mean(),
analyser_bl.theta_samples[i].mean(),
delta=1e-2)
self.assertAlmostEqual(pymc_trace_samples[i].var(),
analyser_bl.theta_samples[i].var(),
delta=1e-3)
if __name__ == '__main__':
ut.main(verbosity=2)
|
[
"unittest.main",
"pymc3.sample",
"pymc3.Model",
"unittest.skip",
"pandas.Series",
"bayesian_inference_confusion_matrix.ConfusionMatrixAnalyser",
"bayesian_inference_confusion_matrix.bayes_laplace_prior.astype",
"pymc3.Multinomial"
] |
[((3638, 3701), 'unittest.skip', 'ut.skip', (['"""pyMC test is disabled because it takes 15-90 seconds"""'], {}), "('pyMC test is disabled because it takes 15-90 seconds')\n", (3645, 3701), True, 'import unittest as ut\n'), ((5192, 5212), 'unittest.main', 'ut.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5199, 5212), True, 'import unittest as ut\n'), ((341, 396), 'pandas.Series', 'pd.Series', (['[9, 1, 3, 2]'], {'index': "['TP', 'FN', 'TN', 'FP']"}), "([9, 1, 3, 2], index=['TP', 'FN', 'TN', 'FP'])\n", (350, 396), True, 'import pandas as pd\n'), ((489, 522), 'bayesian_inference_confusion_matrix.ConfusionMatrixAnalyser', 'ConfusionMatrixAnalyser', (['input_cm'], {}), '(input_cm)\n', (512, 522), False, 'from bayesian_inference_confusion_matrix import ConfusionMatrixAnalyser, bayes_laplace_prior\n'), ((3978, 4065), 'bayesian_inference_confusion_matrix.ConfusionMatrixAnalyser', 'ConfusionMatrixAnalyser', (['self.analyser.confusion_matrix'], {'prior': 'bayes_laplace_prior'}), '(self.analyser.confusion_matrix, prior=\n bayes_laplace_prior)\n', (4001, 4065), False, 'from bayesian_inference_confusion_matrix import ConfusionMatrixAnalyser, bayes_laplace_prior\n'), ((4151, 4161), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (4159, 4161), True, 'import pymc3 as pm\n'), ((4282, 4370), 'pymc3.Multinomial', 'pm.Multinomial', (['"""data_pred"""'], {'n': 'self.N', 'p': 'a', 'observed': 'self.analyser.confusion_matrix'}), "('data_pred', n=self.N, p=a, observed=self.analyser.\n confusion_matrix)\n", (4296, 4370), True, 'import pymc3 as pm\n'), ((4503, 4518), 'pymc3.sample', 'pm.sample', (['(5000)'], {}), '(5000)\n', (4512, 4518), True, 'import pymc3 as pm\n'), ((4216, 4249), 'bayesian_inference_confusion_matrix.bayes_laplace_prior.astype', 'bayes_laplace_prior.astype', (['float'], {}), '(float)\n', (4242, 4249), False, 'from bayesian_inference_confusion_matrix import ConfusionMatrixAnalyser, bayes_laplace_prior\n')]
|
# Copyright 2011 <NAME> (wtwf.com)
# based on code by '<EMAIL> (<NAME>)'
__author__ = 'wtwf.com (<NAME>)'
# If you want to check this with pychecker on osx you can do this...
# export PYTHONPATH=$PYTHONPATH:/usr/local/google_appengine/
# export PYTHONPATH=$PYTHONPATH:/usr/local/google_appengine/lib/yaml/lib/
from google.appengine.ext import vendor
vendor.add('lib')
from google.appengine.ext import webapp
from wtwf import wtwfhandler
from crud import crud_handler
import auth
import blogger
# import expand
import gps
import instagram
import mail
app = webapp.WSGIApplication([
('/data/gps.*', gps.Demo),
('/data/blogger/oauth', blogger.BloggerHandler),
('/data/blogger/blog', blogger.BloggerDataHandler),
(r'/public/data/blogger/feed/(\w+)', blogger.GetFeedHandler),
(r'/public/data/instagram/feed/([^/]+)', instagram.RssFeed),
(r'/mailfeed/([a-zA-Z0-9_-]+)', mail.FeedFromEmail),
# ('/expand/([a-zA-Z0-9_.%-]+)', expand.ExpandHandler),
# ('/data/expand/feed.json', crud_handler.GetCrudHandler(expand.ExpandFeed)),
# ('/data/expand/item.json', expand.ExpandItemDataHandler),
('/data/mail/feed.json', crud_handler.GetCrudHandler(mail.MailFeed)),
('/data/mail/item.json', mail.MailItemDataHandler),
('/data/user/user.json', wtwfhandler.UserHandler),
('/data/bulkdeletemailitems', mail.BulkDeleteMailItems),
('/data/setupdemo', mail.SetupDemo),
(r'/_ah/mail/(.+)', mail.EmailToFeed),
(auth.decorator.callback_path, auth.decorator.callback_handler())
])
|
[
"google.appengine.ext.vendor.add",
"crud.crud_handler.GetCrudHandler",
"auth.decorator.callback_handler"
] |
[((355, 372), 'google.appengine.ext.vendor.add', 'vendor.add', (['"""lib"""'], {}), "('lib')\n", (365, 372), False, 'from google.appengine.ext import vendor\n'), ((1133, 1175), 'crud.crud_handler.GetCrudHandler', 'crud_handler.GetCrudHandler', (['mail.MailFeed'], {}), '(mail.MailFeed)\n', (1160, 1175), False, 'from crud import crud_handler\n'), ((1457, 1490), 'auth.decorator.callback_handler', 'auth.decorator.callback_handler', ([], {}), '()\n', (1488, 1490), False, 'import auth\n')]
|
# coding: utf-8
import numpy as np
import math
from block_average import block_average
def main():
# Enter details here
n_samples = [int(2.5e5)]
# n_samples = [int(5e5),int(1e6),int(2e6),int(4e6)]
for n_sample in n_samples:
# Generate uncorrelated random samples
uncorrelated_samples = np.random.normal(size=n_sample)
average = np.mean(uncorrelated_samples)
variance = np.var(uncorrelated_samples)
# Calculate block averages and variances
means_est, vars_est, vars_err = block_average(uncorrelated_samples)
# Write output
outfile = "uncorr_n{}_blkavg.out".format(n_sample)
with open(outfile, "w") as f:
f.write(
"# Average: {:16.4f}, Variance: {:16.4f}\n".format(
average, variance
)
)
f.write("# N_blk_ops, Mean_est, Var_est, var_err\n")
for n_blk_ops, (mean_est, var_est, var_err) in enumerate(
zip(means_est, vars_est, vars_err)
):
f.write(
"{:10d}{:18.6f}{:16.4e}{:16.4e}\n".format(
n_blk_ops, mean_est, var_est, var_err
)
)
# Generate correlated random samples with MC walk
moves = np.random.normal(0.0, 0.05, size=5 * n_sample)
series = []
pos = 0.0
ener = energy(pos)
for i in range(n_sample):
series.append(pos)
trial_pos = pos + moves[i]
trial_ener = energy(trial_pos)
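            # Metropolis acceptance: always take downhill moves, accept uphill
            # moves with probability exp(-(trial_ener - ener))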
if trial_ener < ener:
pos = trial_pos
ener = trial_ener
else:
rand = np.random.uniform()
if math.exp(-(trial_ener - ener)) > rand:
pos = trial_pos
ener = trial_ener
correlated_samples = np.asarray(series)
# np.savetxt('correlated-samples.txt',correlated_samples)
average = np.mean(correlated_samples)
variance = np.var(correlated_samples)
# Calculate block averages and variances
means_est, vars_est, vars_err = block_average(correlated_samples)
# Write output
outfile = "corr_n{}_blkavg.out".format(n_sample)
with open(outfile, "w") as f:
f.write(
"# Average: {:16.4f}, Variance: {:16.4f}\n".format(
average, variance
)
)
f.write("# N_blk_ops, Mean_est, Var_est, var_err\n")
for n_blk_ops, (mean_est, var_est, var_err) in enumerate(
zip(means_est, vars_est, vars_err)
):
f.write(
"{:10d}{:18.6f}{:16.4e}{:16.4e}\n".format(
n_blk_ops, mean_est, var_est, var_err
)
)
def energy(x):
return x ** 2
if __name__ == "__main__":
main()
|
[
"numpy.random.uniform",
"math.exp",
"numpy.asarray",
"block_average.block_average",
"numpy.mean",
"numpy.random.normal",
"numpy.var"
] |
[((324, 355), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_sample'}), '(size=n_sample)\n', (340, 355), True, 'import numpy as np\n'), ((375, 404), 'numpy.mean', 'np.mean', (['uncorrelated_samples'], {}), '(uncorrelated_samples)\n', (382, 404), True, 'import numpy as np\n'), ((424, 452), 'numpy.var', 'np.var', (['uncorrelated_samples'], {}), '(uncorrelated_samples)\n', (430, 452), True, 'import numpy as np\n'), ((543, 578), 'block_average.block_average', 'block_average', (['uncorrelated_samples'], {}), '(uncorrelated_samples)\n', (556, 578), False, 'from block_average import block_average\n'), ((1325, 1371), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.05)'], {'size': '(5 * n_sample)'}), '(0.0, 0.05, size=5 * n_sample)\n', (1341, 1371), True, 'import numpy as np\n'), ((1908, 1926), 'numpy.asarray', 'np.asarray', (['series'], {}), '(series)\n', (1918, 1926), True, 'import numpy as np\n'), ((2012, 2039), 'numpy.mean', 'np.mean', (['correlated_samples'], {}), '(correlated_samples)\n', (2019, 2039), True, 'import numpy as np\n'), ((2059, 2085), 'numpy.var', 'np.var', (['correlated_samples'], {}), '(correlated_samples)\n', (2065, 2085), True, 'import numpy as np\n'), ((2176, 2209), 'block_average.block_average', 'block_average', (['correlated_samples'], {}), '(correlated_samples)\n', (2189, 2209), False, 'from block_average import block_average\n'), ((1726, 1745), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1743, 1745), True, 'import numpy as np\n'), ((1765, 1795), 'math.exp', 'math.exp', (['(-(trial_ener - ener))'], {}), '(-(trial_ener - ener))\n', (1773, 1795), False, 'import math\n')]
|
#! Python
from bs4 import BeautifulSoup
from urllib.request import urlopen
import requests, re, time
# Might need to run the following command on Windows to change the console encoding (code page)
# chcp 65001
def getInput(datafile):
links = open(datafile, 'r')
LinkStorage = []
for line in links:
goodLine = line.strip('\n')
LinkStorage.append(goodLine)
links.close()
return LinkStorage
def createOutput(storedData, name):
outfile = open(name, 'w')
for i in storedData:
descrip = storedData[i]
print("{0}\n\t{1}".format(i, descrip), file = outfile)
outfile.close()
def main():
LinkStorage = getInput("combined.txt")
storedData = {}
linkedData = {}
companyData = {}
for i in LinkStorage:
PosNameSearch = re.search('http://www.biospace.com/jobs/job-listing/(.*)-[0-9]', i)
position = PosNameSearch.group(1)
html = requests.get(i).text
soup = BeautifulSoup(html, 'html5lib')
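        # The job requirements and company description live in specific
        # server-generated <span> elements identified by their id attributes.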
description = soup.find("div").findAll("span", attrs={'id':'ctl00_phMainContent_lblJobRequirements'})
company = soup.find("div").findAll("span", attrs={'id':'ctl00_phMainContent_lblJobDescription'})
storedData[position] = description
linkedData[position] = i
companyData[position] = company
print(i)
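        # Pause between requests so the target server is not hammered.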
time.sleep(1)
createOutput(storedData, "output2.txt")
createOutput(linkedData, "linkedData1.txt")
createOutput(companyData, "companyData.txt")
if __name__ == '__main__': main()
|
[
"bs4.BeautifulSoup",
"re.search",
"requests.get",
"time.sleep"
] |
[((723, 790), 're.search', 're.search', (['"""http://www.biospace.com/jobs/job-listing/(.*)-[0-9]"""', 'i'], {}), "('http://www.biospace.com/jobs/job-listing/(.*)-[0-9]', i)\n", (732, 790), False, 'import requests, re, time\n'), ((866, 897), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (879, 897), False, 'from bs4 import BeautifulSoup\n'), ((1212, 1225), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1222, 1225), False, 'import requests, re, time\n'), ((836, 851), 'requests.get', 'requests.get', (['i'], {}), '(i)\n', (848, 851), False, 'import requests, re, time\n')]
|
"""
Code for collecting failure trajectories using Bayesian Optimization
Project : Policy correction using Bayesian Optimization
Description : The file contains functions for computing failure trajectories given RL policy and
safety specifications
"""
import numpy as np
import gym
import GPyOpt
from numpy.random import seed
from eval_policy import display
import gym
from network import FeedForwardActorNN
import torch
import pickle
from numpy import arange
from numpy.random import rand
'''
Bayesian Optimization module for uncovering failure trajectories
Safety Requirement
# Requirement 1: The walker should not fall down in any trajectory
'''
#=============================================Global Variables =================================#
policy = None
env = None
traj_spec_dic = {}
traj_count = 0
index_count = 0
'''
The function called from within the bayesian optimization module
parameters : bounds containing the sampled variables of the state vector
return : calls specification function and computes and returns the minimum value
'''
def sample_trajectory(sample_1,sample_2,sample_3):
global policy, env, traj_spec_dic,traj_count, index_count
selected_seed = env.seed(None)
x1 = sample_1
x2 = sample_2
x3 = sample_3
env.reset()
env.env.state[0] = x1
env.env.state[2] = x2
env.env.state[3] = x3
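    # Overwrite selected entries of the environment's internal state with the
    # sampled values so each rollout starts from a perturbed initial condition.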
obs = torch.Tensor(env.env.state)
#print(f'env.env.state =========== {env.env.state}')
iters= 0
ep_ret = 0
ep_ret, traj, iter = display(obs,policy,env,False)
additional_data = {'reward':ep_ret}
#Create trajectory to be sent to safety specification
traj = (traj, additional_data)
#print(f'trajectory ========== {traj}')
specification_evaluation = safet_spec_2(traj)
index_count = index_count+1
#Store the set of trajectories with negative evaluation
if specification_evaluation<0:
traj_spec_dic[traj_count] = (traj[0],specification_evaluation,selected_seed,(x1,x2,x3))
traj_count = traj_count + 1
print(f'specification_evaluation ========== {specification_evaluation}')
return specification_evaluation
def run_Random():
x1_max = 2*np.pi
x1_min = 0
x2_max = 1
x2_min = -1
x3_max = 1
x3_min = -1
# generate a random sample from the domain
sample_1 = x1_min + rand(1000) * (x1_max - x1_min)
sample_2 = x2_min + rand(1000) * (x2_max - x2_min)
sample_3 = x3_min + rand(1000) * (x3_max - x3_min)
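    # Roll out the policy from each sampled initial condition; failing
    # trajectories are recorded inside sample_trajectory().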
print(f'sample length ========== {len(sample_1)}')
for i in range(len(sample_1)):
val = sample_trajectory(sample_1[i],sample_2[i],sample_3[i])
print(f'sample1 =========== {sample_1[i]} ======== sample2 ==== {sample_2[i]} ==== sample3 ===== {sample_3[i]}')
'''sample = list()
step = 0.7
for sample_1 in arange(x1_min, x1_max+step, step):
for sample_2 in arange(x2_min, x2_max+step, step):
for sample_3 in arange(x3_min, x3_max+step, step):
sample.append([sample_1,sample_2,sample_3])
print(f'sample length ========== {len(sample)}')
for i in range(len(sample)):
val = sample_trajectory(sample[i][0],sample[i][1],sample[i][2])
print(f'sample1 =========== {sample[i][0]} ======== sample2 ==== {sample[i][1]} ==== sample3 ===== {sample[i][2]}')'''
# 1. Find the initial condition such that the pendulum stabilizes to 0
def safet_spec_1(traj, gamma=0.25):
traj = traj[0]
cos_thetas = np.array(traj).T[0]
theta_dots = np.array(traj).T[2]
stab_vals = 0
for ct, td in zip(cos_thetas, theta_dots):
stab_vals = np.abs(np.arccos(ct))**2 + np.abs(td)**2 + stab_vals*gamma
return -stab_vals
# 1. Find the initial condition such that the reward is less than 50
def safet_spec_2(traj):
traj = traj[1]
reward = traj['reward']
#print(f'reward ========== {reward}')
return -(50-reward)
if __name__ == '__main__':
env = gym.make('BipedalWalker-v3')
seed = 0
env.seed(seed)
actor_model = 'Policies/ppo_actor_updatedBipedalWalker-v3.pth'
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim, False)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
run_Random()
print(f'Length trajectory ========== {len(traj_spec_dic)}')
with open('failure_trajectory_bipedal.data', 'wb') as filehandle1:
# store the observation data as binary data stream
pickle.dump(traj_spec_dic, filehandle1)
|
[
"pickle.dump",
"numpy.abs",
"gym.make",
"network.FeedForwardActorNN",
"torch.load",
"eval_policy.display",
"torch.Tensor",
"numpy.array",
"numpy.random.rand",
"numpy.arccos"
] |
[((1450, 1477), 'torch.Tensor', 'torch.Tensor', (['env.env.state'], {}), '(env.env.state)\n', (1462, 1477), False, 'import torch\n'), ((1592, 1624), 'eval_policy.display', 'display', (['obs', 'policy', 'env', '(False)'], {}), '(obs, policy, env, False)\n', (1599, 1624), False, 'from eval_policy import display\n'), ((4072, 4100), 'gym.make', 'gym.make', (['"""BipedalWalker-v3"""'], {}), "('BipedalWalker-v3')\n", (4080, 4100), False, 'import gym\n'), ((4438, 4481), 'network.FeedForwardActorNN', 'FeedForwardActorNN', (['obs_dim', 'act_dim', '(False)'], {}), '(obs_dim, act_dim, False)\n', (4456, 4481), False, 'from network import FeedForwardActorNN\n'), ((4570, 4593), 'torch.load', 'torch.load', (['actor_model'], {}), '(actor_model)\n', (4580, 4593), False, 'import torch\n'), ((4819, 4858), 'pickle.dump', 'pickle.dump', (['traj_spec_dic', 'filehandle1'], {}), '(traj_spec_dic, filehandle1)\n', (4830, 4858), False, 'import pickle\n'), ((2433, 2443), 'numpy.random.rand', 'rand', (['(1000)'], {}), '(1000)\n', (2437, 2443), False, 'from numpy.random import rand\n'), ((2489, 2499), 'numpy.random.rand', 'rand', (['(1000)'], {}), '(1000)\n', (2493, 2499), False, 'from numpy.random import rand\n'), ((2545, 2555), 'numpy.random.rand', 'rand', (['(1000)'], {}), '(1000)\n', (2549, 2555), False, 'from numpy.random import rand\n'), ((3583, 3597), 'numpy.array', 'np.array', (['traj'], {}), '(traj)\n', (3591, 3597), True, 'import numpy as np\n'), ((3621, 3635), 'numpy.array', 'np.array', (['traj'], {}), '(traj)\n', (3629, 3635), True, 'import numpy as np\n'), ((3756, 3766), 'numpy.abs', 'np.abs', (['td'], {}), '(td)\n', (3762, 3766), True, 'import numpy as np\n'), ((3736, 3749), 'numpy.arccos', 'np.arccos', (['ct'], {}), '(ct)\n', (3745, 3749), True, 'import numpy as np\n')]
|
# The purpose of this pipeline stage script is to copy only the cleaned output files that are needed in the end (so that we can share them easily)
import os
import pipeline_commons
import shutil
import ReconstructionUtils
def do_output(segmentPath, globalParams):
segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
segmentOutputMinimalPath = os.path.join(globalParams.MINIMAL_OUTPUT_PATH, segmentName)
segmentOutputFullPath = os.path.join(globalParams.BASE_OUTPUT_PATH, segmentName)
if not os.path.exists(segmentOutputMinimalPath):
os.makedirs(segmentOutputMinimalPath, exist_ok=True)
# PAIRs of: (filename to copy, optional or not)
filesToCopyToOutputMin = [(ReconstructionUtils.FILENAME_CARS_TRAJECTORIES, False),
(ReconstructionUtils.FILENAME_PEOPLE_TRAJECTORIES, False),
(ReconstructionUtils.FILENAME_CARLA_BBOXES, True),
(ReconstructionUtils.FILENAME_COMBINED_CARLA_ENV_POINTCLOUD, False),
(ReconstructionUtils.FILENAME_COMBINED_CARLA_ENV_POINTCLOUD_SEGCOLOR, True),
(ReconstructionUtils.FILENAME_CENTERING_ENV, False),
(ReconstructionUtils.FILENAME_CAMERA_INTRISICS, True)
]
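    # Copy each listed file; a missing file is fatal unless it is marked optional.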
for fileToCopy in filesToCopyToOutputMin:
optional = fileToCopy[1]
filename = fileToCopy[0]
srcFullFilePath = os.path.join(segmentOutputFullPath, filename)
dstFullFilePath = os.path.join(segmentOutputMinimalPath, filename)
if os.path.exists(srcFullFilePath) == False:
if optional == False:
assert False, (f"Can't copy filename {filename} because it doesn't exists !")
else:
shutil.copyfile(srcFullFilePath, dstFullFilePath)
if __name__ == "__main__":
import pipeline_params
do_output(pipeline_params.FILENAME_SAMPLE[0], pipeline_params.globalParams)
|
[
"os.makedirs",
"pipeline_commons.extractSegmentNameFromPath",
"os.path.exists",
"shutil.copyfile",
"os.path.join"
] |
[((300, 356), 'pipeline_commons.extractSegmentNameFromPath', 'pipeline_commons.extractSegmentNameFromPath', (['segmentPath'], {}), '(segmentPath)\n', (343, 356), False, 'import pipeline_commons\n'), ((391, 450), 'os.path.join', 'os.path.join', (['globalParams.MINIMAL_OUTPUT_PATH', 'segmentName'], {}), '(globalParams.MINIMAL_OUTPUT_PATH, segmentName)\n', (403, 450), False, 'import os\n'), ((485, 541), 'os.path.join', 'os.path.join', (['globalParams.BASE_OUTPUT_PATH', 'segmentName'], {}), '(globalParams.BASE_OUTPUT_PATH, segmentName)\n', (497, 541), False, 'import os\n'), ((554, 594), 'os.path.exists', 'os.path.exists', (['segmentOutputMinimalPath'], {}), '(segmentOutputMinimalPath)\n', (568, 594), False, 'import os\n'), ((604, 656), 'os.makedirs', 'os.makedirs', (['segmentOutputMinimalPath'], {'exist_ok': '(True)'}), '(segmentOutputMinimalPath, exist_ok=True)\n', (615, 656), False, 'import os\n'), ((1512, 1557), 'os.path.join', 'os.path.join', (['segmentOutputFullPath', 'filename'], {}), '(segmentOutputFullPath, filename)\n', (1524, 1557), False, 'import os\n'), ((1584, 1632), 'os.path.join', 'os.path.join', (['segmentOutputMinimalPath', 'filename'], {}), '(segmentOutputMinimalPath, filename)\n', (1596, 1632), False, 'import os\n'), ((1645, 1676), 'os.path.exists', 'os.path.exists', (['srcFullFilePath'], {}), '(srcFullFilePath)\n', (1659, 1676), False, 'import os\n'), ((1841, 1890), 'shutil.copyfile', 'shutil.copyfile', (['srcFullFilePath', 'dstFullFilePath'], {}), '(srcFullFilePath, dstFullFilePath)\n', (1856, 1890), False, 'import shutil\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data import voxceleb_data_provider as voxceleb
from enum import Enum
from models import model_utils
from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
import layers
import tensorflow as tf
GENERATOR_NAME_SCOPE = 'image_gen'
LAYOUT_GENERATOR_NAME_SCOPE = 'layout_gen'
DISCRIMINATOR_NAME_SCOPE = 'discriminator'
LAYOUT_ENCODER_NAME_SCOPE = 'layout_enc'
ENCODER_NAME_SCOPE = 'image_enc'
BACKGROUND_IDX = 0
class BaseHeadSynthesisModel(object):
def __init__(self,
config: Dict,
order: Enum=layers.NHWC):
"""
Initializes a few-shot talking head synthesis model.
This function should be called within `strategy.scope()` if using
    tf.distribute.Strategy.
Args:
- config: dictionary, model configuration and options (command line
flags).
- order: Enum, one of {layers.NHWC or NCHW} to specify the channel format
of image tensors.
"""
self.config = config
self.concat_landmarks_to_encoder_input = \
config.concat_conditional_inputs_to_encoder
self.order = order
if self.order != layers.NHWC:
raise NotImplementedError('NCHW format not yet supported!')
self.train_and_eval_networks_initialized = False
self.optimizers_initialized = False
def init_extra_train_and_eval_networks(self):
"""Initializes train losses, networks and optimizers.
This function should be called within `strategy.scope()` if using
tf.distribute.Strategy.
"""
pass
def load_loss_pretrained_weights(self):
"""Loads pre-trained weights for networks used for loss computation."""
if self._vgg_face_loss is not None:
resolution = int(self.config.train_resolution)
      input_shape = (None, resolution, resolution, 3)
self._vgg_face_loss.load_pretrained_weights(input_shape=input_shape)
return
def create_optimizers(
self,
lr_warmstart_steps: int,
decay_start_step: int,
decay_end_step: int,
decay_num_intervals: int,
starting_step: Optional[int]=0,
lr_mul_factor: Optional[float]=1.) -> Dict[Text,
tf.keras.optimizers.Optimizer]:
"""Initializes optimizers for training.
This function should be called within `strategy.scope()` if using
tf.distribute.Strategy.
Args:
- lr_warmstart_steps: int, number of steps to apply learning rate warmup.
- decay_start_step: int, train step at which to start learning rate decay.
- decay_end_step: int, train step at which to end learning rate decay.
- decay_num_intervals: int, factor by which to decay the learning rate;
final learning rate = initial_learning_rate / `decay_num_intervals`.
- starting_step: int, the train starting step. This is zero when training
from scratch, or the loaded train step for finetuning a pre-trained
model.
- lr_mul_factor: optional float, multiplier factor for the learning rate;
mainly used to increase the learning rate w.r.t the number of gpus.
Returns:
      A dictionary with all the optimizers of the model training.
"""
pass
def parse_inputs(self,
inputs_dict: Dict[Text, tf.Tensor],
mode: Enum=model_utils.Mode.TRAIN,
augmentation: bool=False) -> Tuple[tf.Tensor, ...]:
"""Parses the input dataset into the required few-shot inputs.
    Given an input dictionary for a mini-batch, this function constructs the
inputs to the encoder and the generator, as well as the ground truth output
for training/evaluation.
"""
# Parse mode-agnostic inputs.
person_id = inputs_dict[voxceleb.PERSON_ID_KEY]
video_id = inputs_dict[voxceleb.VIDEO_ID_KEY]
video_part_id = inputs_dict[voxceleb.VIDEO_PART_ID_KEY]
frames_few_shots = inputs_dict[voxceleb.FRAMES_KEY]
frame_target = inputs_dict[voxceleb.TARGET_FRAME_KEY]
contours_few_shots = inputs_dict[voxceleb.CONTOURS_KEY]
contour_target = inputs_dict[voxceleb.TARGET_CONTOUR_KEY]
segmaps_few_shots = inputs_dict[voxceleb.SEGMAPS_KEY]
segmap_target = inputs_dict[voxceleb.TARGET_SEGMAP_KEY]
# Cast segmentation label maps to int32
segmaps_few_shots = tf.dtypes.cast(segmaps_few_shots, tf.dtypes.int32)
segmap_target = tf.dtypes.cast(segmap_target, tf.dtypes.int32)
conditional_inputs = contour_target
z_style = inputs_dict['z_style'] if 'z_style' in inputs_dict else None
z_layout = inputs_dict['z_layout'] if 'z_layout' in inputs_dict else None
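    # Optional pre-computed latent codes allow callers to skip running the encoder.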
if z_style is not None or z_layout is not None:
precomputed_latents = (z_style, z_layout)
else:
precomputed_latents = None
basename = tf.strings.join((person_id, video_id, video_part_id),
separator='-')
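    # Per-example identifier used to tag text summaries and outputs.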
channel_axis = 3 if self.order == layers.NHWC else 1
if precomputed_latents is None:
encoder_inputs = frames_few_shots
if self.concat_landmarks_to_encoder_input:
encoder_inputs = tf.concat((encoder_inputs, contours_few_shots),
axis=channel_axis + 1)
else:
encoder_inputs = None
# Parse mode-specific inputs.
if mode == model_utils.Mode.TRAIN or mode == model_utils.Mode.EVAL:
x_gt = frame_target
assert not augmentation, 'No augmentation supported yet!'
return (encoder_inputs, conditional_inputs, x_gt, segmap_target, basename,
precomputed_latents)
elif mode == model_utils.Mode.PREDICT:
return encoder_inputs, conditional_inputs, basename, precomputed_latents
else:
raise ValueError('Unsupported mode %s; must be one of '
'{TRAIN, EVAL, PREDICT}.' % str(mode))
def _add_summaries(
self,
encoder_inputs: tf.Tensor,
target_landmarks: tf.Tensor,
target_segmap: tf.Tensor,
real: tf.Tensor,
outputs_dict: Dict[Text, tf.Tensor],
fg_mask: Union[float, tf.Tensor]=1.,
person_id: Optional[tf.Tensor]=None,
video_id: Optional[tf.Tensor]=None,
video_part_id: Optional[tf.Tensor]=None,
input_basename: Optional[Union[Text, tf.Tensor]]=None,
visualize_rgb: Optional[bool]=True) -> Tuple[Dict[Text, tf.Tensor], ...]:
"""Prepares tensorboard summaries for training/evaluation.
This method takes all inputs, ground truth and intermediate outputs and
prepares image/scalar/text tensorboard summaries to visualize the training
or evaluation.
Args:
- encoder_inputs: 4D tensor, the input to the encoder network.
- target_landmarks: 4D tensor, the input the generator network.
- target_segmap: 4D tensor, the label map of the semantic segmentation (
shape = [batch_size, H, W, 1]).
- real: 4D tensor, the ground truth output.
- outputs_dict: dict string->tf.Tensor, all intermediate and final
outputs.
- fg_mask: Optional 4D tensor, a mask image to apply to the final output
and ground truth. Default is a scalar 1, which leaves the output and
ground truth unmasked.
- person_id: Optional text tensor, person_id for each example.
- video_id: Optional text tensor, video_id for each example.
- video_part_id: Optional text tensor, video_part_id for each example.
- input_basename: Optional text, basenames/base paths for each input in
the minibatch.
- visualize_rgb: Optional bool, whether or not to visualize RGB output.
Returns:
A 3-tuple: (scalar_summaries, image_summaries, text_summaries); each is a
dictionary of str->tf.Tensor.
"""
scalar_summaries_dict = {}
image_summaries_dict = {}
text_summaries_dict = {}
# Retrieve outputs.
fake = outputs_dict['output']
# Tensorboard text summaries.
if person_id is not None:
text_summaries_dict['person_id'] = person_id
if video_id is not None:
text_summaries_dict['video_id'] = video_id
if video_part_id is not None:
text_summaries_dict['video_part_id'] = video_part_id
if input_basename is not None:
text_summaries_dict['basename'] = input_basename
# Visualize few-shot inputs and target rgb frames.
if encoder_inputs is not None:
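      # Keep only the first three (RGB) channels of the few-shot encoder inputs,
      # dropping any concatenated contour channels, for visualization.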
few_shot_inputs = tf.slice(
encoder_inputs, [0, 0, 0, 0, 0], [-1, -1, -1, -1, 3])
num_viz_shots = min(
5, tf.compat.v1.dimension_value(few_shot_inputs.shape[1]))
few_shot_splits = tf.split(few_shot_inputs, self.config.K, axis=1)
few_shot_splits = few_shot_splits[:num_viz_shots]
few_shot_splits = [tf.squeeze(x, axis=1) for x in few_shot_splits]
input_and_target_frames = few_shot_splits
input_and_target_frames.append(real)
few_shot_tuple_viz = tf.concat(input_and_target_frames, axis=2)
image_summaries_dict['few_shot_inputs_and_target'] = few_shot_tuple_viz
# Add IO tuple visualization.
io_tuple_items = []
io_tuple_items.append(target_landmarks)
if target_segmap is not None:
segmap_out_label_map = outputs_dict['segmap_label_map']
num_seg_classes = self.config.num_segmentation_classes
segmap_out_vis = model_utils.visualize_label_map(
segmap_out_label_map, num_seg_classes=num_seg_classes)
segmap_gt_vis = model_utils.visualize_label_map(
target_segmap, num_seg_classes=num_seg_classes)
io_tuple_items.append(segmap_out_vis)
io_tuple_items.append(segmap_gt_vis)
if visualize_rgb:
if not self.config.synthesize_background:
io_tuple_items.append(tf.clip_by_value(fake, -1, 1))
io_tuple_items.append(tf.clip_by_value(fake * fg_mask, -1, 1))
io_tuple_items.append(real * fg_mask)
# Concatenate along width.
io_tuple = tf.concat(io_tuple_items, axis=2)
image_summaries_dict['io_tuple'] = io_tuple
return scalar_summaries_dict, image_summaries_dict, text_summaries_dict
def compute_losses(
self,
real: tf.Tensor,
segmap_gt: tf.Tensor,
outputs_dict: Dict[Text, tf.Tensor],
training: bool,
fg_mask: Union[float, tf.Tensor]=1.,
conditional_inputs: Optional[tf.Tensor]=None,
gradient_tape: Optional[tf.GradientTape]=None) -> Dict[Text, tf.Tensor]:
"""Computes and returns per-example losses of a mini-batch.
Args:
- real: 4D tensor, the ground truth output.
- segmap_gt: 4D tensor, the label map of the semantic segmentation (
shape = [batch_size, H, W, 1]).
- outputs_dict: dict string->tf.Tensor, all intermediate and final outputs.
- training: boolean, whether or not to run the networks in training mode.
- fg_mask: Optional 4D tensor, a mask image to apply to the final output
and ground truth. Default is a scalar 1, which leaves the output and
ground truth unchanged.
- conditional_inputs: Optional 4D tensor, the conditional input the
generator network. This is used for the conditional discriminator.
- gradient_tape: Optional tf.GradientTape, tensorflow's gradient_tape
for gradient penalty computation (if any).
Returns:
A dictionary (str->tf.Tensor), the value of each entry is a 1-D tensor
of length equal to the mini-batch size, representing the per-example loss
values.
"""
pass
def compute_latents(
self,
encoder_inputs: tf.Tensor,
num_few_shots: int,
training: bool,
use_vae: bool=False) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes layout and style latents given the input to the encoder(s).
Args:
- encoder_inputs: 4D or 5D tensor, the input the encoder network.
- num_few_shots: integer, number of few-shot inputs to the encoder.
- training: boolean, whether or not to run the networks in training mode.
- use_vae: boolean, whether the encoder is variational or not. If use_vae
is true AND training is true, then noise sampled from N(0,1) is added
        to the standard deviation of the style latent.
    Returns: a 2-tuple representing the style and layout latents respectively.
"""
pass
def process(
self,
encoder_inputs: tf.Tensor,
conditional_inputs: tf.Tensor,
training: bool,
precomputed_latents: Optional[Tuple[tf.Tensor, ...]]=None,
) -> Dict[Text, Union[tf.Tensor, Tuple[tf.Tensor, ...]]]:
"""Runs the forward pass and returns all intermediate and final outputs.
Args:
- encoder_inputs: 4D or 5D tensor, the input the encoder network.
- conditional_inputs: 4D tensor, the input the generator network.
- training: boolean, whether or not to run the networks in training mode.
- precomputed_latents: Optional 2-tuple of tf.Tensor, pre-computed latent
codes for the input mini-batch. If not None, then the encoder network
is not run, and the pre-computed latents are used instead. If a single,
latent is being used, then the 2nd element in the tuple is None.
Returns: a dictionary holding all intermediate and final outputs.
"""
pass
def train(
self,
inputs_dict: Dict[Text, tf.Tensor],
global_batch_size: int,
train_g_step: bool=True,
train_d_step: bool=True) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs a train step over the input mini/sub-mini batch.
Runs the training step over the input minibatch and aggregates the train
losses over the "global" batch size.
Args:
- inputs_dict: dictionary of strings->tensors representing an input
minibatch.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
- train_g_step: boolean, whether to update the generator weights or not.
- train_d_step: boolean, whether to update the discriminator weights or
not.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated according to the
global batch size.
- outputs_dict: dict string->tf.Tensor, all intermediate and final outputs.
      - summaries: A 3-tuple representing scalar, image and text summaries,
        returned by _add_summaries(). See _add_summaries() for more details.
"""
pass
@tf.function
def train_distributed(
self,
strategy: tf.distribute.Strategy,
dist_inputs_dict: Dict[Text, Any],
global_batch_size: int,
train_g_step: bool=True,
train_d_step: bool=True) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs a distributed train step and aggregates losses across replicas.
Runs the train step over the global minibatch and aggregates the train
losses across different replicas.
Args:
- strategy: tf.distribute.Strategy to be used for building strategy-aware
networks.
- dist_inputs_dict: dictionary of strings->tensors representing an input
minibatch to be distributed across replicas.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
- train_g_step: boolean, whether to update the generator weights or not.
- train_d_step: boolean, whether to update the discriminator weights or
not.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated properly across
        different replicas (i.e. over the global batch size).
- outputs_dict: dict string->PerReplica object, all intermediate and
final outputs, but not aggregated (concatenated) across replicas.
      - summaries: A 3-tuple representing scalar, image and text summaries,
        returned by _add_summaries(). See _add_summaries() for more details.
Summary tensors are PerReplica objects that are not aggregated
(concatenated) across replicas.
"""
(per_replica_loss_dict, per_replica_outputs_dict,
per_replica_summaries) = strategy.run(
self.train, args=(
dist_inputs_dict, global_batch_size, train_g_step, train_d_step))
loss_dict = {}
for loss_key, loss_val in per_replica_loss_dict.items():
loss_dict[loss_key] = strategy.reduce(
tf.distribute.ReduceOp.SUM, loss_val, axis=None)
return loss_dict, per_replica_outputs_dict, per_replica_summaries
def evaluate(
self,
inputs_dict: Dict[Text, tf.Tensor],
global_batch_size: int) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs an evaluation step and updates evaluation metrics.
Runs the evaluation step over the input minibatch and aggregates the eval
losses over the "global" batch size. A side effect of this method is
updating the state of evaluation metrics in self.eval_metrics_dict.
Args:
- inputs_dict: dictionary of strings->tensors representing an input
minibatch.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated according to the
global batch size.
- outputs_dict: dict string->tf.Tensor, all intermediate and final outputs.
      - summaries: A 3-tuple representing scalar, image and text summaries,
        returned by _add_summaries(). See _add_summaries() for more details.
"""
pass
@tf.function
def evaluate_distributed(
self,
strategy: tf.distribute.Strategy,
dist_inputs_dict: Dict[Text, Any],
global_batch_size: int) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs a distributed evaluation step and aggregates losses across replicas.
Runs the evaluation step over the global minibatch and aggregates the eval
losses across different replicas. A side effect of this method is
updating the state of evaluation metrics in self.eval_metrics_dict.
Args:
- strategy: tf.distribute.Strategy to be used for building strategy-aware
networks.
- dist_inputs_dict: dictionary of strings->tensors representing an input
minibatch to be distributed across replicas.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated properly across
        different replicas (i.e. over the global batch size).
- outputs_dict: dict string->PerReplica object, all intermediate and
final outputs, but not aggregated (concatenated) across replicas.
      - summaries: A 3-tuple representing scalar, image and text summaries,
        returned by _add_summaries(). See _add_summaries() for more details.
Summary tensors are PerReplica objects that are not aggregated
(concatenated) across replicas.
"""
(per_replica_loss_dict, per_replica_outputs_dict,
per_replica_summaries) = strategy.run(
self.evaluate, args=(dist_inputs_dict, global_batch_size))
loss_dict = {}
for loss_key, loss_val in per_replica_loss_dict.items():
loss_dict[loss_key] = strategy.reduce(
tf.distribute.ReduceOp.SUM, loss_val, axis=None)
return loss_dict, per_replica_outputs_dict, per_replica_summaries
def get_networks(self) -> Dict[Text, Union[
tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary with all networks and submodules of the model."""
pass
def get_optimizers(self) -> Dict[Text, tf.keras.optimizers.Optimizer]:
"""Returns a dictionary with all the otpimizers of the model training."""
pass
def reset_eval_metrics(self):
"""Resets the internal state of all evaluation metrics."""
for metric in self.eval_metrics_dict.values():
metric.reset_states()
|
[
"tensorflow.clip_by_value",
"models.model_utils.visualize_label_map",
"tensorflow.dtypes.cast",
"tensorflow.concat",
"tensorflow.strings.join",
"tensorflow.compat.v1.dimension_value",
"tensorflow.squeeze",
"tensorflow.slice",
"tensorflow.split"
] |
[((4813, 4863), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['segmaps_few_shots', 'tf.dtypes.int32'], {}), '(segmaps_few_shots, tf.dtypes.int32)\n', (4827, 4863), True, 'import tensorflow as tf\n'), ((4884, 4930), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['segmap_target', 'tf.dtypes.int32'], {}), '(segmap_target, tf.dtypes.int32)\n', (4898, 4930), True, 'import tensorflow as tf\n'), ((5283, 5351), 'tensorflow.strings.join', 'tf.strings.join', (['(person_id, video_id, video_part_id)'], {'separator': '"""-"""'}), "((person_id, video_id, video_part_id), separator='-')\n", (5298, 5351), True, 'import tensorflow as tf\n'), ((10318, 10351), 'tensorflow.concat', 'tf.concat', (['io_tuple_items'], {'axis': '(2)'}), '(io_tuple_items, axis=2)\n', (10327, 10351), True, 'import tensorflow as tf\n'), ((8835, 8897), 'tensorflow.slice', 'tf.slice', (['encoder_inputs', '[0, 0, 0, 0, 0]', '[-1, -1, -1, -1, 3]'], {}), '(encoder_inputs, [0, 0, 0, 0, 0], [-1, -1, -1, -1, 3])\n', (8843, 8897), True, 'import tensorflow as tf\n'), ((9029, 9077), 'tensorflow.split', 'tf.split', (['few_shot_inputs', 'self.config.K'], {'axis': '(1)'}), '(few_shot_inputs, self.config.K, axis=1)\n', (9037, 9077), True, 'import tensorflow as tf\n'), ((9325, 9367), 'tensorflow.concat', 'tf.concat', (['input_and_target_frames'], {'axis': '(2)'}), '(input_and_target_frames, axis=2)\n', (9334, 9367), True, 'import tensorflow as tf\n'), ((9729, 9820), 'models.model_utils.visualize_label_map', 'model_utils.visualize_label_map', (['segmap_out_label_map'], {'num_seg_classes': 'num_seg_classes'}), '(segmap_out_label_map, num_seg_classes=\n num_seg_classes)\n', (9760, 9820), False, 'from models import model_utils\n'), ((9849, 9928), 'models.model_utils.visualize_label_map', 'model_utils.visualize_label_map', (['target_segmap'], {'num_seg_classes': 'num_seg_classes'}), '(target_segmap, num_seg_classes=num_seg_classes)\n', (9880, 9928), False, 'from models import model_utils\n'), ((5590, 5660), 'tensorflow.concat', 'tf.concat', (['(encoder_inputs, contours_few_shots)'], {'axis': '(channel_axis + 1)'}), '((encoder_inputs, contours_few_shots), axis=channel_axis + 1)\n', (5599, 5660), True, 'import tensorflow as tf\n'), ((8949, 9003), 'tensorflow.compat.v1.dimension_value', 'tf.compat.v1.dimension_value', (['few_shot_inputs.shape[1]'], {}), '(few_shot_inputs.shape[1])\n', (8977, 9003), True, 'import tensorflow as tf\n'), ((9159, 9180), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (9169, 9180), True, 'import tensorflow as tf\n'), ((10187, 10226), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(fake * fg_mask)', '(-1)', '(1)'], {}), '(fake * fg_mask, -1, 1)\n', (10203, 10226), True, 'import tensorflow as tf\n'), ((10128, 10157), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['fake', '(-1)', '(1)'], {}), '(fake, -1, 1)\n', (10144, 10157), True, 'import tensorflow as tf\n')]
|
import os
import locale
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# ALLOWED_HOSTS = ['.connectdjango.com', 'localhost', '192.168.127.12'] #use for production
ALLOWED_HOSTS = ['127.0.0.1']  # use for development
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
'crispy_forms',
'social_django',
'channels',
]
AUTHENTICATION_BACKENDS = [
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
'users.authentication.EmailAuthBackend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
]
ROOT_URLCONF = 'connectdjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'connectdjango.wsgi.application'
ASGI_APPLICATION = 'connectdjango.asgi.application'
# channels
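# The channel layer below expects a Redis server reachable at 127.0.0.1:6379.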
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'connectdjango',
'USER': 'connectdjangouser',
'PASSWORD': '<PASSWORD>?',
'HOST': 'localhost',
'PORT': '5432',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'blogs'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')
SOCIAL_AUTH_GITHUB_KEY = config('SOCIAL_AUTH_GITHUB_KEY')
SOCIAL_AUTH_GITHUB_SECRET = config('SOCIAL_AUTH_GITHUB_SECRET')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
EMAIL_HOST = config('EMAIL_HOST', default='localhost')
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
CSRF_FAILURE_VIEW = 'blog.views.csrf_failure'
|
[
"decouple.config",
"os.path.join",
"os.path.abspath"
] |
[((413, 433), 'decouple.config', 'config', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (419, 433), False, 'from decouple import config\n'), ((509, 550), 'decouple.config', 'config', (['"""DEBUG"""'], {'default': '(False)', 'cast': 'bool'}), "('DEBUG', default=False, cast=bool)\n", (515, 550), False, 'from decouple import config\n'), ((4063, 4100), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""staticfiles"""'], {}), "(BASE_DIR, 'staticfiles')\n", (4075, 4100), False, 'import os\n'), ((4137, 4168), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""media"""'], {}), "(BASE_DIR, 'media')\n", (4149, 4168), False, 'import os\n'), ((4289, 4328), 'decouple.config', 'config', (['"""SOCIAL_AUTH_GOOGLE_OAUTH2_KEY"""'], {}), "('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')\n", (4295, 4328), False, 'from decouple import config\n'), ((4364, 4406), 'decouple.config', 'config', (['"""SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET"""'], {}), "('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')\n", (4370, 4406), False, 'from decouple import config\n'), ((4432, 4464), 'decouple.config', 'config', (['"""SOCIAL_AUTH_GITHUB_KEY"""'], {}), "('SOCIAL_AUTH_GITHUB_KEY')\n", (4438, 4464), False, 'from decouple import config\n'), ((4493, 4528), 'decouple.config', 'config', (['"""SOCIAL_AUTH_GITHUB_SECRET"""'], {}), "('SOCIAL_AUTH_GITHUB_SECRET')\n", (4499, 4528), False, 'from decouple import config\n'), ((4608, 4657), 'decouple.config', 'config', (['"""EMAIL_USE_TLS"""'], {'default': '(False)', 'cast': 'bool'}), "('EMAIL_USE_TLS', default=False, cast=bool)\n", (4614, 4657), False, 'from decouple import config\n'), ((4671, 4712), 'decouple.config', 'config', (['"""EMAIL_HOST"""'], {'default': '"""localhost"""'}), "('EMAIL_HOST', default='localhost')\n", (4677, 4712), False, 'from decouple import config\n'), ((4731, 4768), 'decouple.config', 'config', (['"""EMAIL_HOST_USER"""'], {'default': '""""""'}), "('EMAIL_HOST_USER', default='')\n", (4737, 4768), False, 'from decouple import config\n'), ((4791, 4832), 'decouple.config', 'config', (['"""EMAIL_HOST_PASSWORD"""'], {'default': '""""""'}), "('EMAIL_HOST_PASSWORD', default='')\n", (4797, 4832), False, 'from decouple import config\n'), ((4846, 4888), 'decouple.config', 'config', (['"""EMAIL_PORT"""'], {'default': '(25)', 'cast': 'int'}), "('EMAIL_PORT', default=25, cast=int)\n", (4852, 4888), False, 'from decouple import config\n'), ((4013, 4045), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (4025, 4045), False, 'import os\n'), ((168, 193), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import os\n'), ((1888, 1923), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (1900, 1923), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
tuesmon_ncurses.ui.views.auth
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from tuesmon_ncurses.ui.widgets import generic, auth
from . import base
class LoginView(base.View):
login_button = None
def __init__(self, username_text, password_text):
# Header
header = generic.banner()
# Username and password prompts
max_prompt_length = max(len(username_text), len(password_text))
max_prompt_padding = max_prompt_length + 2
self._username_editor = generic.editor()
username_prompt = auth.username_prompt(username_text, self._username_editor, max_prompt_padding)
self._password_editor = generic.editor(mask="♥")
password_prompt = auth.password_prompt(password_text, self._password_editor, max_prompt_padding)
# Login button
self.login_button = generic.button("login")
login_button_widget = auth.wrap_login_button(self.login_button)
# Notifier
self.notifier = generic.Notifier("")
login_widget = auth.Login([header,
generic.box_solid_fill(" ", 2),
username_prompt,
generic.box_solid_fill(" ", 1),
password_prompt,
generic.box_solid_fill(" ", 2),
login_button_widget,
generic.box_solid_fill(" ", 1),
self.notifier])
self.widget = generic.center(login_widget)
@property
def username(self):
return self._username_editor.get_edit_text()
@property
def password(self):
return self._password_editor.get_edit_text()
|
[
"tuesmon_ncurses.ui.widgets.auth.wrap_login_button",
"tuesmon_ncurses.ui.widgets.generic.Notifier",
"tuesmon_ncurses.ui.widgets.generic.box_solid_fill",
"tuesmon_ncurses.ui.widgets.generic.editor",
"tuesmon_ncurses.ui.widgets.generic.button",
"tuesmon_ncurses.ui.widgets.generic.center",
"tuesmon_ncurses.ui.widgets.auth.password_prompt",
"tuesmon_ncurses.ui.widgets.auth.username_prompt",
"tuesmon_ncurses.ui.widgets.generic.banner"
] |
[((308, 324), 'tuesmon_ncurses.ui.widgets.generic.banner', 'generic.banner', ([], {}), '()\n', (322, 324), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((521, 537), 'tuesmon_ncurses.ui.widgets.generic.editor', 'generic.editor', ([], {}), '()\n', (535, 537), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((564, 642), 'tuesmon_ncurses.ui.widgets.auth.username_prompt', 'auth.username_prompt', (['username_text', 'self._username_editor', 'max_prompt_padding'], {}), '(username_text, self._username_editor, max_prompt_padding)\n', (584, 642), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((675, 699), 'tuesmon_ncurses.ui.widgets.generic.editor', 'generic.editor', ([], {'mask': '"""♥"""'}), "(mask='♥')\n", (689, 699), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((726, 804), 'tuesmon_ncurses.ui.widgets.auth.password_prompt', 'auth.password_prompt', (['password_text', 'self._password_editor', 'max_prompt_padding'], {}), '(password_text, self._password_editor, max_prompt_padding)\n', (746, 804), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((856, 879), 'tuesmon_ncurses.ui.widgets.generic.button', 'generic.button', (['"""login"""'], {}), "('login')\n", (870, 879), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((910, 951), 'tuesmon_ncurses.ui.widgets.auth.wrap_login_button', 'auth.wrap_login_button', (['self.login_button'], {}), '(self.login_button)\n', (932, 951), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((995, 1015), 'tuesmon_ncurses.ui.widgets.generic.Notifier', 'generic.Notifier', (['""""""'], {}), "('')\n", (1011, 1015), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((1585, 1613), 'tuesmon_ncurses.ui.widgets.generic.center', 'generic.center', (['login_widget'], {}), '(login_widget)\n', (1599, 1613), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((1098, 1128), 'tuesmon_ncurses.ui.widgets.generic.box_solid_fill', 'generic.box_solid_fill', (['""" """', '(2)'], {}), "(' ', 2)\n", (1120, 1128), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((1223, 1253), 'tuesmon_ncurses.ui.widgets.generic.box_solid_fill', 'generic.box_solid_fill', (['""" """', '(1)'], {}), "(' ', 1)\n", (1245, 1253), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((1348, 1378), 'tuesmon_ncurses.ui.widgets.generic.box_solid_fill', 'generic.box_solid_fill', (['""" """', '(2)'], {}), "(' ', 2)\n", (1370, 1378), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n'), ((1477, 1507), 'tuesmon_ncurses.ui.widgets.generic.box_solid_fill', 'generic.box_solid_fill', (['""" """', '(1)'], {}), "(' ', 1)\n", (1499, 1507), False, 'from tuesmon_ncurses.ui.widgets import generic, auth\n')]
|