code | apis | extract_api
---|---|---|
import copy
import datetime
import re
from .base import BaseModel
import sys
if sys.version_info < (3, 9):
import typing
_re_date_format = re.compile(r'^\d\d\d\d-\d\d-\d\d$')
_re_datetime_format = re.compile(
r'^(\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d)\+(\d\d):(\d\d)$')
def _datetime_value(values: dict, key: str) -> datetime.datetime or None:
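    # Values such as "2020-01-02T03:04:05+00:00" are normalized to
    # "2020-01-02T03:04:05+0000" so that strptime's %z directive accepts them.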
if key in values and values[key] is not None:
value = values[key]
match = _re_datetime_format.fullmatch(value)
if match is not None:
(dt, tz_hours, tz_minutes) = match.groups()
value = "{}+{}{}".format(dt, tz_hours, tz_minutes)
return datetime.datetime.strptime(
value, '%Y-%m-%dT%H:%M:%S%z')
return None
def _date_value(values: dict, key: str) -> datetime.date or None:
if key in values and values[key] is not None:
if _re_date_format.match(values[key]) is not None:
return datetime.datetime.strptime(
values[key], '%Y-%m-%d').date()
return None
def _string_value(values: dict, key: str) -> str:
if key in values and values[key]:
return str(values[key])
return ''
def _int_value(values: dict, key: str) -> int:
if key in values and values[key]:
return int(values[key])
return 0
def _list_value(values: dict, key: str) -> list:
if key in values and type(values[key]) is list:
return copy.deepcopy(values[key])
return []
def _timestamp2datetime(timestamp: int) -> datetime.datetime or None:
if timestamp is not None:
return datetime.datetime.fromtimestamp(timestamp)
return None
class Response(BaseModel):
domains_count: int
if sys.version_info < (3, 9):
domains_list: typing.List[str]
else:
        domains_list: list[str]
def __init__(self, values):
super().__init__()
self.domains_count = 0
self.domains_list = []
if values is not None:
self.domains_count = _int_value(values, 'domainsCount')
self.domains_list = _list_value(values, 'domainsList')
class ErrorMessage(BaseModel):
code: int
message: str
def __init__(self, values):
super().__init__()
        self.code = 0
self.message = ''
if values is not None:
self.code = _int_value(values, 'code')
self.message = _string_value(values, 'messages')
|
[
"datetime.datetime.fromtimestamp",
"datetime.datetime.strptime",
"copy.deepcopy",
"re.compile"
] |
[((146, 188), 're.compile', 're.compile', (['"""^\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d$"""'], {}), "('^\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d$')\n", (156, 188), False, 'import re\n'), ((204, 294), 're.compile', 're.compile', (['"""^(\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\dT\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d)\\\\+(\\\\d\\\\d):(\\\\d\\\\d)$"""'], {}), "(\n '^(\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\dT\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d)\\\\+(\\\\d\\\\d):(\\\\d\\\\d)$')\n", (214, 294), False, 'import re\n'), ((1418, 1444), 'copy.deepcopy', 'copy.deepcopy', (['values[key]'], {}), '(values[key])\n', (1431, 1444), False, 'import copy\n'), ((1576, 1618), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (1607, 1618), False, 'import datetime\n'), ((652, 708), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['value', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(value, '%Y-%m-%dT%H:%M:%S%z')\n", (678, 708), False, 'import datetime\n'), ((939, 990), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['values[key]', '"""%Y-%m-%d"""'], {}), "(values[key], '%Y-%m-%d')\n", (965, 990), False, 'import datetime\n')]
|
"""13Migration
Revision ID: 2425b<PASSWORD>c
Revises: <PASSWORD>
Create Date: 2018-09-08 18:25:12.151586
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pitches', sa.Column('title', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('pitches', 'title')
# ### end Alembic commands ###
|
[
"sqlalchemy.String",
"alembic.op.drop_column"
] |
[((588, 622), 'alembic.op.drop_column', 'op.drop_column', (['"""pitches"""', '"""title"""'], {}), "('pitches', 'title')\n", (602, 622), False, 'from alembic import op\n'), ((425, 446), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (434, 446), True, 'import sqlalchemy as sa\n')]
|
import numpy as np
import cv2
import pdb
# https://github.com/zju3dv/clean-pvnet/blob/master/lib/datasets/augmentation.py
def debug_visualize(image, mask, pts2d, sym_cor, name_prefix='debug'):
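    # Write the image, mask, 2D keypoints and a random sample of symmetry
    # correspondences to PNG files for visual inspection.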
from random import sample
cv2.imwrite('{}_image.png'.format(name_prefix), image * 255)
cv2.imwrite('{}_mask.png'.format(name_prefix), mask * 255)
img_pts = image.copy() * 255
for i in range(pts2d.shape[0]):
x = int(round(pts2d[i, 0]))
y = int(round(pts2d[i, 1]))
img_pts = cv2.circle(img_pts, (x, y), 2, (0, 0, 255), thickness=-1)
cv2.imwrite('{}_pts.png'.format(name_prefix), img_pts)
img_sym = image.copy() * 255
ys, xs = np.nonzero(mask)
for i_pt in sample([i for i in range(len(ys))], min(100, len(ys))):
y = int(round(ys[i_pt]))
x = int(round(xs[i_pt]))
x_cor, y_cor = sym_cor[y, x]
x_cor = int(round(x + x_cor))
y_cor = int(round(y + y_cor))
img_sym = cv2.line(img_sym, (x, y), (x_cor, y_cor), (0, 0, 255), 1)
cv2.imwrite('{}_sym.png'.format(name_prefix), img_sym)
def rotate_sym_cor(sym_cor, mask, R):
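    # Rotate the per-pixel symmetry correspondence offsets: map foreground
    # pixels and their correspondence targets through the 2x3 affine matrix R,
    # then store the new offsets at the rotated pixel locations.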
h, w = sym_cor.shape[:2]
ys, xs = np.nonzero(mask)
source = np.float32(np.stack([xs, ys], axis=-1))
delta = np.float32(sym_cor[ys, xs])
target = source + delta
last_col = np.ones((source.shape[0], 1), dtype=np.float32)
source = np.concatenate([source, last_col], axis=-1)
target = np.concatenate([target, last_col], axis=-1)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
source = np.matmul(source, R)[:, :2]
target = np.matmul(target, R)[:, :2]
source = np.uint32(np.round(source))
delta = target - source
# remove invalid indices
xs, ys = source[:, 0], source[:, 1]
valid = (xs > 0) & (xs < w) & (ys > 0) & (ys < h)
xs, ys, delta = xs[valid], ys[valid], delta[valid]
sym_cor = np.zeros_like(sym_cor)
sym_cor[ys, xs] = delta
return sym_cor
def rotate_instance(img, mask, hcoords, sym_cor, rot_ang_min, rot_ang_max):
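    # Rotate the image, mask, keypoint coordinates and symmetry field by a
    # random angle in [rot_ang_min, rot_ang_max] around the mask centroid.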
h, w = img.shape[0], img.shape[1]
degree = np.random.uniform(rot_ang_min, rot_ang_max)
hs, ws = np.nonzero(mask)
R = cv2.getRotationMatrix2D((np.mean(ws), np.mean(hs)), degree, 1)
sym_cor = rotate_sym_cor(sym_cor, mask, R)
mask = cv2.warpAffine(mask, R, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
img = cv2.warpAffine(img, R, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, last_col], axis=1)
hcoords = np.float32(np.matmul(hcoords, R))
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_resize_instance_v1(img, mask, hcoords, sym_cor, imheight, imwidth,
overlap_ratio=0.5, ratio_min=0.8, ratio_max=1.2):
    '''
    Crop a region of size [imheight*resize_ratio, imwidth*resize_ratio] that
    overlaps the foreground bounding box by at least overlap_ratio, then
    resize it back to [imheight, imwidth].
    '''
hcoords_last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, hcoords_last_col], axis=1)
resize_ratio = np.random.uniform(ratio_min, ratio_max)
target_height = int(imheight * resize_ratio)
target_width = int(imwidth * resize_ratio)
img, mask, hcoords, sym_cor = crop_or_padding_to_fixed_size_instance(
img, mask, hcoords, sym_cor, target_height, target_width, overlap_ratio)
img = cv2.resize(img, (imwidth, imheight), interpolation=cv2.INTER_LINEAR)
mask = cv2.resize(mask, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor = cv2.resize(sym_cor, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor /= resize_ratio
hcoords[:, 0] = hcoords[:, 0] / resize_ratio
hcoords[:, 1] = hcoords[:, 1] / resize_ratio
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size_instance(img, mask, hcoords, sym_cor, th, tw,
overlap_ratio=0.5):
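    # Crop (or pad) to a fixed th x tw window chosen so that it overlaps the
    # foreground bounding box, and shift the homogeneous keypoint coordinates
    # by the crop offset.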
h, w, _ = img.shape
hs, ws = np.nonzero(mask)
hmin, hmax = np.min(hs), np.max(hs)
wmin, wmax = np.min(ws), np.max(ws)
fh, fw = hmax - hmin, wmax - wmin
hpad, wpad = th >= h, tw >= w
    hrmax = int(min(hmin + overlap_ratio * fh, h - th))  # requires h > th, otherwise hrmax < 0
hrmin = int(max(hmin + overlap_ratio * fh - th, 0))
    wrmax = int(min(wmin + overlap_ratio * fw, w - tw))  # requires w > tw, otherwise wrmax < 0
wrmin = int(max(wmin + overlap_ratio * fw - tw, 0))
hbeg = 0 if (hpad or hrmin == hrmax) else np.random.randint(hrmin, hrmax)
hend = hbeg + th
    wbeg = 0 if (wpad or wrmin == wrmax) else np.random.randint(wrmin, wrmax)  # when padding, [0, wend] can exceed [0, w]; slicing past the end is safe
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
hcoords[:, 0] -= wbeg * hcoords[:, 2]
hcoords[:, 1] -= hbeg * hcoords[:, 2]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
hcoords[:, 0] += wbeg * hcoords[:, 2]
hcoords[:, 1] += hbeg * hcoords[:, 2]
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size(img, mask, sym_cor, th, tw):
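    # Same crop-or-pad logic without keypoints: take a random th x tw window,
    # or center-pad when the image is smaller than the target size.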
h, w, _ = img.shape
hpad, wpad = th >= h, tw >= w
hbeg = 0 if hpad else np.random.randint(0, h - th)
    wbeg = 0 if wpad else np.random.randint(0, w - tw)  # when padding, [0, wend] can exceed [0, w]; slicing past the end is safe
hend = hbeg + th
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, sym_cor
|
[
"numpy.ones",
"cv2.warpAffine",
"numpy.random.randint",
"numpy.mean",
"numpy.round",
"cv2.line",
"numpy.zeros_like",
"numpy.max",
"cv2.resize",
"numpy.stack",
"cv2.circle",
"numpy.asarray",
"numpy.min",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.float32",
"numpy.zeros",
"numpy.nonzero",
"numpy.matmul"
] |
[((675, 691), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (685, 691), True, 'import numpy as np\n'), ((1159, 1175), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (1169, 1175), True, 'import numpy as np\n'), ((1241, 1268), 'numpy.float32', 'np.float32', (['sym_cor[ys, xs]'], {}), '(sym_cor[ys, xs])\n', (1251, 1268), True, 'import numpy as np\n'), ((1312, 1359), 'numpy.ones', 'np.ones', (['(source.shape[0], 1)'], {'dtype': 'np.float32'}), '((source.shape[0], 1), dtype=np.float32)\n', (1319, 1359), True, 'import numpy as np\n'), ((1373, 1416), 'numpy.concatenate', 'np.concatenate', (['[source, last_col]'], {'axis': '(-1)'}), '([source, last_col], axis=-1)\n', (1387, 1416), True, 'import numpy as np\n'), ((1430, 1473), 'numpy.concatenate', 'np.concatenate', (['[target, last_col]'], {'axis': '(-1)'}), '([target, last_col], axis=-1)\n', (1444, 1473), True, 'import numpy as np\n'), ((1489, 1530), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 1]], dtype=np.float32)\n', (1499, 1530), True, 'import numpy as np\n'), ((1932, 1954), 'numpy.zeros_like', 'np.zeros_like', (['sym_cor'], {}), '(sym_cor)\n', (1945, 1954), True, 'import numpy as np\n'), ((2130, 2173), 'numpy.random.uniform', 'np.random.uniform', (['rot_ang_min', 'rot_ang_max'], {}), '(rot_ang_min, rot_ang_max)\n', (2147, 2173), True, 'import numpy as np\n'), ((2187, 2203), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (2197, 2203), True, 'import numpy as np\n'), ((2333, 2441), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'R', '(w, h)'], {'flags': 'cv2.INTER_NEAREST', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(0)'}), '(mask, R, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.\n BORDER_CONSTANT, borderValue=0)\n', (2347, 2441), False, 'import cv2\n'), ((2447, 2553), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'R', '(w, h)'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(0)'}), '(img, R, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.\n BORDER_CONSTANT, borderValue=0)\n', (2461, 2553), False, 'import cv2\n'), ((2564, 2605), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 1]], dtype=np.float32)\n', (2574, 2605), True, 'import numpy as np\n'), ((2679, 2727), 'numpy.ones', 'np.ones', (['(hcoords.shape[0], 1)'], {'dtype': 'np.float32'}), '((hcoords.shape[0], 1), dtype=np.float32)\n', (2686, 2727), True, 'import numpy as np\n'), ((2742, 2785), 'numpy.concatenate', 'np.concatenate', (['[hcoords, last_col]'], {'axis': '(1)'}), '([hcoords, last_col], axis=1)\n', (2756, 2785), True, 'import numpy as np\n'), ((3225, 3273), 'numpy.ones', 'np.ones', (['(hcoords.shape[0], 1)'], {'dtype': 'np.float32'}), '((hcoords.shape[0], 1), dtype=np.float32)\n', (3232, 3273), True, 'import numpy as np\n'), ((3288, 3339), 'numpy.concatenate', 'np.concatenate', (['[hcoords, hcoords_last_col]'], {'axis': '(1)'}), '([hcoords, hcoords_last_col], axis=1)\n', (3302, 3339), True, 'import numpy as np\n'), ((3360, 3399), 'numpy.random.uniform', 'np.random.uniform', (['ratio_min', 'ratio_max'], {}), '(ratio_min, ratio_max)\n', (3377, 3399), True, 'import numpy as np\n'), ((3663, 3731), 'cv2.resize', 'cv2.resize', (['img', '(imwidth, imheight)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (imwidth, imheight), interpolation=cv2.INTER_LINEAR)\n', (3673, 3731), False, 'import cv2\n'), ((3743, 3813), 'cv2.resize', 'cv2.resize', (['mask', '(imwidth, imheight)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(mask, 
(imwidth, imheight), interpolation=cv2.INTER_NEAREST)\n', (3753, 3813), False, 'import cv2\n'), ((3828, 3901), 'cv2.resize', 'cv2.resize', (['sym_cor', '(imwidth, imheight)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(sym_cor, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)\n', (3838, 3901), False, 'import cv2\n'), ((4278, 4294), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (4288, 4294), True, 'import numpy as np\n'), ((512, 569), 'cv2.circle', 'cv2.circle', (['img_pts', '(x, y)', '(2)', '(0, 0, 255)'], {'thickness': '(-1)'}), '(img_pts, (x, y), 2, (0, 0, 255), thickness=-1)\n', (522, 569), False, 'import cv2\n'), ((961, 1018), 'cv2.line', 'cv2.line', (['img_sym', '(x, y)', '(x_cor, y_cor)', '(0, 0, 255)', '(1)'], {}), '(img_sym, (x, y), (x_cor, y_cor), (0, 0, 255), 1)\n', (969, 1018), False, 'import cv2\n'), ((1200, 1227), 'numpy.stack', 'np.stack', (['[xs, ys]'], {'axis': '(-1)'}), '([xs, ys], axis=-1)\n', (1208, 1227), True, 'import numpy as np\n'), ((1602, 1622), 'numpy.matmul', 'np.matmul', (['source', 'R'], {}), '(source, R)\n', (1611, 1622), True, 'import numpy as np\n'), ((1643, 1663), 'numpy.matmul', 'np.matmul', (['target', 'R'], {}), '(target, R)\n', (1652, 1663), True, 'import numpy as np\n'), ((1694, 1710), 'numpy.round', 'np.round', (['source'], {}), '(source)\n', (1702, 1710), True, 'import numpy as np\n'), ((2811, 2832), 'numpy.matmul', 'np.matmul', (['hcoords', 'R'], {}), '(hcoords, R)\n', (2820, 2832), True, 'import numpy as np\n'), ((4313, 4323), 'numpy.min', 'np.min', (['hs'], {}), '(hs)\n', (4319, 4323), True, 'import numpy as np\n'), ((4325, 4335), 'numpy.max', 'np.max', (['hs'], {}), '(hs)\n', (4331, 4335), True, 'import numpy as np\n'), ((4353, 4363), 'numpy.min', 'np.min', (['ws'], {}), '(ws)\n', (4359, 4363), True, 'import numpy as np\n'), ((4365, 4375), 'numpy.max', 'np.max', (['ws'], {}), '(ws)\n', (4371, 4375), True, 'import numpy as np\n'), ((4797, 4828), 'numpy.random.randint', 'np.random.randint', (['hrmin', 'hrmax'], {}), '(hrmin, hrmax)\n', (4814, 4828), True, 'import numpy as np\n'), ((4896, 4927), 'numpy.random.randint', 'np.random.randint', (['wrmin', 'wrmax'], {}), '(wrmin, wrmax)\n', (4913, 4927), True, 'import numpy as np\n'), ((5291, 5329), 'numpy.zeros', 'np.zeros', (['[th, tw, 3]'], {'dtype': 'img.dtype'}), '([th, tw, 3], dtype=img.dtype)\n', (5299, 5329), True, 'import numpy as np\n'), ((5349, 5385), 'numpy.zeros', 'np.zeros', (['[th, tw]'], {'dtype': 'mask.dtype'}), '([th, tw], dtype=mask.dtype)\n', (5357, 5385), True, 'import numpy as np\n'), ((5408, 5450), 'numpy.zeros', 'np.zeros', (['[th, tw, 2]'], {'dtype': 'sym_cor.dtype'}), '([th, tw, 2], dtype=sym_cor.dtype)\n', (5416, 5450), True, 'import numpy as np\n'), ((6063, 6091), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - th)'], {}), '(0, h - th)\n', (6080, 6091), True, 'import numpy as np\n'), ((6118, 6146), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - tw)'], {}), '(0, w - tw)\n', (6135, 6146), True, 'import numpy as np\n'), ((6490, 6528), 'numpy.zeros', 'np.zeros', (['[th, tw, 3]'], {'dtype': 'img.dtype'}), '([th, tw, 3], dtype=img.dtype)\n', (6498, 6528), True, 'import numpy as np\n'), ((6548, 6584), 'numpy.zeros', 'np.zeros', (['[th, tw]'], {'dtype': 'mask.dtype'}), '([th, tw], dtype=mask.dtype)\n', (6556, 6584), True, 'import numpy as np\n'), ((6607, 6649), 'numpy.zeros', 'np.zeros', (['[th, tw, 2]'], {'dtype': 'sym_cor.dtype'}), '([th, tw, 2], dtype=sym_cor.dtype)\n', (6615, 6649), True, 'import numpy as np\n'), ((1539, 1576), 
'numpy.concatenate', 'np.concatenate', (['[R, last_row]'], {'axis': '(0)'}), '([R, last_row], axis=0)\n', (1553, 1576), True, 'import numpy as np\n'), ((2237, 2248), 'numpy.mean', 'np.mean', (['ws'], {}), '(ws)\n', (2244, 2248), True, 'import numpy as np\n'), ((2250, 2261), 'numpy.mean', 'np.mean', (['hs'], {}), '(hs)\n', (2257, 2261), True, 'import numpy as np\n'), ((2614, 2651), 'numpy.concatenate', 'np.concatenate', (['[R, last_row]'], {'axis': '(0)'}), '([R, last_row], axis=0)\n', (2628, 2651), True, 'import numpy as np\n')]
|
import os
from conans import ConanFile, CMake, tools
required_conan_version = ">=1.33.0"
class NsimdConan(ConanFile):
name = "nsimd"
homepage = "https://github.com/agenium-scale/nsimd"
description = "Agenium Scale vectorization library for CPUs and GPUs"
topics = ("hpc", "neon", "cuda", "avx", "simd", "avx2", "sse2", "aarch64", "avx512", "sse42", "rocm", "sve", "neon128")
url = "https://github.com/conan-io/conan-center-index"
license = "MIT"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
        # This is used only when building the library.
# Most functionality is header only.
"simd": [None, "cpu", "sse2", "sse42", "avx", "avx2", "avx512_knl", "avx512_skylake", "neon128", "aarch64", "sve", "sve128", "sve256", "sve512", "sve1024", "sve2048", "cuda", "rocm"]
}
default_options = {
"shared": False,
"fPIC": True,
"simd": None
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
# Most of the library is header only.
# cpp files do not use STL.
del self.settings.compiler.libcxx
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if self.options.simd:
self._cmake.definitions["simd"] = self.options.simd
if self.settings.arch == "armv7hf":
self._cmake.definitions["NSIMD_ARM32_IS_ARMEL"] = False
self._cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True)
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
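        # Adjust the upstream CMakeLists so the recipe controls the build:
        # drop the forced SHARED library type, install runtime artifacts to
        # bin/, and remove the hard-coded POSITION_INDEPENDENT_CODE setting
        # (fPIC is handled through the CMake definition in _configure_cmake).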
cmakefile_path = os.path.join(self._source_subfolder, "CMakeLists.txt")
tools.replace_in_file(cmakefile_path,
" SHARED ",
" ")
tools.replace_in_file(cmakefile_path,
"RUNTIME DESTINATION lib",
"RUNTIME DESTINATION bin")
tools.replace_in_file(cmakefile_path,
"set_property(TARGET ${o} PROPERTY POSITION_INDEPENDENT_CODE ON)",
"")
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
|
[
"conans.tools.get",
"conans.tools.replace_in_file",
"conans.CMake",
"os.path.join",
"conans.tools.collect_libs"
] |
[((1613, 1723), 'conans.tools.get', 'tools.get', ([], {'strip_root': '(True)', 'destination': 'self._source_subfolder'}), "(**self.conan_data['sources'][self.version], strip_root=True,\n destination=self._source_subfolder)\n", (1622, 1723), False, 'from conans import ConanFile, CMake, tools\n'), ((1830, 1841), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (1835, 1841), False, 'from conans import ConanFile, CMake, tools\n'), ((2302, 2356), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""CMakeLists.txt"""'], {}), "(self._source_subfolder, 'CMakeLists.txt')\n", (2314, 2356), False, 'import os\n'), ((2365, 2419), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['cmakefile_path', '""" SHARED """', '""" """'], {}), "(cmakefile_path, ' SHARED ', ' ')\n", (2386, 2419), False, 'from conans import ConanFile, CMake, tools\n'), ((2488, 2583), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['cmakefile_path', '"""RUNTIME DESTINATION lib"""', '"""RUNTIME DESTINATION bin"""'], {}), "(cmakefile_path, 'RUNTIME DESTINATION lib',\n 'RUNTIME DESTINATION bin')\n", (2509, 2583), False, 'from conans import ConanFile, CMake, tools\n'), ((2648, 2760), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['cmakefile_path', '"""set_property(TARGET ${o} PROPERTY POSITION_INDEPENDENT_CODE ON)"""', '""""""'], {}), "(cmakefile_path,\n 'set_property(TARGET ${o} PROPERTY POSITION_INDEPENDENT_CODE ON)', '')\n", (2669, 2760), False, 'from conans import ConanFile, CMake, tools\n'), ((3150, 3174), 'conans.tools.collect_libs', 'tools.collect_libs', (['self'], {}), '(self)\n', (3168, 3174), False, 'from conans import ConanFile, CMake, tools\n')]
|
import pytest
@pytest.mark.asyncio
@pytest.mark.ttftt_engine
@pytest.mark.parametrize(
"query,errors",
[
(
"""
subscription Sub {
newDog {
name
}
newHuman {
name
}
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 2, "column": 30},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
subscription Sub {
newDog {
name
}
__typename
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 2, "column": 30},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
fragment MultipleSubscriptionsFields on Subscription {
newDog {
name
}
newHuman {
name
}
}
subscription Sub {
...MultipleSubscriptionsFields
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 11, "column": 13},
{"line": 2, "column": 66},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
subscription Sub {
... on Subscription {
newDog {
name
}
newHuman {
name
}
}
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 3, "column": 35},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
fragment MultipleSubscriptionsFields on Subscription {
... on Subscription {
newDog {
name
}
newHuman {
name
}
}
}
subscription Sub {
...MultipleSubscriptionsFields
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 13, "column": 13},
{"line": 3, "column": 35},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
],
)
async def test_issue87(engine, query, errors):
assert await engine.execute(query) == {"data": None, "errors": errors}
|
[
"pytest.mark.parametrize"
] |
[((64, 3172), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""query,errors"""', '[(\n """\n subscription Sub {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 2, \'column\': 13}, {\'line\': 2,\n \'column\': 30}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n subscription Sub {\n newDog {\n name\n }\n __typename\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 2, \'column\': 13}, {\'line\': 2,\n \'column\': 30}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n fragment MultipleSubscriptionsFields on Subscription {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n\n subscription Sub {\n ...MultipleSubscriptionsFields\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 11, \'column\': 13}, {\'line\': 2,\n \'column\': 66}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n subscription Sub {\n ... on Subscription {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 2, \'column\': 13}, {\'line\': 3,\n \'column\': 35}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n fragment MultipleSubscriptionsFields on Subscription {\n ... 
on Subscription {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n }\n\n subscription Sub {\n ...MultipleSubscriptionsFields\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 13, \'column\': 13}, {\'line\': 3,\n \'column\': 35}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}])]'], {}), '(\'query,errors\', [(\n """\n subscription Sub {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 2, \'column\': 13}, {\'line\': 2,\n \'column\': 30}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n subscription Sub {\n newDog {\n name\n }\n __typename\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 2, \'column\': 13}, {\'line\': 2,\n \'column\': 30}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n fragment MultipleSubscriptionsFields on Subscription {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n\n subscription Sub {\n ...MultipleSubscriptionsFields\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 11, \'column\': 13}, {\'line\': 2,\n \'column\': 66}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n subscription Sub {\n ... on Subscription {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 2, \'column\': 13}, {\'line\': 3,\n \'column\': 35}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}]), (\n """\n fragment MultipleSubscriptionsFields on Subscription {\n ... on Subscription {\n newDog {\n name\n }\n newHuman {\n name\n }\n }\n }\n\n subscription Sub {\n ...MultipleSubscriptionsFields\n }\n """\n , [{\'message\': \'Subcription Sub must select only one top level field.\',\n \'path\': None, \'locations\': [{\'line\': 13, \'column\': 13}, {\'line\': 3,\n \'column\': 35}], \'extensions\': {\'rule\': \'5.2.3.1\', \'spec\': \'June 2018\',\n \'details\':\n \'https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field\',\n \'tag\': \'single-root-field\'}}])])\n', (87, 3172), False, 'import pytest\n')]
|
import argparse
from builtins import input
import datetime
import logging
import pprint
import sys
import ee
import openet.ssebop as ssebop
import utils
# from . import utils
def main(ini_path=None, overwrite_flag=False, delay_time=0, gee_key_file=None,
max_ready=-1):
"""Compute default Tcorr image asset
Parameters
----------
ini_path : str
Input file path.
overwrite_flag : bool, optional
If True, overwrite existing files (the default is False).
delay_time : float, optional
Delay time in seconds between starting export tasks (or checking the
number of queued tasks, see "max_ready" parameter). The default is 0.
gee_key_file : str, None, optional
Earth Engine service account JSON key file (the default is None).
max_ready: int, optional
Maximum number of queued "READY" tasks. The default is -1 which is
implies no limit to the number of tasks that will be submitted.
"""
logging.info('\nCompute default Tcorr image asset')
ini = utils.read_ini(ini_path)
model_name = 'SSEBOP'
# model_name = ini['INPUTS']['et_model'].upper()
tmax_name = ini[model_name]['tmax_source']
export_id_fmt = 'tcorr_image_{product}_default'
tcorr_daily_coll_id = '{}/{}_daily'.format(
ini['EXPORT']['export_coll'], tmax_name.lower())
tcorr_default_img_id = '{}/{}_default'.format(
ini['EXPORT']['export_coll'], tmax_name.lower())
try:
tcorr_default = ini[model_name]['tcorr_default']
except:
tcorr_default = 0.978
if (tmax_name.upper() == 'CIMIS' and
ini['INPUTS']['end_date'] < '2003-10-01'):
logging.error(
'\nCIMIS is not currently available before 2003-10-01, exiting\n')
sys.exit()
elif (tmax_name.upper() == 'DAYMET' and
ini['INPUTS']['end_date'] > '2018-12-31'):
logging.warning(
'\nDAYMET is not currently available past 2018-12-31, '
'using median Tmax values\n')
# sys.exit()
# elif (tmax_name.upper() == 'TOPOWX' and
# ini['INPUTS']['end_date'] > '2017-12-31'):
# logging.warning(
# '\nDAYMET is not currently available past 2017-12-31, '
# 'using median Tmax values\n')
# # sys.exit()
logging.info('\nInitializing Earth Engine')
if gee_key_file:
logging.info(' Using service account key file: {}'.format(gee_key_file))
# The "EE_ACCOUNT" parameter is not used if the key file is valid
ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file))
else:
ee.Initialize()
logging.debug('\nTmax properties')
tmax_source = tmax_name.split('_', 1)[0]
tmax_version = tmax_name.split('_', 1)[1]
# tmax_coll_id = 'projects/earthengine-legacy/assets/' \
# 'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
# tmax_coll = ee.ImageCollection(tmax_coll_id)
# tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
# logging.debug(' Collection: {}'.format(tmax_coll_id))
logging.debug(' Source: {}'.format(tmax_source))
logging.debug(' Version: {}'.format(tmax_version))
# Get the Tcorr daily image collection properties
logging.debug('\nTcorr Image properties')
tcorr_img = ee.Image(ee.ImageCollection(tcorr_daily_coll_id).first())
tcorr_info = utils.get_info(ee.Image(tcorr_img))
tcorr_geo = tcorr_info['bands'][0]['crs_transform']
tcorr_crs = tcorr_info['bands'][0]['crs']
tcorr_shape = tcorr_info['bands'][0]['dimensions']
# tcorr_geo = ee.Image(tcorr_img).projection().getInfo()['transform']
# tcorr_crs = ee.Image(tcorr_img).projection().getInfo()['crs']
# tcorr_shape = ee.Image(tcorr_img).getInfo()['bands'][0]['dimensions']
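    # Extent as [xmin, ymin, xmax, ymax], derived from the affine geo
    # transform [xscale, 0, xmin, 0, yscale, ymax] and the raster dimensions.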
tcorr_extent = [tcorr_geo[2], tcorr_geo[5] + tcorr_shape[1] * tcorr_geo[4],
tcorr_geo[2] + tcorr_shape[0] * tcorr_geo[0], tcorr_geo[5]]
logging.debug(' Shape: {}'.format(tcorr_shape))
logging.debug(' Extent: {}'.format(tcorr_extent))
logging.debug(' Geo: {}'.format(tcorr_geo))
logging.debug(' CRS: {}'.format(tcorr_crs))
# Get current running tasks
tasks = utils.get_ee_tasks()
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug(' Tasks: {}\n'.format(len(tasks)))
input('ENTER')
# # Limit by year
# try:
# year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
# except:
# logging.info('\nTCORR "years" parameter not set in the INI,'
# '\n Defaulting to all available years\n')
# year_list = []
export_id = export_id_fmt.format(product=tmax_name.lower())
logging.info(' Export ID: {}'.format(export_id))
logging.info(' Asset ID: {}'.format(tcorr_default_img_id))
if overwrite_flag:
if export_id in tasks.keys():
logging.debug(' Task already submitted, cancelling')
ee.data.cancelTask(tasks[export_id]['id'])
# This is intentionally not an "elif" so that a task can be
# cancelled and an existing image/file/asset can be removed
if ee.data.getInfo(tcorr_default_img_id):
logging.debug(' Asset already exists, removing')
ee.data.deleteAsset(tcorr_default_img_id)
else:
if export_id in tasks.keys():
logging.debug(' Task already submitted, exiting')
return False
elif ee.data.getInfo(tcorr_default_img_id):
logging.debug(' Asset already exists, exiting')
return False
tcorr_daily_coll = ee.ImageCollection(tcorr_daily_coll_id)
output_img = tcorr_daily_coll.mosaic().multiply(0).add(tcorr_default)\
.updateMask(1).rename(['tcorr'])\
.set({
# 'system:time_start': utils.millis(iter_start_dt),
'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
'model_name': model_name,
'model_version': ssebop.__version__,
'tmax_source': tmax_source.upper(),
'tmax_version': tmax_version.upper(),
})
logging.debug(' Building export task')
task = ee.batch.Export.image.toAsset(
image=ee.Image(output_img),
description=export_id,
assetId=tcorr_default_img_id,
crs=tcorr_crs,
crsTransform='[' + ','.join(list(map(str, tcorr_geo))) + ']',
dimensions='{0}x{1}'.format(*tcorr_shape),
)
logging.debug(' Starting export task')
utils.ee_task_start(task)
# Pause before starting the next export task
utils.delay_task(delay_time, max_ready)
logging.debug('')
def arg_parse():
""""""
parser = argparse.ArgumentParser(
description='Compute/export default Tcorr image asset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-i', '--ini', type=utils.arg_valid_file,
help='Input file', metavar='FILE')
parser.add_argument(
'--delay', default=0, type=float,
help='Delay (in seconds) between each export tasks')
parser.add_argument(
'--key', type=utils.arg_valid_file, metavar='FILE',
help='JSON key file')
parser.add_argument(
'--ready', default=-1, type=int,
help='Maximum number of queued READY tasks')
parser.add_argument(
'-o', '--overwrite', default=False, action='store_true',
help='Force overwrite of existing files')
parser.add_argument(
'-d', '--debug', default=logging.INFO, const=logging.DEBUG,
help='Debug level logging', action='store_const', dest='loglevel')
args = parser.parse_args()
# Prompt user to select an INI file if not set at command line
# if not args.ini:
# args.ini = utils.get_ini_path(os.getcwd())
return args
if __name__ == "__main__":
args = arg_parse()
logging.basicConfig(level=args.loglevel, format='%(message)s')
logging.getLogger('googleapiclient').setLevel(logging.ERROR)
main(ini_path=args.ini, overwrite_flag=args.overwrite,
delay_time=args.delay, gee_key_file=args.key, max_ready=args.ready)
|
[
"argparse.ArgumentParser",
"utils.read_ini",
"utils.delay_task",
"ee.ServiceAccountCredentials",
"logging.error",
"ee.data.cancelTask",
"logging.warning",
"ee.Initialize",
"utils.get_ee_tasks",
"datetime.datetime.today",
"sys.exit",
"logging.debug",
"ee.data.getInfo",
"logging.basicConfig",
"ee.ImageCollection",
"ee.Image",
"builtins.input",
"utils.ee_task_start",
"logging.info",
"ee.data.deleteAsset",
"logging.getLogger"
] |
[((992, 1046), 'logging.info', 'logging.info', (['"""\nCompute default Tcorr image asset"""'], {}), '("""\nCompute default Tcorr image asset""")\n', (1004, 1046), False, 'import logging\n'), ((1055, 1079), 'utils.read_ini', 'utils.read_ini', (['ini_path'], {}), '(ini_path)\n', (1069, 1079), False, 'import utils\n'), ((2329, 2375), 'logging.info', 'logging.info', (['"""\nInitializing Earth Engine"""'], {}), '("""\nInitializing Earth Engine""")\n', (2341, 2375), False, 'import logging\n'), ((2669, 2703), 'logging.debug', 'logging.debug', (['"""\nTmax properties"""'], {}), "('\\nTmax properties')\n", (2682, 2703), False, 'import logging\n'), ((3285, 3329), 'logging.debug', 'logging.debug', (['"""\nTcorr Image properties"""'], {}), '("""\nTcorr Image properties""")\n', (3298, 3329), False, 'import logging\n'), ((4240, 4260), 'utils.get_ee_tasks', 'utils.get_ee_tasks', ([], {}), '()\n', (4258, 4260), False, 'import utils\n'), ((5662, 5701), 'ee.ImageCollection', 'ee.ImageCollection', (['tcorr_daily_coll_id'], {}), '(tcorr_daily_coll_id)\n', (5680, 5701), False, 'import ee\n'), ((6177, 6216), 'logging.debug', 'logging.debug', (['""" Building export task"""'], {}), "(' Building export task')\n", (6190, 6216), False, 'import logging\n'), ((6519, 6558), 'logging.debug', 'logging.debug', (['""" Starting export task"""'], {}), "(' Starting export task')\n", (6532, 6558), False, 'import logging\n'), ((6563, 6588), 'utils.ee_task_start', 'utils.ee_task_start', (['task'], {}), '(task)\n', (6582, 6588), False, 'import utils\n'), ((6643, 6682), 'utils.delay_task', 'utils.delay_task', (['delay_time', 'max_ready'], {}), '(delay_time, max_ready)\n', (6659, 6682), False, 'import utils\n'), ((6687, 6704), 'logging.debug', 'logging.debug', (['""""""'], {}), "('')\n", (6700, 6704), False, 'import logging\n'), ((6748, 6893), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute/export default Tcorr image asset"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Compute/export default Tcorr image asset', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (6771, 6893), False, 'import argparse\n'), ((7938, 8000), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'args.loglevel', 'format': '"""%(message)s"""'}), "(level=args.loglevel, format='%(message)s')\n", (7957, 8000), False, 'import logging\n'), ((1689, 1776), 'logging.error', 'logging.error', (['"""\nCIMIS is not currently available before 2003-10-01, exiting\n"""'], {}), '(\n """\nCIMIS is not currently available before 2003-10-01, exiting\n""")\n', (1702, 1776), False, 'import logging\n'), ((1791, 1801), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1799, 1801), False, 'import sys\n'), ((2648, 2663), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (2661, 2663), False, 'import ee\n'), ((3433, 3452), 'ee.Image', 'ee.Image', (['tcorr_img'], {}), '(tcorr_img)\n', (3441, 3452), False, 'import ee\n'), ((4392, 4406), 'builtins.input', 'input', (['"""ENTER"""'], {}), "('ENTER')\n", (4397, 4406), False, 'from builtins import input\n'), ((5209, 5246), 'ee.data.getInfo', 'ee.data.getInfo', (['tcorr_default_img_id'], {}), '(tcorr_default_img_id)\n', (5224, 5246), False, 'import ee\n'), ((1909, 2019), 'logging.warning', 'logging.warning', (['"""\nDAYMET is not currently available past 2018-12-31, using median Tmax values\n"""'], {}), '(\n """\nDAYMET is not currently available past 2018-12-31, using median Tmax values\n"""\n )\n', (1924, 2019), False, 'import logging\n'), ((2572, 
2628), 'ee.ServiceAccountCredentials', 'ee.ServiceAccountCredentials', (['"""x"""'], {'key_file': 'gee_key_file'}), "('x', key_file=gee_key_file)\n", (2600, 2628), False, 'import ee\n'), ((4953, 5006), 'logging.debug', 'logging.debug', (['""" Task already submitted, cancelling"""'], {}), "(' Task already submitted, cancelling')\n", (4966, 5006), False, 'import logging\n'), ((5019, 5061), 'ee.data.cancelTask', 'ee.data.cancelTask', (["tasks[export_id]['id']"], {}), "(tasks[export_id]['id'])\n", (5037, 5061), False, 'import ee\n'), ((5260, 5309), 'logging.debug', 'logging.debug', (['""" Asset already exists, removing"""'], {}), "(' Asset already exists, removing')\n", (5273, 5309), False, 'import logging\n'), ((5322, 5363), 'ee.data.deleteAsset', 'ee.data.deleteAsset', (['tcorr_default_img_id'], {}), '(tcorr_default_img_id)\n', (5341, 5363), False, 'import ee\n'), ((5424, 5474), 'logging.debug', 'logging.debug', (['""" Task already submitted, exiting"""'], {}), "(' Task already submitted, exiting')\n", (5437, 5474), False, 'import logging\n'), ((5513, 5550), 'ee.data.getInfo', 'ee.data.getInfo', (['tcorr_default_img_id'], {}), '(tcorr_default_img_id)\n', (5528, 5550), False, 'import ee\n'), ((6273, 6293), 'ee.Image', 'ee.Image', (['output_img'], {}), '(output_img)\n', (6281, 6293), False, 'import ee\n'), ((8005, 8041), 'logging.getLogger', 'logging.getLogger', (['"""googleapiclient"""'], {}), "('googleapiclient')\n", (8022, 8041), False, 'import logging\n'), ((3352, 3391), 'ee.ImageCollection', 'ee.ImageCollection', (['tcorr_daily_coll_id'], {}), '(tcorr_daily_coll_id)\n', (3370, 3391), False, 'import ee\n'), ((4268, 4287), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4285, 4287), False, 'import logging\n'), ((5564, 5612), 'logging.debug', 'logging.debug', (['""" Asset already exists, exiting"""'], {}), "(' Asset already exists, exiting')\n", (5577, 5612), False, 'import logging\n'), ((5928, 5953), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5951, 5953), False, 'import datetime\n')]
|
import numpy as np
import os
import gym
import torch
import torch.nn as nn
import collections
import copy
import random
# hyper-parameters
learn_freq = 5  # accumulate some experience in the replay buffer before starting training
buffer_size = 20000  # replay buffer capacity
buffer_init_size = 200  # minimum number of experiences required before training starts
batch_size = 32  # number of transitions sampled per update
learning_rate = 0.001  # learning rate
GAMMA = 0.99  # reward discount factor
class Model(nn.Module):
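    # Simple two-hidden-layer MLP mapping a state vector to one Q value per action.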
def __init__(self, act_dim, state_dim):
super(Model, self).__init__()
hidden1_size = 128
hidden2_size = 128
self.input_layer = nn.Linear(state_dim, hidden1_size)
self.input_layer.weight.data.normal_(0, 0.1)
self.hidden_layer = nn.Linear(hidden1_size, hidden2_size)
self.hidden_layer.weight.data.normal_(0, 0.1)
self.output_layer = nn.Linear(hidden2_size, act_dim)
self.output_layer.weight.data.normal_(0, 0.1)
def forward(self, state):
h1 = nn.functional.relu(self.input_layer(state))
h2 = nn.functional.relu(self.hidden_layer(h1))
Q = self.output_layer(h2)
return Q
class DQN:
def __init__(self, model, act_dim=None, gamma=None, lr=None):
self.model = model
self.target_model = copy.deepcopy(model)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
self.loss = nn.MSELoss()
self.act_dim = act_dim
self.lr = lr
self.gamma = gamma
def predict(self, state):
return self.model.forward(state) # shape: batch_size x act_dim
def learn(self, state, action, reward, state_next, done): # shape: batch_size x 1
        # Compute the target Q value from the target network
        next_values = self.target_model.forward(state_next).detach()  # stop gradients through the target network, shape: batch_size x act_dim
        target_value = reward + (1.0 - done)*self.gamma*next_values.max(1)[0]  # shape: batch_size x 1
        # Get Q(s, a) from the current network
        curr_value = self.model.forward(state)
        action = action.unsqueeze(1)
        pred_value = torch.gather(curr_value, 1, action.long())  # take the Q value of the chosen action along dim 1: batch_size x act_dim -> batch_size x 1
cost = self.loss(pred_value, target_value)
self.optimizer.zero_grad()
cost.backward()
self.optimizer.step()
return cost
def update_target(self):
        self.target_model.load_state_dict(self.model.state_dict())  # copy the online network weights into the target network
class ReplayMemory(object):
def __init__(self, max_size):
self.buffer = collections.deque(maxlen=max_size)
def append(self, exp):
self.buffer.append(exp)
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size)
state_batch, action_batch, reward_batch, state_netx_batch, done_batch = [], [], [], [], []
for exp in batch:
s, a, r, s_next, done = exp
state_batch.append(s)
action_batch.append(a)
reward_batch.append(r)
state_netx_batch.append(s_next)
done_batch.append(done)
return torch.from_numpy(np.array(state_batch).astype('float32')), \
torch.from_numpy(np.array(action_batch).astype('int32')), \
torch.from_numpy(np.array(reward_batch).astype('float32')), \
torch.from_numpy(np.array(state_netx_batch).astype('float32')), \
torch.from_numpy(np.array(done_batch).astype('float32'))
def __len__(self):
return len(self.buffer)
class Agent:
def __init__(self, algorithm, state_dim, act_dim, epsilon=0.1, epsilon_fade=0.0):
self.dqn = algorithm
self.state_dim = state_dim
self.act_dim = act_dim
self.steps = 0
self.update_target_steps = 200
self.epsilon = epsilon
self.epsilon_fade = epsilon_fade
def explore(self, state):
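        # Epsilon-greedy action selection; epsilon decays by epsilon_fade per
        # call, down to a floor of 0.01.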
sample = np.random.rand()
if sample < self.epsilon:
action = np.random.randint(self.act_dim)
else:
action = self.greedy(state)
self.epsilon = max(0.01, self.epsilon - self.epsilon_fade)
return action
def greedy(self, state):
state = torch.from_numpy(state)
state = torch.tensor(state, dtype=torch.float32)
pred_value = self.dqn.target_model.forward(state)
values = pred_value.detach().numpy()
values = np.squeeze(values, axis=None)
        action = np.argmax(values)  # pick the index of the largest Q value
return action
def learn(self, state, action, reward, state_next, done):
if self.steps % self.update_target_steps == 0:
self.dqn.update_target()
self.steps += 1
cost = self.dqn.learn(state, action, reward, state_next, done)
return cost
def evaluate(env, agent, render=True):
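    # Run 10 greedy evaluation episodes and return the mean episode reward.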
eval_reward = []
for i in range(10):
state = env.reset()
episode_reward = 0
while True:
action = agent.greedy(state)
state, reward, done, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
if __name__ == '__main__':
env = gym.make('CartPole-v0')
action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
exp_buffer = ReplayMemory(buffer_size)
model = Model(act_dim=action_dim, state_dim=state_dim)
algorithm = DQN(model, act_dim=action_dim, gamma=GAMMA, lr=learning_rate)
agent = Agent(algorithm, state_dim=state_dim, act_dim=action_dim, epsilon=0.1, epsilon_fade=1e-6)
state = env.reset()
while(len(exp_buffer)<buffer_init_size):
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
exp_buffer.append((state, action, reward, state_next, done))
state = state_next
if done:
state = env.reset()
episode = 0
while episode < 20000:
for i in range(0, 100):
episode += 1
total_reward = 0
state = env.reset()
step = 0
while True:
step += 1
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
# env.render()
exp_buffer.append((state, action, reward, state_next, done))
# train
if len(exp_buffer) > buffer_init_size and step%learn_freq == 0:
(state_batch, action_batch, reward_batch, state_next_batch, done_batch) = exp_buffer.sample(batch_size)
loss = agent.learn(state_batch, action_batch, reward_batch, state_next_batch, done_batch)
total_reward += reward
state = state_next
if done:
break
eval_reward = evaluate(env, agent, render=True)
print('episode: %d e_greed: %.5f test_reward: %.1f' %(episode, agent.epsilon, eval_reward))
torch.save(agent.dqn.target_model, './dqn.pkl')
|
[
"copy.deepcopy",
"torch.nn.MSELoss",
"gym.make",
"numpy.argmax",
"random.sample",
"torch.save",
"numpy.mean",
"numpy.random.randint",
"numpy.array",
"torch.nn.Linear",
"numpy.random.rand",
"torch.tensor",
"numpy.squeeze",
"collections.deque",
"torch.from_numpy"
] |
[((5045, 5065), 'numpy.mean', 'np.mean', (['eval_reward'], {}), '(eval_reward)\n', (5052, 5065), True, 'import numpy as np\n'), ((5104, 5127), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (5112, 5127), False, 'import gym\n'), ((6878, 6925), 'torch.save', 'torch.save', (['agent.dqn.target_model', '"""./dqn.pkl"""'], {}), "(agent.dqn.target_model, './dqn.pkl')\n", (6888, 6925), False, 'import torch\n'), ((497, 531), 'torch.nn.Linear', 'nn.Linear', (['state_dim', 'hidden1_size'], {}), '(state_dim, hidden1_size)\n', (506, 531), True, 'import torch.nn as nn\n'), ((613, 650), 'torch.nn.Linear', 'nn.Linear', (['hidden1_size', 'hidden2_size'], {}), '(hidden1_size, hidden2_size)\n', (622, 650), True, 'import torch.nn as nn\n'), ((733, 765), 'torch.nn.Linear', 'nn.Linear', (['hidden2_size', 'act_dim'], {}), '(hidden2_size, act_dim)\n', (742, 765), True, 'import torch.nn as nn\n'), ((1148, 1168), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (1161, 1168), False, 'import copy\n'), ((1274, 1286), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1284, 1286), True, 'import torch.nn as nn\n'), ((2391, 2425), 'collections.deque', 'collections.deque', ([], {'maxlen': 'max_size'}), '(maxlen=max_size)\n', (2408, 2425), False, 'import collections\n'), ((2537, 2575), 'random.sample', 'random.sample', (['self.buffer', 'batch_size'], {}), '(self.buffer, batch_size)\n', (2550, 2575), False, 'import random\n'), ((3740, 3756), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3754, 3756), True, 'import numpy as np\n'), ((4033, 4056), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (4049, 4056), False, 'import torch\n'), ((4073, 4113), 'torch.tensor', 'torch.tensor', (['state'], {'dtype': 'torch.float32'}), '(state, dtype=torch.float32)\n', (4085, 4113), False, 'import torch\n'), ((4234, 4263), 'numpy.squeeze', 'np.squeeze', (['values'], {'axis': 'None'}), '(values, axis=None)\n', (4244, 4263), True, 'import numpy as np\n'), ((4281, 4298), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (4290, 4298), True, 'import numpy as np\n'), ((3812, 3843), 'numpy.random.randint', 'np.random.randint', (['self.act_dim'], {}), '(self.act_dim)\n', (3829, 3843), True, 'import numpy as np\n'), ((2958, 2979), 'numpy.array', 'np.array', (['state_batch'], {}), '(state_batch)\n', (2966, 2979), True, 'import numpy as np\n'), ((3034, 3056), 'numpy.array', 'np.array', (['action_batch'], {}), '(action_batch)\n', (3042, 3056), True, 'import numpy as np\n'), ((3109, 3131), 'numpy.array', 'np.array', (['reward_batch'], {}), '(reward_batch)\n', (3117, 3131), True, 'import numpy as np\n'), ((3186, 3212), 'numpy.array', 'np.array', (['state_netx_batch'], {}), '(state_netx_batch)\n', (3194, 3212), True, 'import numpy as np\n'), ((3267, 3287), 'numpy.array', 'np.array', (['done_batch'], {}), '(done_batch)\n', (3275, 3287), True, 'import numpy as np\n')]
|
from intake.source.base import DataSource
from intake.source import import_name
class StreamzSource(DataSource):
name = 'streamz'
container = 'streamz'
"""
"""
def __init__(self, method_chain, start=False, metadata=None, **kwargs):
"""
method_chain: list[tuple(str, dict)]
Each element of the list is like (method_name, kwargs)
which will be applied to the stream object in sequence.
"""
self.method = method_chain
self.kwargs = kwargs
self.stream = None
self.start = start
super().__init__(metadata=metadata)
def _get_schema(self):
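        # Build the stream lazily by applying each (method, kwargs) step in
        # sequence; kwarg names listed under "func_value" hold dotted import
        # paths that are resolved to callables before the call.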
import streamz
if self.stream is None:
stream = streamz.Stream
for part in self.method:
kw = part.get("kwargs", {})
for functional in part.get("func_value", []):
kw[functional] = import_name(kw[functional])
                stream = getattr(stream, part["method"])(**kw)
self.stream = stream
if self.start:
self.stream.start()
return {'stream': str(self.stream)}
def read(self):
self._get_schema()
return self.stream
def to_dask(self):
return self.read().scatter()
@property
def plot(self):
# override since there is no hvPlot(streamz), only streamz.hvPlot
try:
from hvplot import hvPlot
except ImportError:
raise ImportError("The intake plotting API requires hvplot."
"hvplot may be installed with:\n\n"
"`conda install -c pyviz hvplot` or "
"`pip install hvplot`.")
fields = self.metadata.get('fields', {})
for attrs in fields.values():
if 'range' in attrs:
attrs['range'] = tuple(attrs['range'])
s = self.read()
plot = s.plot
plot._metadata['fields'] = fields
plot._plots = self.metadata.get('plots', {})
s.start()
return plot
|
[
"intake.source.import_name"
] |
[((922, 949), 'intake.source.import_name', 'import_name', (['kw[functional]'], {}), '(kw[functional])\n', (933, 949), False, 'from intake.source import import_name\n')]
|
from collections import namedtuple
AcquireCredResult = namedtuple('AcquireCredResult',
['creds', 'mechs', 'lifetime'])
InquireCredResult = namedtuple('InquireCredResult',
['name', 'lifetime', 'usage',
'mechs'])
InquireCredByMechResult = namedtuple('InquireCredByMechResult',
['name', 'init_lifetime',
'accept_lifetime', 'usage'])
AddCredResult = namedtuple('AddCredResult',
['creds', 'mechs', 'init_lifetime',
'accept_lifetime'])
DisplayNameResult = namedtuple('DisplayNameResult',
['name', 'name_type'])
WrapResult = namedtuple('WrapResult',
['message', 'encrypted'])
UnwrapResult = namedtuple('UnwrapResult',
['message', 'encrypted', 'qop'])
AcceptSecContextResult = namedtuple('AcceptSecContextResult',
['context', 'initiator_name',
'mech', 'token', 'flags', 'lifetime',
'delegated_creds', 'more_steps'])
InitSecContextResult = namedtuple('InitSecContextResult',
['context', 'mech', 'flags', 'token',
'lifetime', 'more_steps'])
InquireContextResult = namedtuple('InquireContextResult',
['initiator_name', 'target_name',
'lifetime', 'mech', 'flags',
'locally_init', 'complete'])
StoreCredResult = namedtuple('StoreCredResult',
['mechs', 'usage'])
IOVUnwrapResult = namedtuple('IOVUnwrapResult',
['encrypted', 'qop'])
InquireNameResult = namedtuple('InquireNameResult',
['attrs', 'is_mech_name', 'mech'])
GetNameAttributeResult = namedtuple('GetNamedAttributeResult',
['values', 'display_values',
'authenticated', 'complete'])
|
[
"collections.namedtuple"
] |
[((57, 120), 'collections.namedtuple', 'namedtuple', (['"""AcquireCredResult"""', "['creds', 'mechs', 'lifetime']"], {}), "('AcquireCredResult', ['creds', 'mechs', 'lifetime'])\n", (67, 120), False, 'from collections import namedtuple\n'), ((174, 245), 'collections.namedtuple', 'namedtuple', (['"""InquireCredResult"""', "['name', 'lifetime', 'usage', 'mechs']"], {}), "('InquireCredResult', ['name', 'lifetime', 'usage', 'mechs'])\n", (184, 245), False, 'from collections import namedtuple\n'), ((337, 433), 'collections.namedtuple', 'namedtuple', (['"""InquireCredByMechResult"""', "['name', 'init_lifetime', 'accept_lifetime', 'usage']"], {}), "('InquireCredByMechResult', ['name', 'init_lifetime',\n 'accept_lifetime', 'usage'])\n", (347, 433), False, 'from collections import namedtuple\n'), ((523, 610), 'collections.namedtuple', 'namedtuple', (['"""AddCredResult"""', "['creds', 'mechs', 'init_lifetime', 'accept_lifetime']"], {}), "('AddCredResult', ['creds', 'mechs', 'init_lifetime',\n 'accept_lifetime'])\n", (533, 610), False, 'from collections import namedtuple\n'), ((684, 738), 'collections.namedtuple', 'namedtuple', (['"""DisplayNameResult"""', "['name', 'name_type']"], {}), "('DisplayNameResult', ['name', 'name_type'])\n", (694, 738), False, 'from collections import namedtuple\n'), ((785, 835), 'collections.namedtuple', 'namedtuple', (['"""WrapResult"""', "['message', 'encrypted']"], {}), "('WrapResult', ['message', 'encrypted'])\n", (795, 835), False, 'from collections import namedtuple\n'), ((877, 936), 'collections.namedtuple', 'namedtuple', (['"""UnwrapResult"""', "['message', 'encrypted', 'qop']"], {}), "('UnwrapResult', ['message', 'encrypted', 'qop'])\n", (887, 936), False, 'from collections import namedtuple\n'), ((990, 1132), 'collections.namedtuple', 'namedtuple', (['"""AcceptSecContextResult"""', "['context', 'initiator_name', 'mech', 'token', 'flags', 'lifetime',\n 'delegated_creds', 'more_steps']"], {}), "('AcceptSecContextResult', ['context', 'initiator_name', 'mech',\n 'token', 'flags', 'lifetime', 'delegated_creds', 'more_steps'])\n", (1000, 1132), False, 'from collections import namedtuple\n'), ((1264, 1367), 'collections.namedtuple', 'namedtuple', (['"""InitSecContextResult"""', "['context', 'mech', 'flags', 'token', 'lifetime', 'more_steps']"], {}), "('InitSecContextResult', ['context', 'mech', 'flags', 'token',\n 'lifetime', 'more_steps'])\n", (1274, 1367), False, 'from collections import namedtuple\n'), ((1458, 1588), 'collections.namedtuple', 'namedtuple', (['"""InquireContextResult"""', "['initiator_name', 'target_name', 'lifetime', 'mech', 'flags',\n 'locally_init', 'complete']"], {}), "('InquireContextResult', ['initiator_name', 'target_name',\n 'lifetime', 'mech', 'flags', 'locally_init', 'complete'])\n", (1468, 1588), False, 'from collections import namedtuple\n'), ((1709, 1758), 'collections.namedtuple', 'namedtuple', (['"""StoreCredResult"""', "['mechs', 'usage']"], {}), "('StoreCredResult', ['mechs', 'usage'])\n", (1719, 1758), False, 'from collections import namedtuple\n'), ((1808, 1859), 'collections.namedtuple', 'namedtuple', (['"""IOVUnwrapResult"""', "['encrypted', 'qop']"], {}), "('IOVUnwrapResult', ['encrypted', 'qop'])\n", (1818, 1859), False, 'from collections import namedtuple\n'), ((1911, 1977), 'collections.namedtuple', 'namedtuple', (['"""InquireNameResult"""', "['attrs', 'is_mech_name', 'mech']"], {}), "('InquireNameResult', ['attrs', 'is_mech_name', 'mech'])\n", (1921, 1977), False, 'from collections import namedtuple\n'), ((2036, 2136), 
'collections.namedtuple', 'namedtuple', (['"""GetNamedAttributeResult"""', "['values', 'display_values', 'authenticated', 'complete']"], {}), "('GetNamedAttributeResult', ['values', 'display_values',\n 'authenticated', 'complete'])\n", (2046, 2136), False, 'from collections import namedtuple\n')]
|
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from gtts import gTTS
import PyPDF2
import os
Tk().withdraw()
filelocation = askopenfilename()
basename = os.path.basename(filelocation)
filename = os.path.splitext(basename)[0]
with open(filelocation, 'rb') as f:
    text = PyPDF2.PdfFileReader(f, strict=False)
    print(text.numPages)
    language = 'en'
    output_text = ''
    # Concatenate the extracted text of every page
    for pagenum in range(0, text.numPages):
        pageObj = text.getPage(pagenum)
        output_text = output_text + pageObj.extractText()
    # Convert the collected text to speech and save it next to the PDF
    output = gTTS(text=output_text, lang=language, slow=False)
    output.save(filename + ".mp3")
f.close()  # redundant: the with block has already closed the file
|
[
"os.path.basename",
"gtts.gTTS",
"tkinter.filedialog.askopenfilename",
"PyPDF2.PdfFileReader",
"os.path.splitext",
"tkinter.Tk"
] |
[((159, 176), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {}), '()\n', (174, 176), False, 'from tkinter.filedialog import askopenfilename\n'), ((191, 221), 'os.path.basename', 'os.path.basename', (['filelocation'], {}), '(filelocation)\n', (207, 221), False, 'import os\n'), ((236, 262), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (252, 262), False, 'import os\n'), ((319, 356), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', (['f'], {'strict': '(False)'}), '(f, strict=False)\n', (339, 356), False, 'import PyPDF2\n'), ((127, 131), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (129, 131), False, 'from tkinter import Tk\n'), ((596, 645), 'gtts.gTTS', 'gTTS', ([], {'text': 'output_text', 'lang': 'language', 'slow': '(False)'}), '(text=output_text, lang=language, slow=False)\n', (600, 645), False, 'from gtts import gTTS\n')]
|
# https://www.hackerrank.com/challenges/text-wrap/problem
import textwrap
def wrap(string, max_width):
    # return "\n".join(string[i:i+max_width] for i in range(0, len(string), max_width))
    return textwrap.fill(string, max_width)
if __name__ == "__main__":
    string, max_width = input(), int(input())
    # ABCDEFGHIJKLIMNOQRSTUVWXYZ
    # 4
    result = wrap(string, max_width)
    print(result)
    # ABCD
    # EFGH
    # IJKL
    # IMNO
    # QRST
    # UVWX
    # YZ
|
[
"textwrap.fill"
] |
[((206, 238), 'textwrap.fill', 'textwrap.fill', (['string', 'max_width'], {}), '(string, max_width)\n', (219, 238), False, 'import textwrap\n')]
|
import tensorflow as tf
from .utils import noisy_labels, smooth_fake_labels, smooth_real_labels, CONFIG
class SGANDiscriminatorLoss(tf.keras.losses.Loss):
def __init__(self):
"""Standard GAN loss for discriminator.
"""
super().__init__()
self.bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def call(self, real_output, fake_output):
"""Loss for the discriminator.
Applies technique from GAN hacks to stabilize training:
- Label smoothing
- Label noise
Args:
real_output (tf.Tensor): output of discriminator on real images
fake_output (tf.Tensor): output of discriminator on fake images
Returns:
float: discriminator loss
"""
# Real images must be predicted 1 (noised and smoothed)
real_labels = tf.ones_like(real_output)
if CONFIG["smooth_labels"]:
real_labels = noisy_labels(real_labels, CONFIG["label_noise"])
real_labels = smooth_real_labels(real_labels)
# Fake images must be predicted 0 (noised and smoothed)
fake_labels = tf.zeros_like(fake_output)
if CONFIG["smooth_labels"]:
fake_labels = noisy_labels(fake_labels, CONFIG["label_noise"])
fake_labels = smooth_fake_labels(fake_labels)
real_loss = self.bce(real_labels, real_output)
fake_loss = self.bce(fake_labels, fake_output)
total_loss = real_loss + fake_loss
return total_loss
class SGANGeneratorLoss(tf.keras.losses.Loss):
def __init__(self):
"""Standard GAN loss for generator.
"""
super().__init__()
self.bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def call(self, real_output, fake_output):
"""Loss for the generator. The generator must fool the discriminator,
making it predict fake images as real.
Args:
real_output (tf.Tensor): output of the discriminator on real images
(actually not used, just to comply with interface function signature)
fake_output (tf.Tensor): output of the discriminator on fake (generated) images
Returns:
float: generator loss
"""
loss = self.bce(tf.ones_like(fake_output), fake_output)
return loss
class WGANDiscriminatorLoss(tf.keras.losses.Loss):
def __init__(self) -> None:
"""Wasserstein loss for the 'critic' from `Wasserstein GAN` (https://arxiv.org/abs/1701.07875).
"""
super().__init__()
def call(self, real_output, gen_output):
# loss for the output of the discriminator on real images
real_loss = tf.reduce_mean(real_output)
# loss for the output of the discriminator on generated images
gen_loss = tf.reduce_mean(gen_output)
loss = gen_loss - real_loss
return loss
class WGANGeneratorLoss(tf.keras.losses.Loss):
def __init__(self) -> None:
"""Wasserstein loss for the generator from `Wasserstein GAN` (https://arxiv.org/abs/1701.07875).
"""
super().__init__()
def call(self, real_output, gen_output):
loss = -tf.reduce_mean(gen_output)
return loss
class RaLSGANGeneratorLoss(tf.keras.losses.Loss):
def __init__(self) -> None:
"""Loss for Relativistic average Least Square GAN (arXiv:1901.02474).
"""
super().__init__()
def call(self, real_output, fake_output):
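        # Relativistic average LS loss (arXiv:1901.02474): each side is scored
        # relative to the mean prediction on the opposite side.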
real_loss = tf.reduce_mean(
real_output - tf.reduce_mean(fake_output) + 1)**2
fake_loss = tf.reduce_mean(
fake_output - tf.reduce_mean(real_output) - 1)**2
loss = (real_loss + fake_loss) / 2
return loss
class RaLSGANDiscriminatorLoss(tf.keras.losses.Loss):
def __init__(self) -> None:
"""Loss for Relativistic average Least Square GAN (arXiv:1901.02474).
"""
super().__init__()
def call(self, real_output, fake_output):
real_loss = tf.reduce_mean(
real_output - tf.reduce_mean(fake_output) - 1)**2
fake_loss = tf.reduce_mean(
fake_output - tf.reduce_mean(real_output) + 1)**2
loss = (real_loss + fake_loss) / 2
return loss
|
[
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.reduce_mean",
"tensorflow.zeros_like",
"tensorflow.ones_like"
] |
[((289, 341), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (323, 341), True, 'import tensorflow as tf\n'), ((871, 896), 'tensorflow.ones_like', 'tf.ones_like', (['real_output'], {}), '(real_output)\n', (883, 896), True, 'import tensorflow as tf\n'), ((1153, 1179), 'tensorflow.zeros_like', 'tf.zeros_like', (['fake_output'], {}), '(fake_output)\n', (1166, 1179), True, 'import tensorflow as tf\n'), ((1705, 1757), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1739, 1757), True, 'import tensorflow as tf\n'), ((2720, 2747), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['real_output'], {}), '(real_output)\n', (2734, 2747), True, 'import tensorflow as tf\n'), ((2838, 2864), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['gen_output'], {}), '(gen_output)\n', (2852, 2864), True, 'import tensorflow as tf\n'), ((2300, 2325), 'tensorflow.ones_like', 'tf.ones_like', (['fake_output'], {}), '(fake_output)\n', (2312, 2325), True, 'import tensorflow as tf\n'), ((3209, 3235), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['gen_output'], {}), '(gen_output)\n', (3223, 3235), True, 'import tensorflow as tf\n'), ((3567, 3594), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fake_output'], {}), '(fake_output)\n', (3581, 3594), True, 'import tensorflow as tf\n'), ((3665, 3692), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['real_output'], {}), '(real_output)\n', (3679, 3692), True, 'import tensorflow as tf\n'), ((4081, 4108), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fake_output'], {}), '(fake_output)\n', (4095, 4108), True, 'import tensorflow as tf\n'), ((4179, 4206), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['real_output'], {}), '(real_output)\n', (4193, 4206), True, 'import tensorflow as tf\n')]
|
import sys
import subprocess
import os
runSnifflesScript = "./call_sniffles.sh"
resultDir = "/CGF/Bioinformatics/Production/Wen/20200117_pacbio_snp_call/29461_WGS_cell_line/bam_location_ngmlr/SV/Sniffles"
class ClsSample:
def __init__(self):
self.strName = ""
self.strPath = ""
self.strBAM = ""
def Init(self, strFullPathBAM):
self.strName = os.path.basename(strFullPathBAM).split(".")[0]
self.strPath = os.path.dirname(strFullPathBAM)
self.strBAM = strFullPathBAM
def Print(self):
print("*************")
print("strName:", self.strName)
print("strPath:", self.strPath)
print("strBAM :", self.strBAM)
print("*************")
print()
def SubmitJob(self):
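        # Create per-sample result and log directories, then submit the
        # Sniffles SV-calling script to SGE via qsub.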
strCurSampleDir = resultDir + "/" + self.strName
if not os.path.exists(strCurSampleDir):
CMD = "mkdir -p " + strCurSampleDir
os.system(CMD)
strCurSampleLogDir = strCurSampleDir + "/Log"
if not os.path.exists(strCurSampleLogDir):
CMD = "mkdir -p " + strCurSampleLogDir
os.system(CMD)
# --> Submit sge job -> Go!
# CMD = ("bash " + runSnifflesScript + " " + "\"" + self.strName + "\" " +
# "\"" + self.strBAM + "\" " +
# "\"" + resultDir + "\"")
# os.system(CMD)
# # <--
QUEUE = "all.q"
CORES = "12"
strLogStdOut = strCurSampleLogDir + "/_call_sv_" + self.strName + ".stdout"
strLogStdErr = strCurSampleLogDir + "/_call_sv_" + self.strName + ".stderr"
if os.path.exists(strLogStdOut):
CMD = "rm " + strLogStdOut
os.system(CMD)
if os.path.exists(strLogStdErr):
CMD = "rm " + strLogStdErr
os.system(CMD)
CMD = ("qsub -cwd -q " + QUEUE + " -pe by_node " + CORES + " " +
"-o " + strLogStdOut + " " +
"-e " + strLogStdErr + " " +
"-N " + "SV.Sniffles." + self.strName + " " +
"-S /bin/bash " + runSnifflesScript + " " + "\"" + self.strName + "\" " +
"\"" + self.strBAM + "\" " +
"\"" + strCurSampleDir + "\" " +
"\"" + CORES + "\"")
print("CMD:", CMD)
print()
os.system(CMD)
print("\n", "***", "\n")
def main():
strDir = sys.argv[1]
    # Find all BAM files directly under the given directory (maxdepth 1)
CMD = "find " + strDir + " -maxdepth 1 -type f -iname '*.bam'"
vBAM = subprocess.getoutput(CMD).split('\n')
vSample = []
for strBAM in vBAM:
objSample = ClsSample()
objSample.Init(strBAM)
vSample.append(objSample)
for objSample in vSample:
objSample.SubmitJob()
if __name__ == "__main__":
main()
|
[
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"os.system",
"subprocess.getoutput"
] |
[((459, 490), 'os.path.dirname', 'os.path.dirname', (['strFullPathBAM'], {}), '(strFullPathBAM)\n', (474, 490), False, 'import os\n'), ((1754, 1782), 'os.path.exists', 'os.path.exists', (['strLogStdOut'], {}), '(strLogStdOut)\n', (1768, 1782), False, 'import os\n'), ((1861, 1889), 'os.path.exists', 'os.path.exists', (['strLogStdErr'], {}), '(strLogStdErr)\n', (1875, 1889), False, 'import os\n'), ((2660, 2674), 'os.system', 'os.system', (['CMD'], {}), '(CMD)\n', (2669, 2674), False, 'import os\n'), ((863, 894), 'os.path.exists', 'os.path.exists', (['strCurSampleDir'], {}), '(strCurSampleDir)\n', (877, 894), False, 'import os\n'), ((968, 982), 'os.system', 'os.system', (['CMD'], {}), '(CMD)\n', (977, 982), False, 'import os\n'), ((1061, 1095), 'os.path.exists', 'os.path.exists', (['strCurSampleLogDir'], {}), '(strCurSampleLogDir)\n', (1075, 1095), False, 'import os\n'), ((1172, 1186), 'os.system', 'os.system', (['CMD'], {}), '(CMD)\n', (1181, 1186), False, 'import os\n'), ((1835, 1849), 'os.system', 'os.system', (['CMD'], {}), '(CMD)\n', (1844, 1849), False, 'import os\n'), ((1942, 1956), 'os.system', 'os.system', (['CMD'], {}), '(CMD)\n', (1951, 1956), False, 'import os\n'), ((2868, 2893), 'subprocess.getoutput', 'subprocess.getoutput', (['CMD'], {}), '(CMD)\n', (2888, 2893), False, 'import subprocess\n'), ((389, 421), 'os.path.basename', 'os.path.basename', (['strFullPathBAM'], {}), '(strFullPathBAM)\n', (405, 421), False, 'import os\n')]
|
##
# @file electric_overflow.py
# @author <NAME>
# @date Aug 2018
#
import math
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from torch.nn import functional as F
import dreamplace.ops.electric_potential.electric_potential_cpp as electric_potential_cpp
import dreamplace.configure as configure
if configure.compile_configurations["CUDA_FOUND"] == "TRUE":
import dreamplace.ops.electric_potential.electric_potential_cuda as electric_potential_cuda
import pdb
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class ElectricDensityMapFunction(Function):
"""
@brief compute density overflow.
    @param ctx pytorch API to store data for backward propagation
@param pos location of cells, x and then y
@param node_size_x_clamped stretched size, max(bin_size*sqrt2, node_size)
@param node_size_y_clamped stretched size, max(bin_size*sqrt2, node_size)
@param offset_x (stretched size - node_size) / 2
@param offset_y (stretched size - node_size) / 2
@param ratio original area / stretched area
@param initial_density_map density_map for fixed cells
@param target_density target density
@param xl left boundary
@param yl lower boundary
@param xh right boundary
@param yh upper boundary
@param bin_size_x bin width
    @param bin_size_y bin height
@param num_movable_nodes number of movable cells
@param num_filler_nodes number of filler cells
@param padding bin padding to boundary of placement region
@param padding_mask padding mask with 0 and 1 to indicate padding bins with padding regions to be 1
@param num_bins_x number of bins in horizontal direction
@param num_bins_y number of bins in vertical direction
@param num_movable_impacted_bins_x number of impacted bins for any movable cell in x direction
@param num_movable_impacted_bins_y number of impacted bins for any movable cell in y direction
@param num_filler_impacted_bins_x number of impacted bins for any filler cell in x direction
@param num_filler_impacted_bins_y number of impacted bins for any filler cell in y direction
@param sorted_node_map the indices of the movable node map
"""
@staticmethod
def forward(
pos,
node_size_x_clamped,
node_size_y_clamped,
offset_x,
offset_y,
ratio,
bin_center_x,
bin_center_y,
initial_density_map,
target_density,
xl,
yl,
xh,
yh,
bin_size_x,
bin_size_y,
num_movable_nodes,
num_filler_nodes,
padding,
padding_mask, # same dimensions as density map, with padding regions to be 1
num_bins_x,
num_bins_y,
num_movable_impacted_bins_x,
num_movable_impacted_bins_y,
num_filler_impacted_bins_x,
num_filler_impacted_bins_y,
deterministic_flag,
sorted_node_map):
if pos.is_cuda:
output = electric_potential_cuda.density_map(
pos.view(pos.numel()), node_size_x_clamped,
node_size_y_clamped, offset_x, offset_y, ratio, bin_center_x,
bin_center_y, initial_density_map, target_density, xl, yl, xh,
yh, bin_size_x, bin_size_y, num_movable_nodes,
num_filler_nodes, padding, num_bins_x, num_bins_y,
num_movable_impacted_bins_x, num_movable_impacted_bins_y,
num_filler_impacted_bins_x, num_filler_impacted_bins_y,
deterministic_flag, sorted_node_map)
else:
output = electric_potential_cpp.density_map(
pos.view(pos.numel()), node_size_x_clamped,
node_size_y_clamped, offset_x, offset_y, ratio, bin_center_x,
bin_center_y, initial_density_map, target_density, xl, yl, xh,
yh, bin_size_x, bin_size_y, num_movable_nodes,
num_filler_nodes, padding, num_bins_x, num_bins_y,
num_movable_impacted_bins_x, num_movable_impacted_bins_y,
num_filler_impacted_bins_x, num_filler_impacted_bins_y,
deterministic_flag)
density_map = output.view([num_bins_x, num_bins_y])
# set padding density
if padding > 0:
density_map.masked_fill_(padding_mask,
target_density * bin_size_x * bin_size_y)
return density_map
class ElectricOverflow(nn.Module):
def __init__(
self,
node_size_x,
node_size_y,
bin_center_x,
bin_center_y,
target_density,
xl,
yl,
xh,
yh,
bin_size_x,
bin_size_y,
num_movable_nodes,
num_terminals,
num_filler_nodes,
padding,
deterministic_flag, # control whether to use deterministic routine
sorted_node_map,
movable_macro_mask=None):
super(ElectricOverflow, self).__init__()
self.node_size_x = node_size_x
self.node_size_y = node_size_y
self.bin_center_x = bin_center_x
self.bin_center_y = bin_center_y
self.target_density = target_density
self.xl = xl
self.yl = yl
self.xh = xh
self.yh = yh
self.bin_size_x = bin_size_x
self.bin_size_y = bin_size_y
self.num_movable_nodes = num_movable_nodes
self.num_terminals = num_terminals
self.num_filler_nodes = num_filler_nodes
self.padding = padding
self.sorted_node_map = sorted_node_map
self.movable_macro_mask = movable_macro_mask
self.deterministic_flag = deterministic_flag
self.reset()
def reset(self):
sqrt2 = math.sqrt(2)
# clamped means stretch a cell to bin size
# clamped = max(bin_size*sqrt2, node_size)
# offset means half of the stretch size
# ratio means the original area over the stretched area
self.node_size_x_clamped = self.node_size_x.clamp(min=self.bin_size_x *
sqrt2)
self.offset_x = (self.node_size_x - self.node_size_x_clamped).mul(0.5)
self.node_size_y_clamped = self.node_size_y.clamp(min=self.bin_size_y *
sqrt2)
self.offset_y = (self.node_size_y - self.node_size_y_clamped).mul(0.5)
node_areas = self.node_size_x * self.node_size_y
self.ratio = node_areas / (self.node_size_x_clamped *
self.node_size_y_clamped)
# detect movable macros and scale down the density to avoid halos
# the definition of movable macros should be different according to algorithms
self.num_movable_macros = 0
if self.target_density < 1 and self.movable_macro_mask is not None:
self.num_movable_macros = self.movable_macro_mask.sum().data.item()
self.ratio[:self.num_movable_nodes][
self.movable_macro_mask] = self.target_density
# compute maximum impacted bins
self.num_bins_x = int(math.ceil((self.xh - self.xl) / self.bin_size_x))
self.num_bins_y = int(math.ceil((self.yh - self.yl) / self.bin_size_y))
if self.num_movable_nodes:
self.num_movable_impacted_bins_x = int(
((self.node_size_x[:self.num_movable_nodes].max() +
2 * sqrt2 * self.bin_size_x) /
self.bin_size_x).ceil().clamp(max=self.num_bins_x))
self.num_movable_impacted_bins_y = int(
((self.node_size_y[:self.num_movable_nodes].max() +
2 * sqrt2 * self.bin_size_y) /
self.bin_size_y).ceil().clamp(max=self.num_bins_y))
else:
self.num_movable_impacted_bins_x = 0
self.num_movable_impacted_bins_y = 0
if self.num_filler_nodes:
self.num_filler_impacted_bins_x = (
(self.node_size_x[-self.num_filler_nodes:].max() +
2 * sqrt2 * self.bin_size_x) /
self.bin_size_x).ceil().clamp(max=self.num_bins_x)
self.num_filler_impacted_bins_y = (
(self.node_size_y[-self.num_filler_nodes:].max() +
2 * sqrt2 * self.bin_size_y) /
self.bin_size_y).ceil().clamp(max=self.num_bins_y)
else:
self.num_filler_impacted_bins_x = 0
self.num_filler_impacted_bins_y = 0
if self.padding > 0:
self.padding_mask = torch.ones(self.num_bins_x,
self.num_bins_y,
dtype=torch.uint8,
device=self.node_size_x.device)
self.padding_mask[self.padding:self.num_bins_x - self.padding,
self.padding:self.num_bins_y -
self.padding].fill_(0)
else:
self.padding_mask = torch.zeros(self.num_bins_x,
self.num_bins_y,
dtype=torch.uint8,
device=self.node_size_x.device)
# initial density_map due to fixed cells
self.initial_density_map = None
def compute_initial_density_map(self, pos):
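        # Compute the density map contributed by fixed cells (terminals) once;
        # forward() caches the result and reuses it on every call.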
if self.num_terminals == 0:
num_fixed_impacted_bins_x = 0
num_fixed_impacted_bins_y = 0
else:
max_size_x = self.node_size_x[self.num_movable_nodes:self.
num_movable_nodes +
self.num_terminals].max()
max_size_y = self.node_size_y[self.num_movable_nodes:self.
num_movable_nodes +
self.num_terminals].max()
num_fixed_impacted_bins_x = ((max_size_x + self.bin_size_x) /
self.bin_size_x).ceil().clamp(
max=self.num_bins_x)
num_fixed_impacted_bins_y = ((max_size_y + self.bin_size_y) /
self.bin_size_y).ceil().clamp(
max=self.num_bins_y)
if pos.is_cuda:
func = electric_potential_cuda.fixed_density_map
else:
func = electric_potential_cpp.fixed_density_map
self.initial_density_map = func(
pos, self.node_size_x, self.node_size_y, self.bin_center_x,
self.bin_center_y, self.xl, self.yl, self.xh, self.yh,
self.bin_size_x, self.bin_size_y, self.num_movable_nodes,
self.num_terminals, self.num_bins_x, self.num_bins_y,
num_fixed_impacted_bins_x, num_fixed_impacted_bins_y,
self.deterministic_flag)
# scale density of fixed macros
self.initial_density_map.mul_(self.target_density)
def forward(self, pos):
if self.initial_density_map is None:
self.compute_initial_density_map(pos)
density_map = ElectricDensityMapFunction.forward(
pos, self.node_size_x_clamped, self.node_size_y_clamped,
self.offset_x, self.offset_y, self.ratio, self.bin_center_x,
self.bin_center_y, self.initial_density_map, self.target_density,
self.xl, self.yl, self.xh, self.yh, self.bin_size_x,
self.bin_size_y, self.num_movable_nodes, self.num_filler_nodes,
self.padding, self.padding_mask, self.num_bins_x, self.num_bins_y,
self.num_movable_impacted_bins_x, self.num_movable_impacted_bins_y,
self.num_filler_impacted_bins_x, self.num_filler_impacted_bins_y,
self.deterministic_flag, self.sorted_node_map)
bin_area = self.bin_size_x * self.bin_size_y
density_cost = (density_map -
self.target_density * bin_area).clamp_(min=0.0).sum().unsqueeze(0)
return density_cost, density_map.max().unsqueeze(0) / bin_area
def plot(plot_count, density_map, padding, name):
"""
density map contour and heat map
"""
density_map = density_map[padding:density_map.shape[0] - padding,
padding:density_map.shape[1] - padding]
print("max density = %g @ %s" %
(np.amax(density_map),
np.unravel_index(np.argmax(density_map), density_map.shape)))
print("mean density = %g" % (np.mean(density_map)))
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.arange(density_map.shape[0])
y = np.arange(density_map.shape[1])
x, y = np.meshgrid(x, y)
# looks like x and y should be swapped
ax.plot_surface(y, x, density_map, alpha=0.8)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('density')
# plt.tight_layout()
plt.savefig(name + ".3d.png")
plt.close()
# plt.clf()
#fig, ax = plt.subplots()
# ax.pcolor(density_map)
# Loop over data dimensions and create text annotations.
# for i in range(density_map.shape[0]):
# for j in range(density_map.shape[1]):
# text = ax.text(j, i, density_map[i, j],
# ha="center", va="center", color="w")
# fig.tight_layout()
#plt.savefig(name+".2d.%d.png" % (plot_count))
# plt.close()
|
[
"torch.ones",
"numpy.meshgrid",
"math.sqrt",
"math.ceil",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.amax",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.arange",
"numpy.mean",
"torch.zeros",
"matplotlib.pyplot.savefig"
] |
[((530, 551), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (544, 551), False, 'import matplotlib\n'), ((12631, 12643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12641, 12643), True, 'import matplotlib.pyplot as plt\n'), ((12687, 12718), 'numpy.arange', 'np.arange', (['density_map.shape[0]'], {}), '(density_map.shape[0])\n', (12696, 12718), True, 'import numpy as np\n'), ((12727, 12758), 'numpy.arange', 'np.arange', (['density_map.shape[1]'], {}), '(density_map.shape[1])\n', (12736, 12758), True, 'import numpy as np\n'), ((12771, 12788), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (12782, 12788), True, 'import numpy as np\n'), ((12988, 13017), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + '.3d.png')"], {}), "(name + '.3d.png')\n", (12999, 13017), True, 'import matplotlib.pyplot as plt\n'), ((13022, 13033), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13031, 13033), True, 'import matplotlib.pyplot as plt\n'), ((5809, 5821), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5818, 5821), False, 'import math\n'), ((7201, 7249), 'math.ceil', 'math.ceil', (['((self.xh - self.xl) / self.bin_size_x)'], {}), '((self.xh - self.xl) / self.bin_size_x)\n', (7210, 7249), False, 'import math\n'), ((7281, 7329), 'math.ceil', 'math.ceil', (['((self.yh - self.yl) / self.bin_size_y)'], {}), '((self.yh - self.yl) / self.bin_size_y)\n', (7290, 7329), False, 'import math\n'), ((8619, 8719), 'torch.ones', 'torch.ones', (['self.num_bins_x', 'self.num_bins_y'], {'dtype': 'torch.uint8', 'device': 'self.node_size_x.device'}), '(self.num_bins_x, self.num_bins_y, dtype=torch.uint8, device=self\n .node_size_x.device)\n', (8629, 8719), False, 'import torch\n'), ((9079, 9180), 'torch.zeros', 'torch.zeros', (['self.num_bins_x', 'self.num_bins_y'], {'dtype': 'torch.uint8', 'device': 'self.node_size_x.device'}), '(self.num_bins_x, self.num_bins_y, dtype=torch.uint8, device=\n self.node_size_x.device)\n', (9090, 9180), False, 'import torch\n'), ((12597, 12617), 'numpy.mean', 'np.mean', (['density_map'], {}), '(density_map)\n', (12604, 12617), True, 'import numpy as np\n'), ((12469, 12489), 'numpy.amax', 'np.amax', (['density_map'], {}), '(density_map)\n', (12476, 12489), True, 'import numpy as np\n'), ((12519, 12541), 'numpy.argmax', 'np.argmax', (['density_map'], {}), '(density_map)\n', (12528, 12541), True, 'import numpy as np\n')]
|
from django.conf.urls import include, url, patterns
from polls import views
urlpatterns = [
url(r'^home/$', views.home, name='home'),
url(r'^about/$', views.about, name='about'),
]
|
[
"django.conf.urls.url"
] |
[((97, 136), 'django.conf.urls.url', 'url', (['"""^home/$"""', 'views.home'], {'name': '"""home"""'}), "('^home/$', views.home, name='home')\n", (100, 136), False, 'from django.conf.urls import include, url, patterns\n'), ((143, 185), 'django.conf.urls.url', 'url', (['"""^about/$"""', 'views.about'], {'name': '"""about"""'}), "('^about/$', views.about, name='about')\n", (146, 185), False, 'from django.conf.urls import include, url, patterns\n')]
|
# app/robo_advisor.py
import csv
import os
import json
from dotenv import load_dotenv
import requests
from datetime import datetime
now = datetime.now()
datelabel = now.strftime("%d/%m/%Y %H:%M:%S")
load_dotenv()
# utility function to convert float or integer to usd-formatted string (for printing)
# ... adapted from: <NAME> project walkthrough https://www.youtube.com/watch?v=UXAVOP1oCog&t=847s
def to_usd(my_price):
return "${0:,.2f}".format(my_price)
api_key = os.environ.get("ALPHAVANTAGE_API_KEY")
#stock = str(input("Which stock do you wish to check? "))
#stock_upper = stock.upper()
symbol = ""
while True:
try:
stock = str(input("Which stock do you wish to check? "))
stock_upper = stock.upper()
symbol = stock_upper
except KeyError:
print("Please enter a valid stock symbol.")
continue
if len(symbol) >= 6:
print("Please enter a valid stock symbol.")
continue
else:
break
#if len(stock_upper) >=1 or 5 >= len(stock_upper):
# symbol = stock_upper
# elif len(stock_upper) > 5:
# print("please enter a valid stock")
# quit()
#else:
# quit()
request_url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={api_key}"
response = requests.get(request_url)
parsed_response = json.loads(response.text)
last_refreshed = parsed_response["Meta Data"]["3. Last Refreshed"]
tsd = parsed_response["Time Series (Daily)"]
dates = list(tsd.keys())
latest_day = dates[0]
latest_close = tsd[latest_day]["4. close"]
#max of all high prices
high_prices = []
low_prices = []
for date in dates:
high_price = tsd[date]["2. high"]
low_price = tsd[date]["3. low"]
high_prices.append(float(high_price))
low_prices.append(float(low_price))
recent_high = max(high_prices)
recent_low = min(low_prices)
csv_file_path = os.path.join(os.path.dirname(__file__), "..", "data", "prices.csv")
csv_headers = ["timestamp", "open", "high", "low", "close", "volume"]
with open(csv_file_path, "w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=csv_headers)
writer.writeheader()
for date in dates:
daily_prices = tsd[date]
writer.writerow({
"timestamp": date,
"open": daily_prices["1. open"],
"high": daily_prices["2. high"],
"low": daily_prices["3. low"],
"close": daily_prices["4. close"],
"volume": daily_prices["5. volume"],
})
stock_decision = ""
decision_reason = ""
if float(latest_close) < (1.2 * float(recent_low)):
stock_decision = "Buy!"
decision_reason = "The latest closing price is within 20 percent of the recent low."
else:
stock_decision = "Don't Buy."
decision_reason = "The latest closing price is not within 20 percent of the recent low."
print("-------------------------")
print(f"SELECTED SYMBOL: {symbol}")
print("-------------------------")
print("REQUESTING STOCK MARKET DATA...")
print(f"REQUEST AT: {datelabel}")
print("-------------------------")
print(f"LATEST DAY: {last_refreshed}")
print(f"LATEST CLOSE: {to_usd(float(latest_close))}")
print(f"RECENT HIGH: {to_usd(float(recent_high))}")
print(f"RECENT LOW: {to_usd(float(recent_low))}")
print("-------------------------")
print(f"RECOMMENDATION: {stock_decision}")
print(f"RECOMMENDATION REASON: {decision_reason}")
print("-------------------------")
print(f"WRITING DATA TO CSV: {csv_file_path}...")
print("-------------------------")
print("HAPPY INVESTING!")
print("-------------------------")
|
[
"json.loads",
"os.path.dirname",
"dotenv.load_dotenv",
"os.environ.get",
"requests.get",
"datetime.datetime.now",
"csv.DictWriter"
] |
[((141, 155), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (153, 155), False, 'from datetime import datetime\n'), ((204, 217), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (215, 217), False, 'from dotenv import load_dotenv\n'), ((476, 514), 'os.environ.get', 'os.environ.get', (['"""ALPHAVANTAGE_API_KEY"""'], {}), "('ALPHAVANTAGE_API_KEY')\n", (490, 514), False, 'import os\n'), ((1307, 1332), 'requests.get', 'requests.get', (['request_url'], {}), '(request_url)\n', (1319, 1332), False, 'import requests\n'), ((1352, 1377), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1362, 1377), False, 'import json\n'), ((1912, 1937), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1927, 1937), False, 'import os\n'), ((2095, 2143), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': 'csv_headers'}), '(csv_file, fieldnames=csv_headers)\n', (2109, 2143), False, 'import csv\n')]
|
#!/usr/bin/env python
"""
Wrapper to ROS publisher.
Author: <NAME>
Date: 05/18
"""
import rospy
class ROSPublisher(object):
def __init__(self, _topic, _message_type, _queue_size=1, rate=10):
"""
ROSPublisher constructor.
:param _topic: string, ROS topic to publish on
:param _message_type: custom message, published on topic
        :param _queue_size: int, how many messages to queue when publishing. With the default, a subscriber will only take the latest message.
:param rate: int, how often to publish
"""
self.topic = _topic
self.message_type = _message_type
self.pub = rospy.Publisher(self.topic, self.message_type, queue_size=_queue_size)
self.rate = rospy.Rate(rate)
def publish(self, data=None):
"""
Publishing one message on the initialized topic.
:param data: any data required for publishing e.g. when having publisher in subscriber callback
:return:
"""
rospy.loginfo("Message published on topic %s", self.topic)
|
[
"rospy.loginfo",
"rospy.Publisher",
"rospy.Rate"
] |
[((657, 727), 'rospy.Publisher', 'rospy.Publisher', (['self.topic', 'self.message_type'], {'queue_size': '_queue_size'}), '(self.topic, self.message_type, queue_size=_queue_size)\n', (672, 727), False, 'import rospy\n'), ((748, 764), 'rospy.Rate', 'rospy.Rate', (['rate'], {}), '(rate)\n', (758, 764), False, 'import rospy\n'), ((1011, 1069), 'rospy.loginfo', 'rospy.loginfo', (['"""Message published on topic %s"""', 'self.topic'], {}), "('Message published on topic %s', self.topic)\n", (1024, 1069), False, 'import rospy\n')]
|
"""Helper functions and classes for temporary folders."""
import logging
import shutil
from pathlib import Path
from types import TracebackType
from typing import Optional, Type
from .location import Location
LOGGER = logging.getLogger(__name__)
class TmpDir(Location):
"""A temporary folder that can create files or nested temp folders."""
def __init__(self, path: Path):
"""Create a new temporary folder for the given path."""
super().__init__(path)
self._counter = 0
self.cleanup()
self.path.mkdir(parents=True, exist_ok=True)
def __str__(self) -> str:
"""Format the folder as a string."""
return f"Folder at {self.path}"
def __enter__(self) -> 'TmpDir':
"""Context manager entry function."""
return self
# pylint: disable=useless-return
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Optional[bool]:
"""Context manager exit function. Calls cleanup()."""
self.cleanup()
return None
def new_path(self, prefix: Optional[str] = None) -> Path:
"""
Return a unique path inside the directory. Doesn't create a file or
directory.
"""
name = f"{prefix if prefix else 'tmp'}-{self._inc_and_get_counter():03}"
LOGGER.debug("Creating temp file %s", name)
return self.resolve(Path(name))
def new_subdir(self, prefix: Optional[str] = None) -> 'TmpDir':
"""
Create a new nested temporary folder and return it.
"""
name = f"{prefix if prefix else 'tmp'}-{self._inc_and_get_counter():03}"
sub_path = self.resolve(Path(name))
sub_path.mkdir(parents=True)
LOGGER.debug("Creating temp dir %s at %s", name, sub_path)
return TmpDir(sub_path)
def cleanup(self) -> None:
"""Delete this folder and all contained files."""
LOGGER.debug("Deleting temp folder %s", self.path)
if self.path.resolve().exists():
shutil.rmtree(self.path.resolve())
def _inc_and_get_counter(self) -> int:
"""Get and increment the counter by one."""
counter = self._counter
self._counter += 1
return counter
|
[
"pathlib.Path",
"logging.getLogger"
] |
[((221, 248), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (238, 248), False, 'import logging\n'), ((1503, 1513), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (1507, 1513), False, 'from pathlib import Path\n'), ((1782, 1792), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (1786, 1792), False, 'from pathlib import Path\n')]
|
import socket
from threading import Thread, Lock
from time import time
from .BenchmarkData import BenchmarkData
class UDPServer:
def __init__(self, host, port, benchmark_file_path, chunk_size, ack):
self.__host = host
self.__port = port
self.__running = False
self.__running_lock = Lock()
self.__benchmark_file_path = benchmark_file_path
self.__chunk_size = chunk_size
self.__ack = ack
def run(self):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.bind((self.__host, self.__port))
print(f"Waiting for clients on {self.__host}:{self.__port}")
self.__set_running(True)
while self.__get_running():
data = sock.recvfrom(self.__chunk_size)
chunk = data[0]
address = data[1]
client_ip = address[0]
handle_client_thread = Thread(target=self.__handle_client, args=(sock, chunk, address, client_ip))
handle_client_thread.start()
def stop(self):
self.__set_running(False)
def __handle_client(self, sock, chunk, address, client_ip):
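        # Append the received chunk to a per-client benchmark file and update
        # the byte/chunk counters; optionally send an ACK back to the client.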
category = 'udp_server' if self.__ack else 'udp_stream_server'
file_name_parts = self.__benchmark_file_path.split('.')
if len(file_name_parts) == 1:
file_name = f"{self.__benchmark_file_path}_{category}"
else:
file_name = f"{'.'.join(file_name_parts[:-1])}_{category}.{file_name_parts[-1]}"
self.__prepare_new_request(client_ip, file_name, category)
with open(file_name, "ab") as benchmark_file:
benchmark_file.write(chunk)
data = BenchmarkData.get_data(client_ip, category)
total_bytes = data[1]
chunks_count = data[2]
BenchmarkData.add_data(client_ip, category, time(), total_bytes + len(chunk), chunks_count + 1)
if self.__ack:
self.__acknowledge(sock, address)
def __prepare_new_request(self, client_ip, file_name, category):
if BenchmarkData.get_data(client_ip, category) is not None:
return
with open(file_name, "wb") as benchmark_file:
pass
BenchmarkData.add_data(client_ip, category, time(), 0, 0)
def __acknowledge(self, sock, address):
try:
ack_bytes = bytearray()
ack_bytes.append(1)
sock.sendto(ack_bytes, address)
except:
pass
def __set_running(self, running):
self.__running_lock.acquire()
self.__running = running
self.__running_lock.release()
def __get_running(self):
self.__running_lock.acquire()
running = self.__running
self.__running_lock.release()
return running
|
[
"threading.Lock",
"threading.Thread",
"socket.socket",
"time.time"
] |
[((322, 328), 'threading.Lock', 'Lock', ([], {}), '()\n', (326, 328), False, 'from threading import Thread, Lock\n'), ((483, 531), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (496, 531), False, 'import socket\n'), ((2304, 2310), 'time.time', 'time', ([], {}), '()\n', (2308, 2310), False, 'from time import time\n'), ((945, 1020), 'threading.Thread', 'Thread', ([], {'target': 'self.__handle_client', 'args': '(sock, chunk, address, client_ip)'}), '(target=self.__handle_client, args=(sock, chunk, address, client_ip))\n', (951, 1020), False, 'from threading import Thread, Lock\n'), ((1885, 1891), 'time.time', 'time', ([], {}), '()\n', (1889, 1891), False, 'from time import time\n')]
|
from typing import Tuple
from functools import lru_cache
from ..core.base import FeatureExtractorSingleBand
import pandas as pd
import logging
class SupernovaeDetectionFeatureExtractor(FeatureExtractorSingleBand):
@lru_cache(1)
def get_features_keys_without_band(self) -> Tuple[str, ...]:
return ('delta_mag_fid',
'delta_mjd_fid',
'first_mag',
'mean_mag',
'min_mag',
'n_det',
'n_neg',
'n_pos',
'positive_fraction')
@lru_cache(1)
def get_required_keys(self) -> Tuple[str, ...]:
return "isdiffpos", "magnitude", "time", "band"
def compute_feature_in_one_band(self, detections, band, **kwargs):
grouped_detections = detections.groupby(level=0)
return self.compute_feature_in_one_band_from_group(grouped_detections, band, **kwargs)
def compute_feature_in_one_band_from_group(
self, detections, band, **kwargs):
"""
Parameters
----------
detections :class:pandas.`DataFrame`
DataFrame with single band detections of an object.
band :class:int
kwargs Not required.
Returns :class:pandas.`DataFrame`
-------
"""
columns = self.get_features_keys_with_band(band)
def aux_function(oid_detections, **kwargs):
bands = oid_detections['band'].values
if band not in bands:
oid = oid_detections.index.values[0]
logging.debug(
f'extractor=SN detection object={oid} required_cols={self.get_required_keys()} band={band}')
return self.nan_series_in_band(band)
oid_band_detections = oid_detections[bands == band].sort_values('time')
is_diff_pos_mask = oid_band_detections['isdiffpos'] > 0
n_pos = len(oid_band_detections[is_diff_pos_mask])
n_neg = len(oid_band_detections[~is_diff_pos_mask])
mags = oid_band_detections['magnitude'].values
min_mag = mags.min()
first_mag = mags[0]
mjds = oid_band_detections['time'].values
delta_mjd_fid = mjds[-1] - mjds[0]
delta_mag_fid = mags.max() - min_mag
positive_fraction = n_pos/(n_pos + n_neg)
mean_mag = mags.mean()
data = [
delta_mag_fid,
delta_mjd_fid,
first_mag,
mean_mag,
min_mag,
n_neg + n_pos,
n_neg,
n_pos,
positive_fraction
]
sn_det_df = pd.Series(
data=data,
index=columns)
return sn_det_df
sn_det_results = detections.apply(aux_function)
sn_det_results.index.name = 'oid'
return sn_det_results
|
[
"functools.lru_cache",
"pandas.Series"
] |
[((222, 234), 'functools.lru_cache', 'lru_cache', (['(1)'], {}), '(1)\n', (231, 234), False, 'from functools import lru_cache\n'), ((568, 580), 'functools.lru_cache', 'lru_cache', (['(1)'], {}), '(1)\n', (577, 580), False, 'from functools import lru_cache\n'), ((2699, 2734), 'pandas.Series', 'pd.Series', ([], {'data': 'data', 'index': 'columns'}), '(data=data, index=columns)\n', (2708, 2734), True, 'import pandas as pd\n')]
|
#-*- coding: utf-8 -*-
import os
from gluoncv.model_zoo import ssd_512_mobilenet1_0_voc
import sys
sys.path.append("..")
from convert import convert_ssd_model, save_model
if __name__ == "__main__":
if not os.path.exists("tmp"):
os.mkdir("tmp")
net = ssd_512_mobilenet1_0_voc(pretrained=True)
text_net, binary_weights = convert_ssd_model(net, input_shape=(1,3,512,512), to_bgr=True)
save_model(text_net, binary_weights, prefix="tmp/mssd512_voc")
|
[
"sys.path.append",
"os.mkdir",
"os.path.exists",
"convert.save_model",
"gluoncv.model_zoo.ssd_512_mobilenet1_0_voc",
"convert.convert_ssd_model"
] |
[((101, 122), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (116, 122), False, 'import sys\n'), ((271, 312), 'gluoncv.model_zoo.ssd_512_mobilenet1_0_voc', 'ssd_512_mobilenet1_0_voc', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (295, 312), False, 'from gluoncv.model_zoo import ssd_512_mobilenet1_0_voc\n'), ((344, 409), 'convert.convert_ssd_model', 'convert_ssd_model', (['net'], {'input_shape': '(1, 3, 512, 512)', 'to_bgr': '(True)'}), '(net, input_shape=(1, 3, 512, 512), to_bgr=True)\n', (361, 409), False, 'from convert import convert_ssd_model, save_model\n'), ((411, 473), 'convert.save_model', 'save_model', (['text_net', 'binary_weights'], {'prefix': '"""tmp/mssd512_voc"""'}), "(text_net, binary_weights, prefix='tmp/mssd512_voc')\n", (421, 473), False, 'from convert import convert_ssd_model, save_model\n'), ((213, 234), 'os.path.exists', 'os.path.exists', (['"""tmp"""'], {}), "('tmp')\n", (227, 234), False, 'import os\n'), ((244, 259), 'os.mkdir', 'os.mkdir', (['"""tmp"""'], {}), "('tmp')\n", (252, 259), False, 'import os\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize("value, format_, expectation", [
("01-Jan-15 10:00:00 +07:00", "DD-MMM-YY HH:mm:ss Z", "2015-01-01T03:00:00+00:00"), # noqa
("01-01-2015 10:00:00 +07:00", "DD-MM-YYYY HH:mm:ss Z", "2015-01-01T03:00:00+00:00"), # noqa
])
def test_wib_to_utc(value, format_, expectation):
from mishapp_ds.scrape.loaders import datetime_to_utc
assert datetime_to_utc(value, format_) == expectation
|
[
"pytest.mark.parametrize",
"mishapp_ds.scrape.loaders.datetime_to_utc"
] |
[((167, 406), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value, format_, expectation"""', "[('01-Jan-15 10:00:00 +07:00', 'DD-MMM-YY HH:mm:ss Z',\n '2015-01-01T03:00:00+00:00'), ('01-01-2015 10:00:00 +07:00',\n 'DD-MM-YYYY HH:mm:ss Z', '2015-01-01T03:00:00+00:00')]"], {}), "('value, format_, expectation', [(\n '01-Jan-15 10:00:00 +07:00', 'DD-MMM-YY HH:mm:ss Z',\n '2015-01-01T03:00:00+00:00'), ('01-01-2015 10:00:00 +07:00',\n 'DD-MM-YYYY HH:mm:ss Z', '2015-01-01T03:00:00+00:00')])\n", (190, 406), False, 'import pytest\n'), ((540, 571), 'mishapp_ds.scrape.loaders.datetime_to_utc', 'datetime_to_utc', (['value', 'format_'], {}), '(value, format_)\n', (555, 571), False, 'from mishapp_ds.scrape.loaders import datetime_to_utc\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from io import StringIO
import os.path
import random
import re
import sys
import yaml
import os_net_config
from os_net_config import cli
from os_net_config import common
from os_net_config import sriov_config
from os_net_config.tests import base
from os_net_config import utils
REALPATH = os.path.dirname(os.path.realpath(__file__))
SAMPLE_BASE = os.path.join(REALPATH, '../../', 'etc',
'os-net-config', 'samples')
class TestCli(base.TestCase):
def setUp(self):
super(TestCli, self).setUp()
rand = str(int(random.random() * 100000))
sriov_config._SRIOV_CONFIG_FILE = '/tmp/sriov_config_' + rand + '.yaml'
common._LOG_FILE = '/tmp/' + rand + 'os_net_config.log'
sys.stdout = StringIO()
sys.stderr = StringIO()
def stub_is_ovs_installed():
return True
self.stub_out('os_net_config.utils.is_ovs_installed',
stub_is_ovs_installed)
def tearDown(self):
super(TestCli, self).tearDown()
if os.path.isfile(common._LOG_FILE):
os.remove(common._LOG_FILE)
if os.path.isfile(sriov_config._SRIOV_CONFIG_FILE):
os.remove(sriov_config._SRIOV_CONFIG_FILE)
def run_cli(self, argstr, exitcodes=(0,)):
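        # Reset the captured stdout/stderr buffers, run the CLI with the given
        # argument string, assert on the exit code, and return both streams.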
for s in [sys.stdout, sys.stderr]:
s.flush()
s.truncate(0)
s.seek(0)
ret = cli.main(argstr.split())
self.assertIn(ret, exitcodes)
sys.stdout.flush()
sys.stderr.flush()
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
return (stdout, stderr)
def stub_get_stored_pci_address(self, ifname, noop):
if 'eth0' in ifname:
return "0000:00:07.0"
if 'eth1' in ifname:
return "0000:00:08.0"
if 'eth2' in ifname:
return "0000:00:09.0"
if 'em3' in ifname:
return "0000:00:03.0"
if 'em1' in ifname:
return "0000:00:01.0"
def test_bond_noop_output(self):
bond_yaml = os.path.join(SAMPLE_BASE, 'bond.yaml')
bond_json = os.path.join(SAMPLE_BASE, 'bond.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % bond_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % bond_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=br-ctlplane',
'DEVICE=em2',
'DEVICE=em1',
'DEVICE=bond1',
'DEVICETYPE=ovs']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_ivs_noop_output(self):
ivs_yaml = os.path.join(SAMPLE_BASE, 'ivs.yaml')
ivs_json = os.path.join(SAMPLE_BASE, 'ivs.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=nic2',
'DEVICE=nic3',
'DEVICE=api201',
'DEVICE=storage202',
'DEVICETYPE=ivs']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_bridge_noop_output(self):
bridge_yaml = os.path.join(SAMPLE_BASE, 'bridge_dhcp.yaml')
bridge_json = os.path.join(SAMPLE_BASE, 'bridge_dhcp.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=eni --noop '
'--exit-on-validation-errors '
'-c %s' % bridge_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=eni --noop '
'--exit-on-validation-errors '
'-c %s' % bridge_json)
self.assertEqual('', stderr)
sanity_devices = ['iface br-ctlplane inet dhcp',
'iface em1',
'ovs_type OVSBridge']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_vlan_noop_output(self):
vlan_yaml = os.path.join(SAMPLE_BASE, 'bridge_vlan.yaml')
vlan_json = os.path.join(SAMPLE_BASE, 'bridge_vlan.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % vlan_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % vlan_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=br-ctlplane',
'DEVICE=em1',
'DEVICE=vlan16',
'DEVICETYPE=ovs']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_interface_noop_output(self):
interface_yaml = os.path.join(SAMPLE_BASE, 'interface.yaml')
interface_json = os.path.join(SAMPLE_BASE, 'interface.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % interface_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % interface_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=em1',
'BOOTPROTO=static',
'IPADDR=192.0.2.1']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_bridge_noop_rootfs(self):
for provider in ('ifcfg', 'eni'):
bond_yaml = os.path.join(SAMPLE_BASE, 'bridge_dhcp.yaml')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=%s --noop '
'--exit-on-validation-errors '
'--root-dir=/rootfs '
'-c %s' % (provider, bond_yaml))
self.assertEqual('', stderr)
self.assertIn('File: /rootfs/', stdout_yaml)
def test_interface_noop_detailed_exit_codes(self):
interface_yaml = os.path.join(SAMPLE_BASE, 'interface.yaml')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s --detailed-exit-codes'
% interface_yaml, exitcodes=(2,))
def test_interface_noop_detailed_exit_codes_no_changes(self):
interface_yaml = os.path.join(SAMPLE_BASE, 'interface.yaml')
class TestImpl(os_net_config.NetConfig):
def add_interface(self, interface):
pass
def apply(self, cleanup=False, activate=True):
# this fake implementation returns no changes
return {}
self.stub_out('os_net_config.impl_ifcfg.IfcfgNetConfig', TestImpl)
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s --detailed-exit-codes'
% interface_yaml, exitcodes=(0,))
def test_sriov_noop_output(self):
def test_get_vf_devname(device, vfid):
return device + '_' + str(vfid)
def test_get_pci_address(ifname, noop):
return '0000:79:10.2'
def test_interface_mac(name):
return 'AA:BB:CC:DD:EE:FF'
self.stub_out('os_net_config.utils.get_vf_devname',
test_get_vf_devname)
self.stub_out('os_net_config.utils.get_pci_address',
test_get_pci_address)
self.stub_out('os_net_config.utils.interface_mac',
test_interface_mac)
ivs_yaml = os.path.join(SAMPLE_BASE, 'sriov_pf.yaml')
ivs_json = os.path.join(SAMPLE_BASE, 'sriov_pf.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_yaml)
self.assertEqual('', stderr)
contents = utils.get_file_data(sriov_config._SRIOV_CONFIG_FILE)
sriov_config_yaml = yaml.safe_load(contents)
os.remove(sriov_config._SRIOV_CONFIG_FILE)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_json)
self.assertEqual('', stderr)
contents = utils.get_file_data(sriov_config._SRIOV_CONFIG_FILE)
sriov_config_json = yaml.safe_load(contents)
sanity_devices = ['DEVICE=p2p1',
'DEVICE=p2p1_5',
'DEVICE=p2p1_1',
'DEVICE=br-vfs',
'DEVICE=br-bond',
'TYPE=OVSBridge']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
self.assertCountEqual(sriov_config_yaml, sriov_config_json)
def test_sriov_vf_with_dpdk_noop_output(self):
def test_get_vf_devname(device, vfid):
return device + '_' + str(vfid)
def test_get_pci_address(ifname, noop):
return '0000:79:10.2'
self.stub_out('os_net_config.utils.get_vf_devname',
test_get_vf_devname)
self.stub_out('os_net_config.utils.get_pci_address',
test_get_pci_address)
ivs_yaml = os.path.join(SAMPLE_BASE, 'sriov_pf_ovs_dpdk.yaml')
ivs_json = os.path.join(SAMPLE_BASE, 'sriov_pf_ovs_dpdk.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_yaml)
self.assertEqual('', stderr)
contents = utils.get_file_data(sriov_config._SRIOV_CONFIG_FILE)
sriov_config_yaml = yaml.safe_load(contents)
os.remove(sriov_config._SRIOV_CONFIG_FILE)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_json)
self.assertEqual('', stderr)
contents = utils.get_file_data(sriov_config._SRIOV_CONFIG_FILE)
sriov_config_json = yaml.safe_load(contents)
sanity_devices = ['DEVICE=p2p1',
'DEVICE=p2p1_5',
'DEVICE=br-vfs',
'TYPE=OVSUserBridge',
'DEVICE=dpdk0',
'TYPE=OVSDPDKPort']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
self.assertCountEqual(sriov_config_yaml, sriov_config_json)
def test_ovs_dpdk_bond_noop_output(self):
ivs_yaml = os.path.join(SAMPLE_BASE, 'ovs_dpdk_bond.yaml')
ivs_json = os.path.join(SAMPLE_BASE, 'ovs_dpdk_bond.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=br-link',
'TYPE=OVSUserBridge',
'DEVICE=dpdkbond0',
'TYPE=OVSDPDKBond']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_nfvswitch_noop_output(self):
nfvswitch_yaml = os.path.join(SAMPLE_BASE, 'nfvswitch.yaml')
nfvswitch_json = os.path.join(SAMPLE_BASE, 'nfvswitch.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % nfvswitch_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % nfvswitch_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=nic2',
'DEVICE=nic3',
'DEVICE=api201',
'DEVICE=storage202',
'DEVICETYPE=nfvswitch']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_ovs_dpdk_noop_output(self):
ivs_yaml = os.path.join(SAMPLE_BASE, 'ovs_dpdk.yaml')
ivs_json = os.path.join(SAMPLE_BASE, 'ovs_dpdk.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % ivs_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=br-link',
'TYPE=OVSUserBridge',
'DEVICE=dpdk0',
'TYPE=OVSDPDKPort']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_nic_mapping_report_output(self):
mapping_report = os.path.join(SAMPLE_BASE, 'mapping_report.yaml')
def dummy_mapped_nics(nic_mapping=None):
return nic_mapping
self.stub_out('os_net_config.objects.mapped_nics', dummy_mapped_nics)
stdout, stderr = self.run_cli('ARG0 --interfaces '
'--exit-on-validation-errors '
'-m %s' % mapping_report)
self.assertEqual('', stderr)
stdout_list = yaml.safe_load(stdout)
self.assertEqual(stdout_list['nic1'], 'em1')
self.assertEqual(stdout_list['nic2'], 'em2')
self.assertEqual(stdout_list['nic3'], 'em4')
self.assertEqual(stdout_list['nic4'], 'em3')
def test_nic_mapping_report_with_explicit_interface_name(self):
mapping_report = os.path.join(SAMPLE_BASE, 'mapping_report.yaml')
def dummy_mapped_nics(nic_mapping=None):
return nic_mapping
self.stub_out('os_net_config.objects.mapped_nics', dummy_mapped_nics)
stdout, stderr = self.run_cli('ARG0 --interfaces em2 em3 '
'--exit-on-validation-errors '
'-m %s' % mapping_report)
self.assertEqual('', stderr)
stdout_list = yaml.safe_load(stdout)
self.assertNotIn('em1', stdout_list.keys())
self.assertNotIn('em1', stdout_list.values())
self.assertEqual(stdout_list['em2'], 'em2')
self.assertEqual(stdout_list['em3'], 'em3')
self.assertNotIn('em4', stdout_list.keys())
self.assertNotIn('em4', stdout_list.values())
def test_contrail_vrouter_noop_output(self):
cvi_yaml = os.path.join(SAMPLE_BASE, 'contrail_vrouter.yaml')
cvi_json = os.path.join(SAMPLE_BASE, 'contrail_vrouter.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % cvi_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % cvi_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=vhost0',
'BIND_INT=em3',
'DEVICETYPE=vhost',
'TYPE=kernel_mode']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_contrail_vrouter_vlan_noop_output(self):
cvi_yaml = os.path.join(SAMPLE_BASE, 'contrail_vrouter_vlan.yaml')
cvi_json = os.path.join(SAMPLE_BASE, 'contrail_vrouter_vlan.json')
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % cvi_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'-c %s' % cvi_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=vhost0',
'BIND_INT=vlan100',
'DEVICETYPE=vhost',
'TYPE=kernel_mode']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
self.assertEqual(stdout_yaml, stdout_json)
def test_contrail_vrouter_dpdk_noop_output(self):
timestamp_rex = re.compile(
(r'contrail_vrouter_dpdk\.(yaml|json)|^[\d]{4}-[\d]{2}-[\d]{2} '
r'[\d]{2}:[\d]{2}:[\d]{2}\.[\d]{3} '),
flags=re.M
)
cvi_yaml = os.path.join(SAMPLE_BASE, 'contrail_vrouter_dpdk.yaml')
cvi_json = os.path.join(SAMPLE_BASE, 'contrail_vrouter_dpdk.json')
self.stub_out('os_net_config.utils.get_stored_pci_address',
self.stub_get_stored_pci_address)
stdout_yaml, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'--debug '
'-c %s' % cvi_yaml)
self.assertEqual('', stderr)
stdout_json, stderr = self.run_cli('ARG0 --provider=ifcfg --noop '
'--exit-on-validation-errors '
'--debug '
'-c %s' % cvi_json)
self.assertEqual('', stderr)
sanity_devices = ['DEVICE=vhost0',
'BIND_INT=0000:00:03.0',
'DEVICETYPE=vhost',
'TYPE=dpdk']
for dev in sanity_devices:
self.assertIn(dev, stdout_yaml)
stdout_yaml = timestamp_rex.sub('', stdout_yaml)
stdout_json = timestamp_rex.sub('', stdout_json)
self.assertEqual(stdout_yaml, stdout_json)
|
[
"io.StringIO",
"os_net_config.utils.get_file_data",
"sys.stderr.getvalue",
"random.random",
"sys.stdout.flush",
"yaml.safe_load",
"sys.stdout.getvalue",
"sys.stderr.flush",
"re.compile"
] |
[((1355, 1365), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1363, 1365), False, 'from io import StringIO\n'), ((1387, 1397), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1395, 1397), False, 'from io import StringIO\n'), ((2078, 2096), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2094, 2096), False, 'import sys\n'), ((2105, 2123), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (2121, 2123), False, 'import sys\n'), ((2141, 2162), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (2160, 2162), False, 'import sys\n'), ((2180, 2201), 'sys.stderr.getvalue', 'sys.stderr.getvalue', ([], {}), '()\n', (2199, 2201), False, 'import sys\n'), ((10227, 10279), 'os_net_config.utils.get_file_data', 'utils.get_file_data', (['sriov_config._SRIOV_CONFIG_FILE'], {}), '(sriov_config._SRIOV_CONFIG_FILE)\n', (10246, 10279), False, 'from os_net_config import utils\n'), ((10308, 10332), 'yaml.safe_load', 'yaml.safe_load', (['contents'], {}), '(contents)\n', (10322, 10332), False, 'import yaml\n'), ((10652, 10704), 'os_net_config.utils.get_file_data', 'utils.get_file_data', (['sriov_config._SRIOV_CONFIG_FILE'], {}), '(sriov_config._SRIOV_CONFIG_FILE)\n', (10671, 10704), False, 'from os_net_config import utils\n'), ((10733, 10757), 'yaml.safe_load', 'yaml.safe_load', (['contents'], {}), '(contents)\n', (10747, 10757), False, 'import yaml\n'), ((12059, 12111), 'os_net_config.utils.get_file_data', 'utils.get_file_data', (['sriov_config._SRIOV_CONFIG_FILE'], {}), '(sriov_config._SRIOV_CONFIG_FILE)\n', (12078, 12111), False, 'from os_net_config import utils\n'), ((12140, 12164), 'yaml.safe_load', 'yaml.safe_load', (['contents'], {}), '(contents)\n', (12154, 12164), False, 'import yaml\n'), ((12484, 12536), 'os_net_config.utils.get_file_data', 'utils.get_file_data', (['sriov_config._SRIOV_CONFIG_FILE'], {}), '(sriov_config._SRIOV_CONFIG_FILE)\n', (12503, 12536), False, 'from os_net_config import utils\n'), ((12565, 12589), 'yaml.safe_load', 'yaml.safe_load', (['contents'], {}), '(contents)\n', (12579, 12589), False, 'import yaml\n'), ((16593, 16615), 'yaml.safe_load', 'yaml.safe_load', (['stdout'], {}), '(stdout)\n', (16607, 16615), False, 'import yaml\n'), ((17390, 17412), 'yaml.safe_load', 'yaml.safe_load', (['stdout'], {}), '(stdout)\n', (17404, 17412), False, 'import yaml\n'), ((19817, 19955), 're.compile', 're.compile', (['"""contrail_vrouter_dpdk\\\\.(yaml|json)|^[\\\\d]{4}-[\\\\d]{2}-[\\\\d]{2} [\\\\d]{2}:[\\\\d]{2}:[\\\\d]{2}\\\\.[\\\\d]{3} """'], {'flags': 're.M'}), "(\n 'contrail_vrouter_dpdk\\\\.(yaml|json)|^[\\\\d]{4}-[\\\\d]{2}-[\\\\d]{2} [\\\\d]{2}:[\\\\d]{2}:[\\\\d]{2}\\\\.[\\\\d]{3} '\n , flags=re.M)\n", (19827, 19955), False, 'import re\n'), ((1163, 1178), 'random.random', 'random.random', ([], {}), '()\n', (1176, 1178), False, 'import random\n')]
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('protectedErr/', views.protected_error, name='protected_error'),
path('accounts/', include('django.contrib.auth.urls')),
path('auth/', include('social_django.urls', namespace='social')),
path('user/update/<int:id>', views.UpdateUser.as_view(),
name='UpdateUser'),
path('projets/', views.projets, name='projets'),
path('projets/add/', views.AjoutProjet.as_view(),
name='AjoutProjet'),
path('projets/update/<int:idProjet>', views.UpdateProjet.as_view(),
name='UpdateProjet'),
path('projets/delete/<int:idProjet>', views.DeleteProjet.as_view(),
name='DeleteProjet'),
path('projets/tache_probable_add/', views.NouvelleTacheProbable.as_view(),
name='NouvelleTacheProbable'),
path('projets/tache_probable_update/<int:idCom>',
views.UpdateTacheProbable.as_view(),
name='UpdateTacheProbable'),
path('projets/tache_probable_delete/<int:idCom>',
views.DeleteTacheProbable.as_view(),
name='DeleteTacheProbable'),
path('clients/add/', views.AjoutClient.as_view(), name='AjoutClient'),
path('clients/update/<int:idClient>', views.UpdateClient.as_view(),
name='UpdateClient'),
path('clients/delete/<int:idClient>', views.DeleteClient.as_view(),
name='DeleteClient'),
path('collaborateurs/', views.collaborateurs, name='collaborateurs'),
path('collaborateurs/add/', views.AjoutCollab.as_view(),
name='AjoutCollab'),
path('collaborateurs/update/<str:pk>', views.UpdateCollab.as_view(),
name='UpdateCollab'),
path('collaborateurs/delete/<str:pk>', views.DeleteCollab.as_view(),
name='DeleteCollab'),
path('collaborateurs/assign/', views.AffectationProjetDateSet.as_view(),
name='AffectationProjetDateSet'),
path('collaborateurs/assign/update/<int:idRP>',
views.UpdateAffectationProjetDateSet.as_view(),
name='UpdateAffectationProjetDateSet'),
path('collaborateurs/assign/delete/<int:idRP>',
views.DeleteAffectation.as_view(),
name='DeleteAffectation'),
path('commandes/', views.commandes, name='commandes'),
path('commandes/add/', views.PasserCommande.as_view(),
name='PasserCommande'),
path('commandes/update/<int:idCom>', views.UpdateCommande.as_view(),
name='UpdateCommande'),
path('commandes/fromtask/<int:idCom>', views.PassCommandFromTask.as_view(),
name='PassCommandFromTask'),
path('commandes/delete/<int:idCom>', views.DeleteCommande.as_view(),
name='DeleteCommande'),
path('autres/', views.autres, name='autres'),
path('autres/assign', views.AffectationAutres.as_view(),
name='AffectationAutres'),
path('autres/assign/update/<int:idRA>',
views.UpdateAffectationAutres.as_view(),
name='UpdateAffectationAutres'),
path('autres/assign/delete/<int:idRA>',
views.DeleteAffectationAutres.as_view(),
name='DeleteAffectationAutres'),
path('data/', views.data, name='data'),
path('history/', views.history, name='history'),
path('history/revert_projet/<str:model>/<int:id>',
views.revert_projet, name='revert_projet'),
path('history/revert_command/<str:model>/<int:id>',
views.revert_command, name='revert_command'),
path('history/revert_collab/<str:model>/<str:id>',
views.revert_collab, name='revert_collab'),
path('history/revert_autres/<str:model>/<int:id>',
views.revert_autres, name='revert_autres'),
path('history/revert_data/<str:model>/<int:id>',
views.revert_data, name='revert_data'),
path('history/revert_data_bis/<str:model>/<int:id>',
views.revert_data_bis, name='revert_data_bis'),
path('history/delete/', views.clean_history, name="clean_history"),
path('charge_update/<int:id>', views.assigned_charges_update,
name="assigned_charges_update")
]
|
[
"django.urls.path",
"django.urls.include"
] |
[((80, 115), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (84, 115), False, 'from django.urls import path, include\n'), ((121, 189), 'django.urls.path', 'path', (['"""protectedErr/"""', 'views.protected_error'], {'name': '"""protected_error"""'}), "('protectedErr/', views.protected_error, name='protected_error')\n", (125, 189), False, 'from django.urls import path, include\n'), ((416, 463), 'django.urls.path', 'path', (['"""projets/"""', 'views.projets'], {'name': '"""projets"""'}), "('projets/', views.projets, name='projets')\n", (420, 463), False, 'from django.urls import path, include\n'), ((1437, 1505), 'django.urls.path', 'path', (['"""collaborateurs/"""', 'views.collaborateurs'], {'name': '"""collaborateurs"""'}), "('collaborateurs/', views.collaborateurs, name='collaborateurs')\n", (1441, 1505), False, 'from django.urls import path, include\n'), ((2221, 2274), 'django.urls.path', 'path', (['"""commandes/"""', 'views.commandes'], {'name': '"""commandes"""'}), "('commandes/', views.commandes, name='commandes')\n", (2225, 2274), False, 'from django.urls import path, include\n'), ((2703, 2747), 'django.urls.path', 'path', (['"""autres/"""', 'views.autres'], {'name': '"""autres"""'}), "('autres/', views.autres, name='autres')\n", (2707, 2747), False, 'from django.urls import path, include\n'), ((3123, 3161), 'django.urls.path', 'path', (['"""data/"""', 'views.data'], {'name': '"""data"""'}), "('data/', views.data, name='data')\n", (3127, 3161), False, 'from django.urls import path, include\n'), ((3168, 3215), 'django.urls.path', 'path', (['"""history/"""', 'views.history'], {'name': '"""history"""'}), "('history/', views.history, name='history')\n", (3172, 3215), False, 'from django.urls import path, include\n'), ((3221, 3318), 'django.urls.path', 'path', (['"""history/revert_projet/<str:model>/<int:id>"""', 'views.revert_projet'], {'name': '"""revert_projet"""'}), "('history/revert_projet/<str:model>/<int:id>', views.revert_projet,\n name='revert_projet')\n", (3225, 3318), False, 'from django.urls import path, include\n'), ((3329, 3429), 'django.urls.path', 'path', (['"""history/revert_command/<str:model>/<int:id>"""', 'views.revert_command'], {'name': '"""revert_command"""'}), "('history/revert_command/<str:model>/<int:id>', views.revert_command,\n name='revert_command')\n", (3333, 3429), False, 'from django.urls import path, include\n'), ((3440, 3537), 'django.urls.path', 'path', (['"""history/revert_collab/<str:model>/<str:id>"""', 'views.revert_collab'], {'name': '"""revert_collab"""'}), "('history/revert_collab/<str:model>/<str:id>', views.revert_collab,\n name='revert_collab')\n", (3444, 3537), False, 'from django.urls import path, include\n'), ((3548, 3645), 'django.urls.path', 'path', (['"""history/revert_autres/<str:model>/<int:id>"""', 'views.revert_autres'], {'name': '"""revert_autres"""'}), "('history/revert_autres/<str:model>/<int:id>', views.revert_autres,\n name='revert_autres')\n", (3552, 3645), False, 'from django.urls import path, include\n'), ((3656, 3748), 'django.urls.path', 'path', (['"""history/revert_data/<str:model>/<int:id>"""', 'views.revert_data'], {'name': '"""revert_data"""'}), "('history/revert_data/<str:model>/<int:id>', views.revert_data, name=\n 'revert_data')\n", (3660, 3748), False, 'from django.urls import path, include\n'), ((3758, 3861), 'django.urls.path', 'path', (['"""history/revert_data_bis/<str:model>/<int:id>"""', 'views.revert_data_bis'], {'name': 
'"""revert_data_bis"""'}), "('history/revert_data_bis/<str:model>/<int:id>', views.revert_data_bis,\n name='revert_data_bis')\n", (3762, 3861), False, 'from django.urls import path, include\n'), ((3872, 3938), 'django.urls.path', 'path', (['"""history/delete/"""', 'views.clean_history'], {'name': '"""clean_history"""'}), "('history/delete/', views.clean_history, name='clean_history')\n", (3876, 3938), False, 'from django.urls import path, include\n'), ((3945, 4043), 'django.urls.path', 'path', (['"""charge_update/<int:id>"""', 'views.assigned_charges_update'], {'name': '"""assigned_charges_update"""'}), "('charge_update/<int:id>', views.assigned_charges_update, name=\n 'assigned_charges_update')\n", (3949, 4043), False, 'from django.urls import path, include\n'), ((213, 248), 'django.urls.include', 'include', (['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (220, 248), False, 'from django.urls import path, include\n'), ((269, 318), 'django.urls.include', 'include', (['"""social_django.urls"""'], {'namespace': '"""social"""'}), "('social_django.urls', namespace='social')\n", (276, 318), False, 'from django.urls import path, include\n')]
|
import os
from logging import getLogger
from src.constants import CONSTANTS, PLATFORM_ENUM
logger = getLogger(__name__)
class PlatformConfigurations:
platform = os.getenv("PLATFORM", PLATFORM_ENUM.DOCKER.value)
if not PLATFORM_ENUM.has_value(platform):
raise ValueError(f"PLATFORM must be one of {[v.value for v in PLATFORM_ENUM.__members__.values()]}")
class DBConfigurations:
mysql_username = os.getenv("MYSQL_USER")
mysql_password = os.getenv("MYSQL_PASSWORD")
mysql_port = int(os.getenv("MYSQL_PORT", 3306))
mysql_database = os.getenv("MYSQL_DATABASE", "sample_db")
mysql_server = os.getenv("MYSQL_SERVER")
sql_alchemy_database_url = (
f"mysql://{mysql_username}:{mysql_password}@{mysql_server}:{mysql_port}/{mysql_database}?charset=utf8"
)
class APIConfigurations:
title = os.getenv("API_TITLE", "ServingPattern")
description = os.getenv("API_DESCRIPTION", "machine learning system serving patterns")
version = os.getenv("API_VERSION", "0.1")
class ModelConfigurations:
model_filepath = os.getenv("MODEL_FILEPATH")
label_filepath = os.getenv("LABEL_FILEPATH")
outlier_model_filepath = os.getenv("OUTLIER_MODEL_FILEPATH")
outlier_lower_threshold = float(os.getenv("OUTLIER_LOWER_THRESHOLD", 0.0))
logger.info(f"{PlatformConfigurations.__name__}: {PlatformConfigurations.__dict__}")
logger.info(f"{APIConfigurations.__name__}: {APIConfigurations.__dict__}")
logger.info(f"{ModelConfigurations.__name__}: {ModelConfigurations.__dict__}")
|
[
"src.constants.PLATFORM_ENUM.has_value",
"src.constants.PLATFORM_ENUM.__members__.values",
"os.getenv",
"logging.getLogger"
] |
[((102, 121), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (111, 121), False, 'from logging import getLogger\n'), ((169, 218), 'os.getenv', 'os.getenv', (['"""PLATFORM"""', 'PLATFORM_ENUM.DOCKER.value'], {}), "('PLATFORM', PLATFORM_ENUM.DOCKER.value)\n", (178, 218), False, 'import os\n'), ((421, 444), 'os.getenv', 'os.getenv', (['"""MYSQL_USER"""'], {}), "('MYSQL_USER')\n", (430, 444), False, 'import os\n'), ((466, 493), 'os.getenv', 'os.getenv', (['"""MYSQL_PASSWORD"""'], {}), "('MYSQL_PASSWORD')\n", (475, 493), False, 'import os\n'), ((567, 607), 'os.getenv', 'os.getenv', (['"""MYSQL_DATABASE"""', '"""sample_db"""'], {}), "('MYSQL_DATABASE', 'sample_db')\n", (576, 607), False, 'import os\n'), ((627, 652), 'os.getenv', 'os.getenv', (['"""MYSQL_SERVER"""'], {}), "('MYSQL_SERVER')\n", (636, 652), False, 'import os\n'), ((842, 882), 'os.getenv', 'os.getenv', (['"""API_TITLE"""', '"""ServingPattern"""'], {}), "('API_TITLE', 'ServingPattern')\n", (851, 882), False, 'import os\n'), ((901, 973), 'os.getenv', 'os.getenv', (['"""API_DESCRIPTION"""', '"""machine learning system serving patterns"""'], {}), "('API_DESCRIPTION', 'machine learning system serving patterns')\n", (910, 973), False, 'import os\n'), ((988, 1019), 'os.getenv', 'os.getenv', (['"""API_VERSION"""', '"""0.1"""'], {}), "('API_VERSION', '0.1')\n", (997, 1019), False, 'import os\n'), ((1070, 1097), 'os.getenv', 'os.getenv', (['"""MODEL_FILEPATH"""'], {}), "('MODEL_FILEPATH')\n", (1079, 1097), False, 'import os\n'), ((1119, 1146), 'os.getenv', 'os.getenv', (['"""LABEL_FILEPATH"""'], {}), "('LABEL_FILEPATH')\n", (1128, 1146), False, 'import os\n'), ((1176, 1211), 'os.getenv', 'os.getenv', (['"""OUTLIER_MODEL_FILEPATH"""'], {}), "('OUTLIER_MODEL_FILEPATH')\n", (1185, 1211), False, 'import os\n'), ((230, 263), 'src.constants.PLATFORM_ENUM.has_value', 'PLATFORM_ENUM.has_value', (['platform'], {}), '(platform)\n', (253, 263), False, 'from src.constants import CONSTANTS, PLATFORM_ENUM\n'), ((515, 544), 'os.getenv', 'os.getenv', (['"""MYSQL_PORT"""', '(3306)'], {}), "('MYSQL_PORT', 3306)\n", (524, 544), False, 'import os\n'), ((1248, 1289), 'os.getenv', 'os.getenv', (['"""OUTLIER_LOWER_THRESHOLD"""', '(0.0)'], {}), "('OUTLIER_LOWER_THRESHOLD', 0.0)\n", (1257, 1289), False, 'import os\n'), ((335, 369), 'src.constants.PLATFORM_ENUM.__members__.values', 'PLATFORM_ENUM.__members__.values', ([], {}), '()\n', (367, 369), False, 'from src.constants import CONSTANTS, PLATFORM_ENUM\n')]
|
# Generated by Django 2.2.7 on 2020-07-15 07:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0002_auto_20200514_1518'),
]
operations = [
migrations.RemoveField(
model_name='subscriber',
name='last_sent',
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((230, 295), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""subscriber"""', 'name': '"""last_sent"""'}), "(model_name='subscriber', name='last_sent')\n", (252, 295), False, 'from django.db import migrations\n')]
|
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
import pandas as pd
import os
import sys
from feature_selection import read_data
def leaveOneOut(df):
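    # Leave-one-out split: each fold holds out a single sample; returns a dict with lists of train/test index arrays.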
# target and data selection
y=df.iloc[:,-1]
X=df.iloc[:,:-1]
y=y.to_numpy()
loo = LeaveOneOut()
loo.get_n_splits(X,y)
split={
"train":[],
"test":[]
}
for train_index, test_index in loo.split(X, y):
print("loo sizes", len(train_index), len(test_index))
split["train"].append(train_index)
split["test"].append(test_index)
print("leave one out ... created ", len(split["train"]))
return split
def stratifiedKfold(df, k=5):
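    # Stratified k-fold split: preserves the class proportions in each fold; returns a dict with lists of train/test index arrays.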
# target and data selection
y=df.iloc[:,-1]
X=df.iloc[:,:-1]
y=y.to_numpy()
skf =StratifiedKFold(k)
skf.get_n_splits(X,y)
split={
"train":[],
"test":[]
}
for train_index, test_index in skf.split(X, y):
split["train"].append(train_index)
split["test"].append(test_index)
print("stratified k fold ... k = ", k)
return split
def createFolder(df, split_dict, path="./kFold"):
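    # Writes each train/test split of df to tab-separated files (train_<i>.txt / test_<i>.txt) under the given folder.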
access_rights = 0o777
try:
os.mkdir(path, access_rights)
except OSError:
print ("Creation of the directory %s failed" % path)
else:
print ("Successfully created the directory %s" % path)
train = split_dict["train"]
test = split_dict["test"]
for i in range(len(train)):
df.iloc[train[i]].to_csv(path+"/train_"+str(i)+".txt",sep="\t")
df.iloc[test[i]].to_csv(path+"/test_"+str(i)+".txt",sep="\t")
def main():
data = read_data(sys.argv[1])
if sys.argv[2] == "stratified":
k=int(sys.argv[3])
split=stratifiedKfold(data,k)
if sys.argv[4]:
createFolder(data, split, path=sys.argv[4])
else:
createFolder(data, split)
elif sys.argv[2] == "loo":
split=leaveOneOut(data)
createFolder(data, split, path="./loo")
# python k_fold.py .\data\new_data.txt [stratified | loo] {k}
if __name__== "__main__":
main()
|
[
"feature_selection.read_data",
"sklearn.model_selection.StratifiedKFold",
"os.mkdir",
"sklearn.model_selection.LeaveOneOut"
] |
[((312, 325), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (323, 325), False, 'from sklearn.model_selection import LeaveOneOut\n'), ((762, 780), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['k'], {}), '(k)\n', (777, 780), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1523, 1545), 'feature_selection.read_data', 'read_data', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1532, 1545), False, 'from feature_selection import read_data\n'), ((1111, 1140), 'os.mkdir', 'os.mkdir', (['path', 'access_rights'], {}), '(path, access_rights)\n', (1119, 1140), False, 'import os\n')]
|
"""
Dependencies:
tensorflow: 1.2.0
matplotlib
numpy
"""
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
tf.set_random_seed(1)
np.random.seed(1)
#fake data
n_data = np.ones((100,2))
x0 = np.random.normal(2*n_data, 1) #class0 x shape = (100, 2))
y0 = np.zeros(100) #class0 y shape = (100, 1))
x1 = np.random.normal(-2*n_data, 1) # class1 x shape=(100, 2)
y1 = np.ones(100) # class1 y shape=(100, 1)
x = np.vstack((x0, x1)) #shape (200, 2)) + some noise
y = np.hstack((y0,y1)) #shape (200, )
#plot data
plt.scatter(x[:, 0], x[:, 1], c=y, s=100, lw=0, cmap='RdYlGn')
plt.show()
tf_x = tf.placeholder(tf.float32, x.shape) #input x
tf_y = tf.placeholder(tf.int32, y.shape)
#neural network layers
l1 = tf.layers.dense(tf_x, 10, tf.nn.relu)
output = tf.layers.dense(l1, 2)
loss = tf.losses.sparse_softmax_cross_entropy(labels = tf_y, logits =output) #compute cost
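# tf.metrics.accuracy returns (accuracy, update_op); indexing [1] keeps the update op so the metric accumulates on each run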
accuracy = tf.metrics.accuracy(labels = tf.squeeze(tf_y), predictions = tf.argmax(output, axis=1), )[1]
optimizer = tf.train.GradientDescentOptimizer(learning_rate= 0.05)
train_op = optimizer.minimize(loss)
sess = tf.Session()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
plt.ion()
for step in range(100):
#train and net output
_, acc, pred = sess.run([train_op, accuracy, output], {tf_x: x, tf_y:y })
if step %2 == 0:
#plot and show learning process
plt.cla()
plt.scatter(x[:, 0], x[:, 1], c = pred.argmax(1), s = 100, lw = 0, cmap = 'RdYlGn')
plt.text(1.5, -4, 'Accuracy = %.2f'% acc, fontdict = {'size':20, 'color':'red'})
plt.pause(0.1)
plt.ioff()
plt.show()
|
[
"numpy.random.seed",
"numpy.ones",
"tensorflow.local_variables_initializer",
"numpy.random.normal",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"matplotlib.pyplot.cla",
"tensorflow.squeeze",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.hstack",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ion",
"tensorflow.train.GradientDescentOptimizer",
"numpy.vstack",
"tensorflow.losses.sparse_softmax_cross_entropy",
"matplotlib.pyplot.ioff",
"tensorflow.argmax",
"matplotlib.pyplot.scatter",
"tensorflow.layers.dense",
"numpy.zeros"
] |
[((134, 155), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (152, 155), True, 'import tensorflow as tf\n'), ((156, 173), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (170, 173), True, 'import numpy as np\n'), ((195, 212), 'numpy.ones', 'np.ones', (['(100, 2)'], {}), '((100, 2))\n', (202, 212), True, 'import numpy as np\n'), ((217, 248), 'numpy.random.normal', 'np.random.normal', (['(2 * n_data)', '(1)'], {}), '(2 * n_data, 1)\n', (233, 248), True, 'import numpy as np\n'), ((289, 302), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (297, 302), True, 'import numpy as np\n'), ((361, 393), 'numpy.random.normal', 'np.random.normal', (['(-2 * n_data)', '(1)'], {}), '(-2 * n_data, 1)\n', (377, 393), True, 'import numpy as np\n'), ((431, 443), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (438, 443), True, 'import numpy as np\n'), ((500, 519), 'numpy.vstack', 'np.vstack', (['(x0, x1)'], {}), '((x0, x1))\n', (509, 519), True, 'import numpy as np\n'), ((575, 594), 'numpy.hstack', 'np.hstack', (['(y0, y1)'], {}), '((y0, y1))\n', (584, 594), True, 'import numpy as np\n'), ((643, 705), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 0]', 'x[:, 1]'], {'c': 'y', 's': '(100)', 'lw': '(0)', 'cmap': '"""RdYlGn"""'}), "(x[:, 0], x[:, 1], c=y, s=100, lw=0, cmap='RdYlGn')\n", (654, 705), True, 'import matplotlib.pyplot as plt\n'), ((706, 716), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (714, 716), True, 'import matplotlib.pyplot as plt\n'), ((725, 760), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'x.shape'], {}), '(tf.float32, x.shape)\n', (739, 760), True, 'import tensorflow as tf\n'), ((782, 815), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'y.shape'], {}), '(tf.int32, y.shape)\n', (796, 815), True, 'import tensorflow as tf\n'), ((846, 883), 'tensorflow.layers.dense', 'tf.layers.dense', (['tf_x', '(10)', 'tf.nn.relu'], {}), '(tf_x, 10, tf.nn.relu)\n', (861, 883), True, 'import tensorflow as tf\n'), ((893, 915), 'tensorflow.layers.dense', 'tf.layers.dense', (['l1', '(2)'], {}), '(l1, 2)\n', (908, 915), True, 'import tensorflow as tf\n'), ((924, 990), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'tf_y', 'logits': 'output'}), '(labels=tf_y, logits=output)\n', (962, 990), True, 'import tensorflow as tf\n'), ((1135, 1188), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.05)'}), '(learning_rate=0.05)\n', (1168, 1188), True, 'import tensorflow as tf\n'), ((1234, 1246), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1244, 1246), True, 'import tensorflow as tf\n'), ((1354, 1363), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1361, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1803), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1801, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1814), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1812, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1299), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1297, 1299), True, 'import tensorflow as tf\n'), ((1301, 1333), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (1331, 1333), True, 'import tensorflow as tf\n'), ((1561, 1570), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1568, 1570), True, 'import matplotlib.pyplot as 
plt\n'), ((1671, 1756), 'matplotlib.pyplot.text', 'plt.text', (['(1.5)', '(-4)', "('Accuracy = %.2f' % acc)"], {'fontdict': "{'size': 20, 'color': 'red'}"}), "(1.5, -4, 'Accuracy = %.2f' % acc, fontdict={'size': 20, 'color':\n 'red'})\n", (1679, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1774), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1769, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1059, 1075), 'tensorflow.squeeze', 'tf.squeeze', (['tf_y'], {}), '(tf_y)\n', (1069, 1075), True, 'import tensorflow as tf\n'), ((1091, 1116), 'tensorflow.argmax', 'tf.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (1100, 1116), True, 'import tensorflow as tf\n')]
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import NoSuchElementException, TimeoutException, ElementNotVisibleException, \
ElementNotInteractableException
import time
import os
import sys
'''
This code was made to work on FMC versions 6.2.3.13 and 6.3.0 and has not been tested on any other version.
This code might not work on older or newer versions.
'''
# Add to this list supported FMC versions that your code support or ones you tested.
supported_versions = ['6.2.3.13', '6.3.0']
def fmc_login(manager: str, username: str, password: str, version: str) -> webdriver:
'''
    This function navigates to the login page, checks whether the page asks to confirm the insecure SSL certificate and
    clicks through it if it is there, then logs in, clicks the button to proceed if a session is already logged in with
    this account, and maximizes the browser window.
:param manager: The Address of the Manager/FMC.
:param username: The Manager/FMC login username.
    :param password: The Manager/FMC login password.
:param version: The version of FMC you are using.
:return: webdriver
'''
if version not in supported_versions:
        print('This version is not supported or this code has not been tested on this version.')
sys.exit(1)
    # These capabilities are used to disable all notifications and prompts when Selenium starts.
capabilities = {
'browserName': 'chrome',
'chromeOptions': {
'useAutomationExtension': False,
'forceDevToolsScreenshot': True,
'args': ['--start-maximized', '--disable-infobars', '--disable-extensions']
}
}
# Make sure you download the correct driver according to your web browser and browser version.
driver = webdriver.Chrome('bins/chromedriver', desired_capabilities=capabilities)
    driver.implicitly_wait(1) # Sets an implicit wait of one second when locating elements in the DOM.
if manager.startswith('http'):
driver.get('{}/login.cgi?'.format(manager))
else:
driver.get('https://{}/login.cgi?'.format(manager))
time.sleep(3)
    # Used to acknowledge the insecure SSL certificate if it prompts for it.
try:
advanced_button = driver.find_element(By.XPATH, '/html/body/div/div[2]/button[3]')
except (NoSuchElementException, ElementNotVisibleException, ElementNotInteractableException):
pass
else:
advanced_button.click()
time.sleep(2)
unsafe = driver.find_element(By.XPATH, '/html/body/div/div[3]/p[2]/a')
unsafe.click()
WebDriverWait(driver, 120).until(
expected_conditions.presence_of_element_located(
(By.ID,
'username'))
    ) # Waits until it finds the username form field, timing out after 120 seconds.
login_form_username = driver.find_element_by_id('username')
login_form_username.send_keys(username)
login_form_password = driver.find_element_by_id('password')
login_form_password.send_keys(password)
driver.maximize_window()
login_form_password.submit()
    # Used to accept the notification indicating that there is already a session open for this account.
time.sleep(3)
try:
proceed = driver.find_element(By.XPATH, '//*[@id="confirm_dialog"]/div[2]/input[1]')
except (NoSuchElementException, ElementNotVisibleException, ElementNotInteractableException):
pass
else:
proceed.click()
# return the webdriver as driver
return driver
def logout(driver: webdriver, manager: str) -> None:
'''
    This function logs you out and gracefully quits everything.
:param driver: The web browser driver.
:param manager: The Address of the Manager/FMC.
:return: None
'''
time.sleep(5)
if manager.startswith('http'):
driver.get('{}/login.cgi?logout=1'.format(manager))
else:
driver.get('https://{}/login.cgi?logout=1'.format(manager))
time.sleep(5)
driver.quit() # Gracefully quits everything.
def disabled_notifications(driver: webdriver, version) -> None:
'''
    This function disables the notifications on the FMC to prevent the notification popups from crashing Selenium.
:param driver: The web browser driver.
:param version: The version of FMC you are using.
:return: None
'''
if version == '6.2.3.13':
tasks_icon = '/html/body/div[13]/div[1]/ul/li[12]/div/div[3]'
gear_icon = '/html/body/div[13]/div[1]/ul/li[12]/div/div[4]/div[4]'
notifications_icon = '/html/body/div[13]/div[1]/ul/li[12]/div/div[5]/ul/li/div/div/img'
enabled_image = 'YgAAADuklEQVR42tWV7U9TZxiHnW7TLX'
disabled_image = 'YgAAAC8UlEQVR42tWVXUuaYRjHKxmxsY'
elif version == '6.3.0':
tasks_icon = '/html/body/div[7]/div[2]/div/div[2]/div/ul/li[8]/div/div[3]'
gear_icon = '/html/body/div[7]/div[2]/div/div[2]/div/ul/li[8]/div/div[4]/div[4]'
notifications_icon = '/html/body/div[7]/div[2]/div/div[2]/div/ul/li[8]/div/div[5]/ul/li/div/div/img'
enabled_image = 'YgAAADuklEQVR42tWV7U9TZxiHnW7TLX'
disabled_image = 'YgAAAC8UlEQVR42tWVXUuaYRjHKxmxsY'
else:
tasks_icon = ''
gear_icon = ''
notifications_icon = ''
enabled_image = ''
disabled_image = ''
time.sleep(2)
WebDriverWait(driver, 120).until(
expected_conditions.presence_of_element_located(
(By.XPATH,
tasks_icon))
    ) # Waits until it finds the tasks icon in the upper right corner, timing out after 120 seconds.
tasks_element = driver.find_element(By.XPATH, tasks_icon)
tasks_element.click()
gear_element = driver.find_element(By.XPATH, gear_icon)
gear_element.click()
notifications_button = driver.find_element(By.XPATH, notifications_icon)
notifications_button_img = notifications_button.get_attribute('src')[64:96]
    # These are the enabled and disabled images for notifications.
    # In earlier or newer versions of the Cisco Management Center these icons may or may not be different.
if notifications_button_img == enabled_image:
print('Disabling notifications!')
print(notifications_button_img)
notifications_button.click()
elif notifications_button_img == disabled_image:
print('Button is already disabled!')
print(notifications_button_img)
tasks_element.click()
time.sleep(2)
# Your custom code goes in here
def my_function(driver: webdriver, manager: str, version: str, *args, **kwargs) -> None:
'''
    Start your automation code here.
:param driver: The web browser driver.
:param manager: The Address of the Manager/FMC.
:param version: The version of FMC you are using.
:return: None
'''
if manager.startswith('http'):
driver.get('{}/platinum/ApplianceInformation.cgi'.format(manager))
else:
driver.get('https://{}/platinum/ApplianceInformation.cgi'.format(manager))
# The address of the manager you want to login to
MANAGER = '127.0.0.1'
# The version of FMC you are using
VERSION = '6.3.0'
# Login on the web interface
DRIVER = fmc_login(MANAGER, os.environ.get('USERNAME'), os.environ.get('PASSWORD'), VERSION)
# Disables the notifications globally
disabled_notifications(DRIVER, VERSION)
# Run your custom function
my_function(DRIVER, MANAGER, VERSION)
# Logout of the web interface and quit everything gracefully
logout(DRIVER, MANAGER)
|
[
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"time.sleep",
"os.environ.get",
"selenium.webdriver.Chrome",
"selenium.webdriver.support.ui.WebDriverWait",
"sys.exit"
] |
[((1926, 1998), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""bins/chromedriver"""'], {'desired_capabilities': 'capabilities'}), "('bins/chromedriver', desired_capabilities=capabilities)\n", (1942, 1998), False, 'from selenium import webdriver\n'), ((2258, 2271), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2268, 2271), False, 'import time\n'), ((3338, 3351), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3348, 3351), False, 'import time\n'), ((3907, 3920), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3917, 3920), False, 'import time\n'), ((4098, 4111), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4108, 4111), False, 'import time\n'), ((5449, 5462), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5459, 5462), False, 'import time\n'), ((6562, 6575), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6572, 6575), False, 'import time\n'), ((7312, 7338), 'os.environ.get', 'os.environ.get', (['"""USERNAME"""'], {}), "('USERNAME')\n", (7326, 7338), False, 'import os\n'), ((7340, 7366), 'os.environ.get', 'os.environ.get', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (7354, 7366), False, 'import os\n'), ((1428, 1439), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1436, 1439), False, 'import sys\n'), ((2612, 2625), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2622, 2625), False, 'import time\n'), ((2775, 2843), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'expected_conditions.presence_of_element_located', (["(By.ID, 'username')"], {}), "((By.ID, 'username'))\n", (2822, 2843), False, 'from selenium.webdriver.support import expected_conditions\n'), ((5509, 5580), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'expected_conditions.presence_of_element_located', (['(By.XPATH, tasks_icon)'], {}), '((By.XPATH, tasks_icon))\n', (5556, 5580), False, 'from selenium.webdriver.support import expected_conditions\n'), ((2733, 2759), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(120)'], {}), '(driver, 120)\n', (2746, 2759), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((5467, 5493), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(120)'], {}), '(driver, 120)\n', (5480, 5493), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')]
|
"""
Functions to be used as JobControl jobs
"""
from datetime import datetime
import logging
import os
from jobcontrol.globals import execution_context
from harvester.utils import (get_storage_direct,
jobcontrol_integration, report_progress)
logger = logging.getLogger('harvester_odt.pat_statistica')
def _get_uniqid():
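    # Unique id built from the current timestamp and process id, used to build unique storage URLs.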
return "{0:%y%m%d-%H%M%S}-{1}".format(datetime.now(), os.getpid())
def _prepare_storage_url(url):
return url.format(id=_get_uniqid())
def get_storage_from_arg(arg):
"""
Get a storage instance from an argument to a function.
This is needed for functions that may be called via
an external tool that doesn't allow passing object instances
directly.
"""
from harvester.ext.storage.base import BaseStorage
if isinstance(arg, BaseStorage):
return arg
if isinstance(arg, basestring):
return get_storage_direct(
_prepare_storage_url(arg), options={})
return get_storage_direct(
_prepare_storage_url(arg['url']),
options=arg.get('conf', None))
def crawl_statistica(storage):
"""Run crawler for statistica"""
import harvester_odt.pat_statistica.crawler
storage = get_storage_from_arg(storage)
with jobcontrol_integration():
harvester_odt.pat_statistica.crawler.crawl_statistica(storage)
return storage
def crawl_statistica_subpro(storage):
"""Run crawler for statistica - subprovinciale"""
import harvester_odt.pat_statistica.crawler
storage = get_storage_from_arg(storage)
with jobcontrol_integration():
harvester_odt.pat_statistica.crawler.crawl_statistica_subpro(storage)
return storage
def crawl_geocatalogo(storage):
"""Run crawler for GeoCatalogo"""
from harvester_odt.pat_geocatalogo.crawler import Geocatalogo
crawler = Geocatalogo('', {'with_resources': False})
storage = get_storage_from_arg(storage)
with jobcontrol_integration():
crawler.fetch_data(storage)
return storage
def crawl_comunweb(storage, url):
"""Run crawler for comunweb
:param storage: Output storage
:param url: base URL of the ComunWeb website
"""
from harvester_odt.comunweb.crawler import ComunWebCrawler
crawler = ComunWebCrawler(url)
storage = get_storage_from_arg(storage)
with jobcontrol_integration():
crawler.fetch_data(storage)
return storage
def convert_statistica_to_ckan(input_storage, storage):
"""Convert data from pat_statistica to Ckan"""
from harvester_odt.pat_statistica.converter \
import convert_statistica_to_ckan
input_storage = get_storage_from_arg(input_storage)
storage = get_storage_from_arg(storage)
with jobcontrol_integration():
convert_statistica_to_ckan(input_storage, storage)
return storage
def convert_statistica_subpro_to_ckan(input_storage, storage):
"""Convert data from pat_statistica_subpro to Ckan"""
from harvester_odt.pat_statistica.converter \
import convert_statistica_subpro_to_ckan
input_storage = get_storage_from_arg(input_storage)
storage = get_storage_from_arg(storage)
with jobcontrol_integration():
convert_statistica_subpro_to_ckan(input_storage, storage)
return storage
def convert_geocatalogo_to_ckan(input_storage, storage):
"""Convert data from pat_geocatalogo to Ckan"""
from harvester_odt.pat_geocatalogo.converter \
import GeoCatalogoToCkan
input_storage = get_storage_from_arg(input_storage)
storage = get_storage_from_arg(storage)
converter = GeoCatalogoToCkan('', {})
with jobcontrol_integration():
converter.convert(input_storage, storage)
return storage
def debugging_job(storage):
"""
Job to be used for debugging purposes.
"""
storage = get_storage_from_arg(storage)
with jobcontrol_integration():
report_progress(None, 0, 1)
job = execution_context.current_job
logger.debug('Running job: {0!r}'.format(job))
deps = list(job.get_deps())
logger.debug('Found {0} dependencies'.format(len(deps)))
for dep in deps:
build = dep.get_latest_successful_build()
if build is None:
logger.debug('Dependency {0!r} has no builds'
.format(dep))
else:
logger.debug('Dependency {0!r} latest build returned {1!r}'
.format(dep, build['retval']))
with jobcontrol_integration():
report_progress(None, 1, 1)
return storage
|
[
"os.getpid",
"harvester_odt.pat_geocatalogo.crawler.Geocatalogo",
"harvester.utils.jobcontrol_integration",
"harvester_odt.pat_statistica.converter.convert_statistica_to_ckan",
"harvester_odt.pat_statistica.converter.convert_statistica_subpro_to_ckan",
"harvester_odt.pat_geocatalogo.converter.GeoCatalogoToCkan",
"harvester.utils.report_progress",
"harvester_odt.comunweb.crawler.ComunWebCrawler",
"datetime.datetime.now",
"logging.getLogger"
] |
[((284, 333), 'logging.getLogger', 'logging.getLogger', (['"""harvester_odt.pat_statistica"""'], {}), "('harvester_odt.pat_statistica')\n", (301, 333), False, 'import logging\n'), ((1854, 1896), 'harvester_odt.pat_geocatalogo.crawler.Geocatalogo', 'Geocatalogo', (['""""""', "{'with_resources': False}"], {}), "('', {'with_resources': False})\n", (1865, 1896), False, 'from harvester_odt.pat_geocatalogo.crawler import Geocatalogo\n'), ((2270, 2290), 'harvester_odt.comunweb.crawler.ComunWebCrawler', 'ComunWebCrawler', (['url'], {}), '(url)\n', (2285, 2290), False, 'from harvester_odt.comunweb.crawler import ComunWebCrawler\n'), ((3597, 3622), 'harvester_odt.pat_geocatalogo.converter.GeoCatalogoToCkan', 'GeoCatalogoToCkan', (['""""""', '{}'], {}), "('', {})\n", (3614, 3622), False, 'from harvester_odt.pat_geocatalogo.converter import GeoCatalogoToCkan\n'), ((397, 411), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (409, 411), False, 'from datetime import datetime\n'), ((413, 424), 'os.getpid', 'os.getpid', ([], {}), '()\n', (422, 424), False, 'import os\n'), ((1265, 1289), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (1287, 1289), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((1578, 1602), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (1600, 1602), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((1950, 1974), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (1972, 1974), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((2344, 2368), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (2366, 2368), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((2737, 2761), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (2759, 2761), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((2771, 2821), 'harvester_odt.pat_statistica.converter.convert_statistica_to_ckan', 'convert_statistica_to_ckan', (['input_storage', 'storage'], {}), '(input_storage, storage)\n', (2797, 2821), False, 'from harvester_odt.pat_statistica.converter import convert_statistica_to_ckan\n'), ((3174, 3198), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (3196, 3198), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((3208, 3265), 'harvester_odt.pat_statistica.converter.convert_statistica_subpro_to_ckan', 'convert_statistica_subpro_to_ckan', (['input_storage', 'storage'], {}), '(input_storage, storage)\n', (3241, 3265), False, 'from harvester_odt.pat_statistica.converter import convert_statistica_subpro_to_ckan\n'), ((3633, 3657), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (3655, 3657), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((3872, 3896), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (3894, 3896), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((3906, 3933), 'harvester.utils.report_progress', 'report_progress', (['None', '(0)', '(1)'], {}), '(None, 0, 1)\n', (3921, 3933), False, 'from 
harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((4467, 4491), 'harvester.utils.jobcontrol_integration', 'jobcontrol_integration', ([], {}), '()\n', (4489, 4491), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n'), ((4501, 4528), 'harvester.utils.report_progress', 'report_progress', (['None', '(1)', '(1)'], {}), '(None, 1, 1)\n', (4516, 4528), False, 'from harvester.utils import get_storage_direct, jobcontrol_integration, report_progress\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 7 10:51:21 2018
@author: hertta
"""
from shapely.ops import cascaded_union
from copy import deepcopy
import random
from shapely.geometry import LineString
class SchoolDistr:
""" The class representing the school districts """
def __init__(self, school_id, blocks, td_matrix):
# class attribute 1: the school id number
self.school_id = school_id
# class attribute 2: the blocks belonging to the district (as a dict,
# with keys corresponding to the td_matrix keys).
self.blocks = blocks
# class attribute 3: distance matrix (as a dict, with keys
# corresponding to the blocks keys).
self.td_matrix = td_matrix
        # class attribute 4: the geometry of the district (shapely polygon)
self.geometry = None
        # class attribute 5: the maximum allowed distance from block to the
# district's school
self.max_distance = None
        # class attribute 6: the amount of 7-year-olds living inside the
# district
self.students = None
        # class attribute 7: the maximum amount of 7-year-olds that the
# district can host
self.student_limit = None
        # class attribute 8: the current value of the optimization parameter
self.optimization_value = None
# function call: initiate district attributes
self.initiate_distr_attrs()
# Method for initializing attributes
def initiate_distr_attrs(self):
self.geometry = self.calculate_geometry()
self.max_distance = self.calculate_max_distance()
self.students = self.calculate_student_base()
self.student_limit = self.students*1.20
self.optimization_value = self.calculate_optimization_value()
# Method for updating attributes
def update_distr(self):
self.geometry = self.calculate_geometry()
self.students = self.calculate_student_base()
self.optimization_value = self.calculate_optimization_value()
# Method for calculating the district's geometry as cascaded union of the
# block geometries
def calculate_geometry(self):
geom_list = []
for key, block in self.blocks.items():
geom_list.append(block.geometry)
return cascaded_union(geom_list)
# Method for calculating the district's maximum distance constraint. The
# travel time data must not include infinite distance values.
def calculate_max_distance(self):
maxt = 0
for key, block in self.blocks.items():
ttime = self.td_matrix[key]['walk_d']
if ttime > maxt:
maxt = ttime
return maxt * 1.20
# Method for calculating the current value of the optimization parameter
def calculate_optimization_value(self):
majority_pop = 0
minority_pop = 0
for key, block in self.blocks.items():
majority_pop += block.lang_majority
minority_pop += block.lang_other
return minority_pop/(minority_pop + majority_pop)
# Method for calculating the current amount of 7-year-olds living
# inside the district
def calculate_student_base(self):
student_sum = 0
for key, block in self.blocks.items():
student_sum += block.student_base
return student_sum
# Method for calculating the district's neighbourhood: which blocks
# the district shares a line segment with
def touches_which(self, blocks_dict):
neighbors = []
for key, block in blocks_dict.items():
if type(self.geometry.intersection(block.geometry)) == LineString:
if key not in self.blocks:
neighbors.append(block)
return neighbors
# Method for calculating whether a block is too far for adoption
# Returns True if the block is too far
def is_too_far(self, block):
dist = self.td_matrix[block.block_id]['walk_d']
return dist > self.max_distance
# Method for adopting a selected block
def add_block(self, block):
if block == None:
return
else:
block.school_id = self.school_id
self.blocks[block.block_id] = block
# Method for removing an adopted block
def remove_block(self, block):
if block == None:
return
else:
del self.blocks[block.block_id]
# A method for testing if adopting a block would break another district's
# contiguity. Returns True if contiguity would break.
def break_contiguity(self, block):
blocks_copy = deepcopy(self.blocks)
geom_list = []
for key, item in blocks_copy.items():
geom_list.append(item.geometry)
geom1 = cascaded_union(geom_list)
del blocks_copy[block.block_id]
geom_list = []
for key, item in blocks_copy.items():
geom_list.append(item.geometry)
geom2 = cascaded_union(geom_list)
return type(geom1) != type(geom2)
# A method for selecting the best block in neighbourhood
def select_best_block(self, blockset, districts, global_mean,
global_st_dev):
majority_pop = 0
minority_pop= 0
for key, value in self.blocks.items():
majority_pop += value.lang_majority
minority_pop += value.lang_other
best_block = None
for block in blockset:
# test for rule 2
if block.contains_school == False:
# test for rule 3
if (block.student_base + self.students) <= self.student_limit:
# test for rule 4
if self.is_too_far(block) == False:
current_district = districts[block.school_id]
# test for rule 5
if current_district.break_contiguity(block) == False:
# calculate specs for the block's current district
current_district_majority_pop = 0
current_district_minority_pop= 0
for key, value in current_district.blocks.items():
current_district_majority_pop += \
value.lang_majority
current_district_minority_pop += \
value.lang_other
current_d_new_value = ((current_district_minority_pop
- block.lang_other)/
(current_district_minority_pop
- block.lang_other +
current_district_majority_pop
- block.lang_majority))
current_d_current_value = ((current_district_minority_pop)/
(current_district_minority_pop
+ current_district_majority_pop))
# test the adoption outcome in relation to current state
if best_block == None:
own_new_value1 = ((minority_pop + block.lang_other)/
(minority_pop + block.lang_other +
majority_pop + block.lang_majority))
# test for the rule 6
if (abs(current_d_new_value - global_mean) <=
abs(current_d_current_value - global_mean) or
abs((current_d_current_value - global_mean) -
(self.optimization_value - global_mean)) >
abs((current_d_new_value - global_mean) -
(own_new_value1 - global_mean))):
if (abs(own_new_value1 - global_mean) <
abs(self.optimization_value - global_mean)):
best_block = block
# test the adoption outcome in relation to the current best_block
else:
own_new_value2 = ((minority_pop + block.lang_other)/
(minority_pop + block.lang_other +
majority_pop + block.lang_majority))
current_best = ((minority_pop + best_block.lang_other)/
(minority_pop + best_block.lang_other +
majority_pop + best_block.lang_majority))
# test for the rule 6
if (abs(current_d_new_value - global_mean) <=
abs(current_d_current_value - global_mean) or
abs((current_d_current_value - global_mean) -
(self.optimization_value - global_mean)) >
abs((current_d_new_value - global_mean) -
(own_new_value1 - global_mean))):
if (abs(own_new_value2 - global_mean) <
abs(current_best - global_mean)):
best_block = block
# return the best block
return best_block
# A method for selecting a random block in neighbourhood
def select_random_block(self, blockset, districts):
blocklist = []
for block in blockset:
# test for rule 2
if block.contains_school == False:
# test for rule 3
if (block.student_base + self.students) <= self.student_limit:
# test for rule 4
if self.is_too_far(block) == False:
current_district = districts[block.school_id]
# test for rule 5
if current_district.break_contiguity(block) == False:
blocklist.append(block)
if len(blocklist) > 0:
# generate a random number for selecting a block
randomindx = random.randint(0,len(blocklist)-1)
# return a random block according to the random number generated
return blocklist[randomindx]
class Block:
""" The class representing the residential blocks """
def __init__(self, geometry, block_id, lang_majority, lang_other, student_base,
school_id, contains_school):
# class attribute 1: the geometry of the block (shapely polygon)
self.geometry = geometry
# class attribute 2: block id
self.block_id = block_id
# class attribute 3: the amount of population with Finnish or Swedish as
# their mother tongue
self.lang_majority = lang_majority
# class attribute 4: the amount of population with other languages than Finnish
# or Swedish as their mother tongue
self.lang_other = lang_other
# class attribute 5: the amount of 7-year-olds living in the block
self.student_base = student_base
# class attribute 6: the id of the school district the block currently
# belongs to
self.school_id = school_id
# class attribute 7: True if the block contains a school, otherwise False
self.contains_school = contains_school
|
[
"copy.deepcopy",
"shapely.ops.cascaded_union"
] |
[((2389, 2414), 'shapely.ops.cascaded_union', 'cascaded_union', (['geom_list'], {}), '(geom_list)\n', (2403, 2414), False, 'from shapely.ops import cascaded_union\n'), ((4826, 4847), 'copy.deepcopy', 'deepcopy', (['self.blocks'], {}), '(self.blocks)\n', (4834, 4847), False, 'from copy import deepcopy\n'), ((4986, 5011), 'shapely.ops.cascaded_union', 'cascaded_union', (['geom_list'], {}), '(geom_list)\n', (5000, 5011), False, 'from shapely.ops import cascaded_union\n'), ((5190, 5215), 'shapely.ops.cascaded_union', 'cascaded_union', (['geom_list'], {}), '(geom_list)\n', (5204, 5215), False, 'from shapely.ops import cascaded_union\n')]
|
#!/usr/bin/env python
import time
from optparse import OptionParser
from .component_manager import ComponentManager
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-p', '--profile', dest='profile', default=None)
# parser.add_option('-f', '--file', dest='file', default=None)
parser.add_option('-v', '--verbose', dest='verbose', action="store_true", default=False)
parser.add_option('-y', '--yml', '--yaml', '--config-file', dest='config_file', default=None)
opts, args = parser.parse_args()
if opts.profile == None:
import socket
opts.profile = socket.gethostname().replace('.', '_')
del socket
options = {
'verbose': opts.verbose,
'profile': opts.profile,
'config_file': opts.config_file
}
while True:
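        # Recreate and run the ComponentManager until it no longer requests a restart.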
cm = ComponentManager(options)
cm.setup()
try:
while cm.running:
cm.update()
except KeyboardInterrupt:
print('KeyboardInterrupt. Quitting.')
cm.destroy()
if not cm.restart:
print(cm.shutdown_message)
break
print('restarting...')
time.sleep(1.0)
|
[
"socket.gethostname",
"optparse.OptionParser",
"time.sleep"
] |
[((157, 171), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (169, 171), False, 'from optparse import OptionParser\n'), ((1179, 1194), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (1189, 1194), False, 'import time\n'), ((614, 634), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (632, 634), False, 'import socket\n')]
|
# (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.conf import settings
from rest_framework import views
from rest_framework import status as status_codes
import rest_framework.exceptions as drf_exc
from rest_framework.permissions import IsAuthenticated
from pulpcore.app.serializers import ArtifactSerializer
from pulpcore.app.response import OperationPostponedResponse
from pulpcore.tasking.tasks import enqueue_with_reservation
from pulpcore.app import models as pulp_models
from galaxy.api.v2.serializers import collection as serializers
from galaxy.main import models
from galaxy.pulp import tasks
__all__ = [
'UploadCollectionView'
]
class CollectionExistsError(drf_exc.APIException):
status_code = status_codes.HTTP_409_CONFLICT
default_detail = 'Collection already exists.'
default_code = 'collection_exists'
class UploadCollectionView(views.APIView):
permission_classes = (IsAuthenticated, )
def post(self, request, *args, **kwargs):
"""Upload an Ansible Collection."""
serializer = serializers.UploadCollectionSerializer(
data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
# TODO(cutwater): Merge Artifact and UploadCollectionSerializers
# TODO(cutwater): Extract namespace and name from `METADATA.json`
# and validate that collection name matches filename.
namespace = self._validate_namespace(request.user, data)
artifact_data = {'file': request.data['file']}
if serializer.data['sha256'] is not None:
artifact_data['sha256'] = data['sha256']
repository = pulp_models.Repository.objects.get(
name=settings.GALAXY_PULP_REPOSITORY)
artifact = self._save_artifact(artifact_data)
import_task = models.ImportTask.objects.create(
owner=request.user,
state=models.ImportTask.STATE_PENDING,
)
async_result = enqueue_with_reservation(
tasks.import_collection, [],
kwargs={
'artifact_pk': artifact.pk,
'repository_pk': repository.pk,
'namespace_pk': namespace.pk,
'task_id': import_task.id,
})
return OperationPostponedResponse(async_result, request)
def _validate_namespace(self, user, data):
"""Validate that collection namespace exists and user owns it."""
ns_name = data['filename'].namespace
try:
ns = models.Namespace.objects.get(name=ns_name)
except models.Namespace.DoesNotExist:
raise drf_exc.ValidationError(
'Namespace {0} does not exist'.format(ns_name))
if not ns.owners.filter(id=user.id).count():
raise drf_exc.PermissionDenied(
'The namespace listed on your filename must match one of '
'the namespaces you have access to.'
)
return ns
def _save_artifact(self, data):
artifact_serializer = ArtifactSerializer(data=data)
try:
artifact_serializer.is_valid(raise_exception=True)
except drf_exc.ValidationError as e:
error_codes = e.get_codes()
if 'unique' in error_codes.get('non_field_errors', []):
raise CollectionExistsError()
raise
return artifact_serializer.save()
|
[
"galaxy.main.models.Namespace.objects.get",
"pulpcore.app.models.Repository.objects.get",
"pulpcore.app.response.OperationPostponedResponse",
"pulpcore.app.serializers.ArtifactSerializer",
"rest_framework.exceptions.PermissionDenied",
"pulpcore.tasking.tasks.enqueue_with_reservation",
"galaxy.api.v2.serializers.collection.UploadCollectionSerializer",
"galaxy.main.models.ImportTask.objects.create"
] |
[((1682, 1774), 'galaxy.api.v2.serializers.collection.UploadCollectionSerializer', 'serializers.UploadCollectionSerializer', ([], {'data': 'request.data', 'context': "{'request': request}"}), "(data=request.data, context={\n 'request': request})\n", (1720, 1774), True, 'from galaxy.api.v2.serializers import collection as serializers\n'), ((2346, 2418), 'pulpcore.app.models.Repository.objects.get', 'pulp_models.Repository.objects.get', ([], {'name': 'settings.GALAXY_PULP_REPOSITORY'}), '(name=settings.GALAXY_PULP_REPOSITORY)\n', (2380, 2418), True, 'from pulpcore.app import models as pulp_models\n'), ((2510, 2606), 'galaxy.main.models.ImportTask.objects.create', 'models.ImportTask.objects.create', ([], {'owner': 'request.user', 'state': 'models.ImportTask.STATE_PENDING'}), '(owner=request.user, state=models.\n ImportTask.STATE_PENDING)\n', (2542, 2606), False, 'from galaxy.main import models\n'), ((2661, 2849), 'pulpcore.tasking.tasks.enqueue_with_reservation', 'enqueue_with_reservation', (['tasks.import_collection', '[]'], {'kwargs': "{'artifact_pk': artifact.pk, 'repository_pk': repository.pk, 'namespace_pk':\n namespace.pk, 'task_id': import_task.id}"}), "(tasks.import_collection, [], kwargs={'artifact_pk':\n artifact.pk, 'repository_pk': repository.pk, 'namespace_pk': namespace.\n pk, 'task_id': import_task.id})\n", (2685, 2849), False, 'from pulpcore.tasking.tasks import enqueue_with_reservation\n'), ((2960, 3009), 'pulpcore.app.response.OperationPostponedResponse', 'OperationPostponedResponse', (['async_result', 'request'], {}), '(async_result, request)\n', (2986, 3009), False, 'from pulpcore.app.response import OperationPostponedResponse\n'), ((3728, 3757), 'pulpcore.app.serializers.ArtifactSerializer', 'ArtifactSerializer', ([], {'data': 'data'}), '(data=data)\n', (3746, 3757), False, 'from pulpcore.app.serializers import ArtifactSerializer\n'), ((3207, 3249), 'galaxy.main.models.Namespace.objects.get', 'models.Namespace.objects.get', ([], {'name': 'ns_name'}), '(name=ns_name)\n', (3235, 3249), False, 'from galaxy.main import models\n'), ((3475, 3603), 'rest_framework.exceptions.PermissionDenied', 'drf_exc.PermissionDenied', (['"""The namespace listed on your filename must match one of the namespaces you have access to."""'], {}), "(\n 'The namespace listed on your filename must match one of the namespaces you have access to.'\n )\n", (3499, 3603), True, 'import rest_framework.exceptions as drf_exc\n')]
|
from flexx import flx
from flexx import event
import os
from tornado.web import StaticFileHandler
class ScaleImageWidget(flx.Widget):
""" Display an image from a url.
The ``node`` of this widget is an
`<img> <https://developer.mozilla.org/docs/Web/HTML/Element/img>`_
wrapped in a `<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_
(the ``outernode``) to handle sizing.
"""
DEFAULT_MIN_SIZE = 16, 16
_sequence = 0
source = event.StringProp('', settable=True, doc="""
The source of the image, This can be anything that an HTML
img element supports.
""")
stretch = event.BoolProp(False, settable=True, doc="""
Whether the image should stretch to fill all available
space, or maintain its aspect ratio (default).
""")
def _create_dom(self):
global window
outer = window.document.createElement('div')
inner = window.document.createElement('img')
outer.appendChild(inner)
return outer, inner
@event.reaction
def __resize_image(self):
size = self.size
if self.stretch:
self.node.style.maxWidth = None
self.node.style.maxHeight = None
self.node.style.width = size[0] + 'px'
self.node.style.height = size[1] + 'px'
else:
self.node.style.backgroundColor = None
self.node.style.marginLeft = "5%"
self.node.style.marginTop = "5%"
self.node.style.maxWidth = "90%"
self.node.style.maxWidth = "auto"
self.node.style.width = "90%"
self.node.style.height = "auto"
@event.reaction
def __source_changed(self):
self.node.src = self.source
class ClickableImage(flx.Widget):
def init(self, source):
self.src = source
self.img = ScaleImageWidget(source = source, flex=1)
self.img.node.addEventListener("mouseover",
lambda e: self._show_clickable_in())
self.img.node.addEventListener("mouseout",
lambda e: self._show_clickable_out())
def _show_clickable_in(self):
size = self.img.size[0]
p20 = size // 20
self.img.node.style.boxShadow = "0px 0px "+ p20 + "px 2px black"
def _show_clickable_out(self):
self.img.node.style.boxShadow = None
@flx.action
def set_source(self, source):
self.src = source
if self.src == None:
self.img.node.style.visibility = "hidden"
else:
self.img.node.style.visibility = "visible"
self.img.set_source(source)
class ImageGrid(flx.Widget):
def init(self, width=4, height=4,
path=lambda x, y: "/images/starting_image.png",
handler=lambda o, x, y: print(x, y)):
self.width = width
self.height = height
self.path = path
self.handler = handler
self.imageGrid = [[None for idy in range(height)] for idx in range(width)]
with flx.HFix():
for idx in range(width):
with flx.VFix(flex=1):
for idy in range(height):
self.imageGrid[idx][idy] = ClickableImage(path(idx, idy), flex=1)
a, b = idx, idy
self.imageGrid[idx][idy].node.addEventListener("click",
self._on_click_handler(a, b))
def _on_click_handler(self, idx, idy):
return lambda e: self.handler(self, idx, idy)
def path_provider(x, y):
if (x + y) % 2 == 0:
return "/images/starting_image.png"
else:
return "/images/cytosol_image.png"
class FewShot(flx.Widget):
def init(self):
self.selectedImages = []
with flx.TabLayout() as self.tabs:
with flx.HFix(title="selection", flex=1) as self.selector_view:
with flx.VFix() as self.images:
flx.Label(text="Images", flex=(1, 1))
self.imageGrid = ImageGrid(4, 4, path_provider,
lambda o, idx, idy: self.image_click_handler(o, idx, idy),
flex=(1, 9))
self.images.node.style.backgroundColor = "#88888888"
with flx.VFix() as self.selected:
flx.Label(text="Selected", flex=(1, 1))
self.selectedGrid = ImageGrid(4, 4, self.selected_provider,
lambda o, idx, idy: self.selected_click_handler(o, idx, idy),
flex=(1, 9))
with flx.HFix(title="results", flex=1) as self.result_view:
self.resultGrid = ImageGrid(8, 4, path_provider,
flex=(1, 1))
@flx.action
def image_click_handler(self, o, idx, idy):
source = o.imageGrid[idx][idy].src
if (source, idx, idy) not in self.selectedImages:
self.selectedImages.append((source, idx, idy))
length = len(self.selectedImages)
new_position = (
(length - 1) % 4,
(length - 1) // 4
)
self.selectedGrid.imageGrid[new_position[0]][new_position[1]].set_source(source)
@flx.action
def selected_click_handler(self, o, idx, idy):
position = idy * 4 + idx
if position < len(self.selectedImages):
self.selectedImages.pop(position)
self.selectedGrid.imageGrid[idx][idy].set_source(None)
for pos, elem in enumerate(self.selectedImages):
source = elem[0]
new_position = (
pos % 4,
pos // 4
)
self.selectedGrid.imageGrid[new_position[0]][new_position[1]].set_source(source)
for pos in range(len(self.selectedImages), 16):
new_position = (
pos % 4,
pos // 4
)
self.selectedGrid.imageGrid[new_position[0]][new_position[1]].set_source(None)
def selected_provider(self, idx, idy):
return lambda x, y: None
tornado_app = flx.create_server().app
dirname = os.path.expanduser('~/Documents/knoplab/yeastimages_presentation/')
tornado_app.add_handlers(r".*", [
(r"/images/(.*)", StaticFileHandler, {"path": dirname}),
])
app = flx.App(FewShot)
app.launch('browser')
flx.run()
|
[
"flexx.flx.Label",
"flexx.flx.TabLayout",
"flexx.flx.App",
"flexx.event.StringProp",
"flexx.event.BoolProp",
"flexx.flx.run",
"flexx.flx.create_server",
"flexx.flx.VFix",
"os.path.expanduser",
"flexx.flx.HFix"
] |
[((5691, 5758), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Documents/knoplab/yeastimages_presentation/"""'], {}), "('~/Documents/knoplab/yeastimages_presentation/')\n", (5709, 5758), False, 'import os\n'), ((5863, 5879), 'flexx.flx.App', 'flx.App', (['FewShot'], {}), '(FewShot)\n', (5870, 5879), False, 'from flexx import flx\n'), ((5902, 5911), 'flexx.flx.run', 'flx.run', ([], {}), '()\n', (5909, 5911), False, 'from flexx import flx\n'), ((485, 648), 'flexx.event.StringProp', 'event.StringProp', (['""""""'], {'settable': '(True)', 'doc': '"""\n The source of the image, This can be anything that an HTML\n img element supports.\n """'}), '(\'\', settable=True, doc=\n """\n The source of the image, This can be anything that an HTML\n img element supports.\n """\n )\n', (501, 648), False, 'from flexx import event\n'), ((654, 839), 'flexx.event.BoolProp', 'event.BoolProp', (['(False)'], {'settable': '(True)', 'doc': '"""\n Whether the image should stretch to fill all available\n space, or maintain its aspect ratio (default).\n """'}), '(False, settable=True, doc=\n """\n Whether the image should stretch to fill all available\n space, or maintain its aspect ratio (default).\n """\n )\n', (668, 839), False, 'from flexx import event\n'), ((5657, 5676), 'flexx.flx.create_server', 'flx.create_server', ([], {}), '()\n', (5674, 5676), False, 'from flexx import flx\n'), ((2879, 2889), 'flexx.flx.HFix', 'flx.HFix', ([], {}), '()\n', (2887, 2889), False, 'from flexx import flx\n'), ((3520, 3535), 'flexx.flx.TabLayout', 'flx.TabLayout', ([], {}), '()\n', (3533, 3535), False, 'from flexx import flx\n'), ((3561, 3596), 'flexx.flx.HFix', 'flx.HFix', ([], {'title': '"""selection"""', 'flex': '(1)'}), "(title='selection', flex=1)\n", (3569, 3596), False, 'from flexx import flx\n'), ((4299, 4332), 'flexx.flx.HFix', 'flx.HFix', ([], {'title': '"""results"""', 'flex': '(1)'}), "(title='results', flex=1)\n", (4307, 4332), False, 'from flexx import flx\n'), ((2935, 2951), 'flexx.flx.VFix', 'flx.VFix', ([], {'flex': '(1)'}), '(flex=1)\n', (2943, 2951), False, 'from flexx import flx\n'), ((3633, 3643), 'flexx.flx.VFix', 'flx.VFix', ([], {}), '()\n', (3641, 3643), False, 'from flexx import flx\n'), ((3670, 3707), 'flexx.flx.Label', 'flx.Label', ([], {'text': '"""Images"""', 'flex': '(1, 1)'}), "(text='Images', flex=(1, 1))\n", (3679, 3707), False, 'from flexx import flx\n'), ((3984, 3994), 'flexx.flx.VFix', 'flx.VFix', ([], {}), '()\n', (3992, 3994), False, 'from flexx import flx\n'), ((4023, 4062), 'flexx.flx.Label', 'flx.Label', ([], {'text': '"""Selected"""', 'flex': '(1, 1)'}), "(text='Selected', flex=(1, 1))\n", (4032, 4062), False, 'from flexx import flx\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
from google.protobuf import json_format
from common.python.common.consts import ModelType
from common.python.utils import log_utils
from kernel.components.binning.vertfeaturebinning.vert_binning_promoter import VertFeatureBinningPromoter
from kernel.components.binning.vertfeaturebinning.vert_binning_provider import VertFeatureBinningProvider
from kernel.components.featurecalculation.base import filter_factory
from kernel.components.featurecalculation.base.calculation_properties import CalculationProperties, \
CompletedCalculationResults
from kernel.components.featurecalculation.param import FeatureCalculationParam
from kernel.model_base import ModelBase
from kernel.protobuf.generated import feature_calculation_param_pb2, feature_calculation_meta_pb2
from kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable import \
VertFeatureCalculationTransferVariable
from kernel.utils import abnormal_detection
from kernel.utils import consts
from kernel.utils.data_util import get_header
from kernel.utils.io_check import assert_io_num_rows_equal
LOGGER = log_utils.get_logger()
MODEL_PARAM_NAME = 'FeatureCalculationParam'
MODEL_META_NAME = 'FeatureCalculationMeta'
MODEL_NAME = 'VertFeatureCalculation'
class BaseVertFeatureCalculation(ModelBase):
def __init__(self):
super(BaseVertFeatureCalculation, self).__init__()
self.transfer_variable = VertFeatureCalculationTransferVariable()
self.curt_calculate_properties = CalculationProperties()
self.completed_calculation_result = CompletedCalculationResults()
self.schema = None
self.header = None
self.party_name = 'Base'
# Possible previous model
self.binning_model = None
self.static_obj = None
self.model_param = FeatureCalculationParam()
self.meta_dicts = {}
def _init_model(self, params):
self.model_param = params
# self.cols_index = params.calculate_cols
self.filter_methods = params.filter_methods
# self.local_only = params.local_only
def _init_calculate_params(self, data_instances):
if self.schema is None:
self.schema = data_instances.schema
if self.header is not None:
return
self.schema = data_instances.schema
header = get_header(data_instances)
self.header = header
self.curt_calculate_properties.set_header(header)
self.curt_calculate_properties.set_last_left_col_indexes([x for x in range(len(header))])
if self.model_param.calculate_col_indexes == -1:
self.curt_calculate_properties.set_calculate_all_cols()
else:
self.curt_calculate_properties.add_calculate_col_indexes(self.model_param.calculate_col_indexes)
self.curt_calculate_properties.add_calculate_col_names(self.model_param.calculate_names)
self.completed_calculation_result.set_header(header)
self.completed_calculation_result.set_calculate_col_names(self.curt_calculate_properties.calculate_col_names)
self.completed_calculation_result.set_all_left_col_indexes(self.curt_calculate_properties.all_left_col_indexes)
def _get_meta(self):
self.meta_dicts['filter_methods'] = self.filter_methods
self.meta_dicts['cols'] = self.completed_calculation_result.get_calculate_col_names()
self.meta_dicts['need_run'] = self.need_run
meta_protobuf_obj = feature_calculation_meta_pb2.FeatureCalculationMeta(**self.meta_dicts)
return meta_protobuf_obj
def _get_param(self):
LOGGER.debug("curt_calculate_properties.left_col_name: {}, completed_calculation_result: {}".format(
self.curt_calculate_properties.left_col_names, self.completed_calculation_result.all_left_col_names
))
LOGGER.debug("Length of left cols: {}".format(len(self.completed_calculation_result.all_left_col_names)))
# left_cols = {x: True for x in self.curt_calculate_properties.left_col_names}
left_cols = {x: True for x in self.completed_calculation_result.all_left_col_names}
final_left_cols = feature_calculation_param_pb2.LeftCols(
original_cols=self.completed_calculation_result.get_calculate_col_names(),
left_cols=left_cols
)
result_obj = feature_calculation_param_pb2.FeatureCalculationParam(
results=self.completed_calculation_result.filter_results,
col_names=self.completed_calculation_result.get_sorted_col_names(),
)
result_obj_list = []
result_obj_dic = {}
result = json_format.MessageToJson(result_obj)
result_obj_dic["role"] = self.role
result_obj_dic["member_id"] = self.member_id
result_obj_dic["results"] = json.loads(result)["results"]
LOGGER.debug("json_result: {}".format(result_obj_dic))
result_obj_list.append(result_obj_dic)
if self.role == consts.PROVIDER:
print(VertFeatureCalculationTransferVariable().provider_calculate_results.remote(result_obj_dic,
role=consts.PROMOTER,
idx=0))
elif self.role == consts.PROMOTER:
provider_result_obj_dics = VertFeatureCalculationTransferVariable().provider_calculate_results.get(idx=-1)
for provider_result_obj in provider_result_obj_dics:
result_obj_list.append(provider_result_obj)
calculate_results_list = []
for result_obj in result_obj_list:
role = result_obj["role"]
member_id = str(result_obj["member_id"])
new_results = []
results = result_obj["results"]
for result in results:
filter_name = result["filterName"]
feature_values = result["featureValues"]
feature_values = dict(sorted(feature_values.items(), key=lambda e: e[1], reverse=True))
cols = []
values = []
for key in feature_values:
cols.append(key)
values.append(feature_values[key])
new_result = feature_calculation_param_pb2.FeatureCalculationValueResultParam(
filter_name=filter_name,
cols=cols,
values=values,
)
new_results.append(new_result)
new_result_obj = feature_calculation_param_pb2.FeatureCalculationResultParam(
role=role,
member_id=member_id,
results=new_results
)
calculate_results_list.append(new_result_obj)
results = feature_calculation_param_pb2.FeatureCalculationResultsParam(
calculate_results=calculate_results_list
)
return results
def save_data(self):
return self.data_output
def export_model(self):
LOGGER.debug("Model output is : {}".format(self.model_output))
if self.model_output is not None:
LOGGER.debug("model output is already exist, return directly")
return self.model_output
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
MODEL_META_NAME: meta_obj,
MODEL_PARAM_NAME: param_obj
}
self.model_output = result
return result
def load_model(self, model_dict):
if ModelType.TRAIN_MODEL in model_dict.get("model", {}):
# self._parse_need_run(model_dict, MODEL_META_NAME)
LOGGER.debug("Feature calculation need run: {}".format(self.need_run))
if not self.need_run:
return
model_param = list(model_dict.get('model').values())[0].get(MODEL_PARAM_NAME)
model_meta = list(model_dict.get('model').values())[0].get(MODEL_META_NAME)
self.model_output = {
MODEL_META_NAME: model_meta,
MODEL_PARAM_NAME: model_param
}
header = list(model_param.header)
# self.schema = {'header': header}
self.header = header
self.curt_calculate_properties.set_header(header)
self.completed_calculation_result.set_header(header)
self.curt_calculate_properties.set_last_left_col_indexes([x for x in range(len(header))])
self.curt_calculate_properties.add_calculate_col_names(header)
final_left_cols_names = dict(model_param.final_left_cols.left_cols)
LOGGER.debug("final_left_cols_names: {}".format(final_left_cols_names))
for col_name, _ in final_left_cols_names.items():
self.curt_calculate_properties.add_left_col_name(col_name)
self.completed_calculation_result.add_filter_results(filter_name='conclusion',
calculate_properties=self.curt_calculate_properties)
self.update_curt_calculate_param()
LOGGER.debug("After load model, completed_calculation_result.all_left_col_indexes: {}".format(
self.completed_calculation_result.all_left_col_indexes))
if ModelType.BINNING_MODEL in model_dict.get("model", {}):
LOGGER.debug("Has binning_model, model_dict: {}".format(model_dict))
if self.role == consts.PROMOTER:
self.binning_model = VertFeatureBinningPromoter()
else:
self.binning_model = VertFeatureBinningProvider()
# binning = model_dict['model'][ModelType.BINNING_MODEL]
# Model_Param = binning[0]['Model_Param']
# newProviderResults = []
# if 'providerResults' in Model_Param.keys():
# providerResults = Model_Param['providerResults']
# for providerResult in providerResults:
# binningResult = providerResult['binningResult']
# if binningResult:
# newProviderResults.append(providerResults)
# Model_Param['providerResults'] = newProviderResults
# binning[0]['Model_Param'] = Model_Param
new_model_dict = {'model': model_dict['model'][ModelType.BINNING_MODEL]}
LOGGER.debug(f'model={new_model_dict}')
self.binning_model.load_model(new_model_dict)
@staticmethod
def calculate_cols(instance, left_col_idx):
instance.features = instance.features[left_col_idx]
return instance
def _transfer_data(self, data_instances):
before_one_data = data_instances.first()
f = functools.partial(self.calculate_cols,
left_col_idx=self.completed_calculation_result.all_left_col_indexes)
new_data = data_instances.mapValues(f)
LOGGER.debug("When transfering, all left_col_names: {}".format(
self.completed_calculation_result.all_left_col_names
))
new_data = self.set_schema(new_data, self.completed_calculation_result.all_left_col_names)
one_data = new_data.first()[1]
LOGGER.debug(
"In feature calculation transform, Before transform: {}, length: {} After transform: {}, length: {}".format(
before_one_data[1].features, len(before_one_data[1].features),
one_data.features, len(one_data.features)))
return new_data
def _abnormal_detection(self, data_instances):
"""
Make sure input data_instances is valid.
"""
abnormal_detection.empty_table_detection(data_instances)
abnormal_detection.empty_feature_detection(data_instances)
def set_schema(self, data_instance, header=None):
if header is None:
self.schema["header"] = self.curt_calculate_properties.header
else:
self.schema["header"] = header
data_instance.schema = self.schema
return data_instance
def update_curt_calculate_param(self):
new_calculate_properties = CalculationProperties()
new_calculate_properties.set_header(self.curt_calculate_properties.header)
new_calculate_properties.set_last_left_col_indexes(self.curt_calculate_properties.all_left_col_indexes)
new_calculate_properties.add_calculate_col_names(self.curt_calculate_properties.left_col_names)
LOGGER.debug("In update_curt_calculate_param, header: {}, cols_map: {},"
"last_left_col_indexes: {}, calculate_col_names: {}".format(
new_calculate_properties.header,
new_calculate_properties.col_name_maps,
new_calculate_properties.last_left_col_indexes,
new_calculate_properties.calculate_col_names
))
self.curt_calculate_properties = new_calculate_properties
def _filter(self, data_instances, method, suffix):
this_filter = filter_factory.get_filter(filter_name=method, model_param=self.model_param, role=self.role)
this_filter.set_calculation_properties(self.curt_calculate_properties)
this_filter.set_statics_obj(self.static_obj)
this_filter.set_binning_obj(self.binning_model)
this_filter.set_transfer_variable(self.transfer_variable)
self.curt_calculate_properties = this_filter.fit(data_instances, suffix).calculation_properties
provider_calculate_properties = getattr(this_filter, 'provider_calculation_properties', None)
LOGGER.debug("method: {}, provider_calculate_properties: {}".format(
method, provider_calculate_properties))
self.completed_calculation_result.add_filter_results(filter_name=method,
calculate_properties=self.curt_calculate_properties,
provider_calculate_properties=provider_calculate_properties)
LOGGER.debug("method: {}, calculation_cols: {}, left_cols: {}".format(
method, self.curt_calculate_properties.calculate_col_names, self.curt_calculate_properties.left_col_names))
self.update_curt_calculate_param()
LOGGER.debug("After updated, method: {}, calculation_cols: {}, left_cols: {}".format(
method, self.curt_calculate_properties.calculate_col_names, self.curt_calculate_properties.left_col_names))
self.meta_dicts = this_filter.get_meta_obj(self.meta_dicts)
def fit(self, data_instances):
LOGGER.info("Start Vert Calculation Fit and transform.")
self._abnormal_detection(data_instances)
self._init_calculate_params(data_instances)
if len(self.curt_calculate_properties.calculate_col_indexes) == 0:
LOGGER.warning("None of columns has been set to calculat")
else:
for filter_idx, method in enumerate(self.filter_methods):
self._filter(data_instances, method, suffix=str(filter_idx))
new_data = self._transfer_data(data_instances)
LOGGER.info("Finish Vert Calculation Fit and transform.")
return new_data
@assert_io_num_rows_equal
def transform(self, data_instances):
self._abnormal_detection(data_instances)
self._init_calculate_params(data_instances)
new_data = self._transfer_data(data_instances)
return new_data
|
[
"functools.partial",
"kernel.utils.abnormal_detection.empty_table_detection",
"kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable.VertFeatureCalculationTransferVariable",
"json.loads",
"kernel.protobuf.generated.feature_calculation_param_pb2.FeatureCalculationValueResultParam",
"kernel.protobuf.generated.feature_calculation_param_pb2.FeatureCalculationResultParam",
"kernel.components.featurecalculation.base.calculation_properties.CompletedCalculationResults",
"kernel.protobuf.generated.feature_calculation_meta_pb2.FeatureCalculationMeta",
"common.python.utils.log_utils.get_logger",
"kernel.utils.data_util.get_header",
"kernel.protobuf.generated.feature_calculation_param_pb2.FeatureCalculationResultsParam",
"kernel.components.featurecalculation.param.FeatureCalculationParam",
"google.protobuf.json_format.MessageToJson",
"kernel.components.featurecalculation.base.filter_factory.get_filter",
"kernel.components.featurecalculation.base.calculation_properties.CalculationProperties",
"kernel.components.binning.vertfeaturebinning.vert_binning_provider.VertFeatureBinningProvider",
"kernel.utils.abnormal_detection.empty_feature_detection",
"kernel.components.binning.vertfeaturebinning.vert_binning_promoter.VertFeatureBinningPromoter"
] |
[((2385, 2407), 'common.python.utils.log_utils.get_logger', 'log_utils.get_logger', ([], {}), '()\n', (2405, 2407), False, 'from common.python.utils import log_utils\n'), ((2698, 2738), 'kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable.VertFeatureCalculationTransferVariable', 'VertFeatureCalculationTransferVariable', ([], {}), '()\n', (2736, 2738), False, 'from kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable import VertFeatureCalculationTransferVariable\n'), ((2781, 2804), 'kernel.components.featurecalculation.base.calculation_properties.CalculationProperties', 'CalculationProperties', ([], {}), '()\n', (2802, 2804), False, 'from kernel.components.featurecalculation.base.calculation_properties import CalculationProperties, CompletedCalculationResults\n'), ((2849, 2878), 'kernel.components.featurecalculation.base.calculation_properties.CompletedCalculationResults', 'CompletedCalculationResults', ([], {}), '()\n', (2876, 2878), False, 'from kernel.components.featurecalculation.base.calculation_properties import CalculationProperties, CompletedCalculationResults\n'), ((3093, 3118), 'kernel.components.featurecalculation.param.FeatureCalculationParam', 'FeatureCalculationParam', ([], {}), '()\n', (3116, 3118), False, 'from kernel.components.featurecalculation.param import FeatureCalculationParam\n'), ((3618, 3644), 'kernel.utils.data_util.get_header', 'get_header', (['data_instances'], {}), '(data_instances)\n', (3628, 3644), False, 'from kernel.utils.data_util import get_header\n'), ((4738, 4808), 'kernel.protobuf.generated.feature_calculation_meta_pb2.FeatureCalculationMeta', 'feature_calculation_meta_pb2.FeatureCalculationMeta', ([], {}), '(**self.meta_dicts)\n', (4789, 4808), False, 'from kernel.protobuf.generated import feature_calculation_param_pb2, feature_calculation_meta_pb2\n'), ((5901, 5938), 'google.protobuf.json_format.MessageToJson', 'json_format.MessageToJson', (['result_obj'], {}), '(result_obj)\n', (5926, 5938), False, 'from google.protobuf import json_format\n'), ((8099, 8206), 'kernel.protobuf.generated.feature_calculation_param_pb2.FeatureCalculationResultsParam', 'feature_calculation_param_pb2.FeatureCalculationResultsParam', ([], {'calculate_results': 'calculate_results_list'}), '(calculate_results\n =calculate_results_list)\n', (8159, 8206), False, 'from kernel.protobuf.generated import feature_calculation_param_pb2, feature_calculation_meta_pb2\n'), ((12042, 12154), 'functools.partial', 'functools.partial', (['self.calculate_cols'], {'left_col_idx': 'self.completed_calculation_result.all_left_col_indexes'}), '(self.calculate_cols, left_col_idx=self.\n completed_calculation_result.all_left_col_indexes)\n', (12059, 12154), False, 'import functools\n'), ((12956, 13012), 'kernel.utils.abnormal_detection.empty_table_detection', 'abnormal_detection.empty_table_detection', (['data_instances'], {}), '(data_instances)\n', (12996, 13012), False, 'from kernel.utils import abnormal_detection\n'), ((13021, 13079), 'kernel.utils.abnormal_detection.empty_feature_detection', 'abnormal_detection.empty_feature_detection', (['data_instances'], {}), '(data_instances)\n', (13063, 13079), False, 'from kernel.utils import abnormal_detection\n'), ((13444, 13467), 'kernel.components.featurecalculation.base.calculation_properties.CalculationProperties', 'CalculationProperties', ([], {}), '()\n', (13465, 13467), False, 'from kernel.components.featurecalculation.base.calculation_properties import CalculationProperties, 
CompletedCalculationResults\n'), ((14299, 14394), 'kernel.components.featurecalculation.base.filter_factory.get_filter', 'filter_factory.get_filter', ([], {'filter_name': 'method', 'model_param': 'self.model_param', 'role': 'self.role'}), '(filter_name=method, model_param=self.model_param,\n role=self.role)\n', (14324, 14394), False, 'from kernel.components.featurecalculation.base import filter_factory\n'), ((6071, 6089), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (6081, 6089), False, 'import json\n'), ((7847, 7963), 'kernel.protobuf.generated.feature_calculation_param_pb2.FeatureCalculationResultParam', 'feature_calculation_param_pb2.FeatureCalculationResultParam', ([], {'role': 'role', 'member_id': 'member_id', 'results': 'new_results'}), '(role=role,\n member_id=member_id, results=new_results)\n', (7906, 7963), False, 'from kernel.protobuf.generated import feature_calculation_param_pb2, feature_calculation_meta_pb2\n'), ((7574, 7694), 'kernel.protobuf.generated.feature_calculation_param_pb2.FeatureCalculationValueResultParam', 'feature_calculation_param_pb2.FeatureCalculationValueResultParam', ([], {'filter_name': 'filter_name', 'cols': 'cols', 'values': 'values'}), '(filter_name\n =filter_name, cols=cols, values=values)\n', (7638, 7694), False, 'from kernel.protobuf.generated import feature_calculation_param_pb2, feature_calculation_meta_pb2\n'), ((10827, 10855), 'kernel.components.binning.vertfeaturebinning.vert_binning_promoter.VertFeatureBinningPromoter', 'VertFeatureBinningPromoter', ([], {}), '()\n', (10853, 10855), False, 'from kernel.components.binning.vertfeaturebinning.vert_binning_promoter import VertFeatureBinningPromoter\n'), ((10911, 10939), 'kernel.components.binning.vertfeaturebinning.vert_binning_provider.VertFeatureBinningProvider', 'VertFeatureBinningProvider', ([], {}), '()\n', (10937, 10939), False, 'from kernel.components.binning.vertfeaturebinning.vert_binning_provider import VertFeatureBinningProvider\n'), ((6270, 6310), 'kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable.VertFeatureCalculationTransferVariable', 'VertFeatureCalculationTransferVariable', ([], {}), '()\n', (6308, 6310), False, 'from kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable import VertFeatureCalculationTransferVariable\n'), ((6659, 6699), 'kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable.VertFeatureCalculationTransferVariable', 'VertFeatureCalculationTransferVariable', ([], {}), '()\n', (6697, 6699), False, 'from kernel.transfer.variables.transfer_class.vert_feature_calculation_transfer_variable import VertFeatureCalculationTransferVariable\n')]
|
# Generated by Django 3.1 on 2020-08-07 04:53
from django.db import migrations, models
def set_images_names(apps, schema_editor):
UploadImage = apps.get_model('resizer', 'UploadImage')
for image in UploadImage.objects.all():
image_name = image.original_image.name.split('/')[-1]
image.image_name = image_name
image.save()
class Migration(migrations.Migration):
dependencies = [
('resizer', '0002_auto_20200807_0444'),
]
operations = [
migrations.AddField(
model_name='uploadimage',
name='image_name',
field=models.CharField(default='', max_length=128),
),
migrations.RunPython(set_images_names),
]
|
[
"django.db.migrations.RunPython",
"django.db.models.CharField"
] |
[((675, 713), 'django.db.migrations.RunPython', 'migrations.RunPython', (['set_images_names'], {}), '(set_images_names)\n', (695, 713), False, 'from django.db import migrations, models\n'), ((610, 654), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(128)'}), "(default='', max_length=128)\n", (626, 654), False, 'from django.db import migrations, models\n')]
|
"""Created comments
Revision ID: fa4f694e986a
Revises: <KEY>
Create Date: 2021-08-16 21:48:43.079233
"""
# revision identifiers, used by Alembic.
revision = 'fa4f694e986a'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pitches', sa.Column('time', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('pitches', 'time')
# ### end Alembic commands ###
|
[
"alembic.op.drop_column",
"sqlalchemy.DateTime"
] |
[((532, 565), 'alembic.op.drop_column', 'op.drop_column', (['"""pitches"""', '"""time"""'], {}), "('pitches', 'time')\n", (546, 565), False, 'from alembic import op\n'), ((377, 390), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (388, 390), True, 'import sqlalchemy as sa\n')]
|
from unittest import TestCase
from flow_py_sdk import AccountKey, SignAlgo, HashAlgo
from flow_py_sdk.proto.flow.entities import AccountKey as ProtoAccountKey
class TestAccountKey(TestCase):
def test_rlp(self):
expected_rlp_hex = "f847b840c51c02aa382d8d382a121178de8ac97eb6a562a1008660669ab6a220c96fce76e1d392b0c156380ae713b0aa18ad9cff7b85bcc44a9eb43fcddb467f456f0ec803038203e8"
key = AccountKey(
public_key=bytes.fromhex(
"<KEY>"
),
sign_algo=SignAlgo.ECDSA_secp256k1,
hash_algo=HashAlgo.SHA3_256,
weight=AccountKey.weight_threshold,
)
rlp = key.rlp()
self.assertEqual(expected_rlp_hex, rlp.hex())
def test_hex(self):
expected_rlp_hex = "f847b840c51c02aa382d8d382a121178de8ac97eb6a562a1008660669ab6a220c96fce76e1d392b0c156380ae713b0aa18ad9cff7b85bcc44a9eb43fcddb467f456f0ec803038203e8"
key = AccountKey(
public_key=bytes.fromhex(
"<KEY>"
),
sign_algo=SignAlgo.ECDSA_secp256k1,
hash_algo=HashAlgo.SHA3_256,
weight=AccountKey.weight_threshold,
)
rlp_hex = key.hex()
self.assertEqual(expected_rlp_hex, rlp_hex)
def test_from_proto(self):
proto_account_key = ProtoAccountKey()
proto_account_key.sign_algo = 2
proto_account_key.hash_algo = 1
AccountKey.from_proto(proto_account_key)
|
[
"flow_py_sdk.AccountKey.from_proto",
"flow_py_sdk.proto.flow.entities.AccountKey"
] |
[((1317, 1334), 'flow_py_sdk.proto.flow.entities.AccountKey', 'ProtoAccountKey', ([], {}), '()\n', (1332, 1334), True, 'from flow_py_sdk.proto.flow.entities import AccountKey as ProtoAccountKey\n'), ((1424, 1464), 'flow_py_sdk.AccountKey.from_proto', 'AccountKey.from_proto', (['proto_account_key'], {}), '(proto_account_key)\n', (1445, 1464), False, 'from flow_py_sdk import AccountKey, SignAlgo, HashAlgo\n')]
|
# -*- coding: UTF-8 -*-
"""PyRamen Homework Starter."""
# @TODO: Import libraries
import csv
from pathlib import Path
# @TODO: Set file paths for menu_data.csv and sales_data.csv
menu_filepath = Path('')
sales_filepath = Path('')
# @TODO: Initialize list objects to hold our menu and sales data
menu = []
sales = []
# @TODO: Read in the menu data into the menu list
# @TODO: Read in the sales data into the sales list
# @TODO: Initialize dict object to hold our key-value pairs of items and metrics
report = {}
# Initialize a row counter variable
row_count = 0
# @TODO: Loop over every row in the sales list object
# Line_Item_ID,Date,Credit_Card_Number,Quantity,Menu_Item
# @TODO: Initialize sales data variables
# @TODO:
# If the item value not in the report, add it as a new entry with initialized metrics
# Naming convention allows the keys to be ordered in logical fashion, count, revenue, cost, profit
|
[
"pathlib.Path"
] |
[((197, 205), 'pathlib.Path', 'Path', (['""""""'], {}), "('')\n", (201, 205), False, 'from pathlib import Path\n'), ((223, 231), 'pathlib.Path', 'Path', (['""""""'], {}), "('')\n", (227, 231), False, 'from pathlib import Path\n')]
|
'''
Collection of shared tools for appengine page rendering.
'''
# My modules
from macro.render.defs import *
from macro.render.util import render_template
from macro.data.appengine.savedmacro import SavedMacroOps
# Generate a search results page.
def generate_search_page(path, terms, page, sort, page_size=DEF_SEARCH_RESULTS):
'''
Generate a search results page.
'''
error = None
# Make sure page is a number, failing on bad input
prev_page = None
if not page:
page = 1
else:
page = int(page)
prev_page = page - 1
# Do the search
# TODO: Add column sort
results = []
is_next_page = False
if (len(terms) < SINGLE_TAG_MAX_LENGTH):
(results, is_next_page) = SavedMacroOps.search(terms, page=page, num=page_size, sort=sort)
else:
error = "Query term too long."
terms = terms[:SINGLE_TAG_MAX_LENGTH] + "..."
# If the number of results is less than that of page_size,
# then there is no next page.
next_page = None
if is_next_page: next_page = page + 1
# If there are no results, add an error.
if not error and len(results) == 0:
error = "No results found."
# TODO: Hook up template controls to sort results.
# TODO: Hook up template controls to page forward/back.
# Return generated search page.
return render_template('base.template',
{'query' : terms,
'content': render_template('search.template',
{'search_error' : error,
'curr_version' : "%s.%s.%s" % (MAJOR_VERSION,
MINOR_VERSION,
PATCH_VERSION),
'query' : terms,
'q_esc' : FORM_QUERY_ESC,
'results' : results,
'sort' : sort,
'page_var' : FORM_SEARCH_PAGE,
# Only give a prev page if we're over page 1.
'prev_page' : prev_page,
'page' : page,
'next_page' : next_page,
},
path)},
path)
|
[
"macro.render.util.render_template",
"macro.data.appengine.savedmacro.SavedMacroOps.search"
] |
[((784, 848), 'macro.data.appengine.savedmacro.SavedMacroOps.search', 'SavedMacroOps.search', (['terms'], {'page': 'page', 'num': 'page_size', 'sort': 'sort'}), '(terms, page=page, num=page_size, sort=sort)\n', (804, 848), False, 'from macro.data.appengine.savedmacro import SavedMacroOps\n'), ((1523, 1848), 'macro.render.util.render_template', 'render_template', (['"""search.template"""', "{'search_error': error, 'curr_version': '%s.%s.%s' % (MAJOR_VERSION,\n MINOR_VERSION, PATCH_VERSION), 'query': terms, 'q_esc': FORM_QUERY_ESC,\n 'results': results, 'sort': sort, 'page_var': FORM_SEARCH_PAGE,\n 'prev_page': prev_page, 'page': page, 'next_page': next_page}", 'path'], {}), "('search.template', {'search_error': error, 'curr_version': \n '%s.%s.%s' % (MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION), 'query':\n terms, 'q_esc': FORM_QUERY_ESC, 'results': results, 'sort': sort,\n 'page_var': FORM_SEARCH_PAGE, 'prev_page': prev_page, 'page': page,\n 'next_page': next_page}, path)\n", (1538, 1848), False, 'from macro.render.util import render_template\n')]
|
import numpy as N
import win32com.client
# generate and import apogee ActiveX module
apogee_module = win32com.client.gencache.EnsureModule(
'{A2882C73-7CFB-11D4-9155-0060676644C1}', 0, 1, 0)
if apogee_module is None:
raise ImportError # prevent plugin from being imported
from win32com.client import constants as Constants
from traits.api import Str, Int, Enum, Float, Bool
from traitsui.api import View, Item
from Camera import Camera, CameraError
class ApogeeCam(Camera):
'''Apogee Alta or Ascent camera'''
plugin_info = {
'name': 'Apogee',
'description': 'Apogee Alta or Ascent camera',
'author': '<NAME>',
'copyright year': '2011',
}
camera_num2 = Int(0)
camera_model = Str()
driver_version = Str()
interface = Enum('usb', 'net')
expose_time = Float(0.05)
open_shutter = Bool(True)
view = View(
Item('interface'),
Item('camera_number'),
Item('camera_num2'),
Item('expose_time'),
Item('open_shutter'))
def __init__(self, **traits):
super(ApogeeCam, self).__init__(camera_number=0, **traits)
self._cam = win32com.client.Dispatch('Apogee.Camera2')
self._interface_constants = {
'usb': Constants.Apn_Interface_USB,
'net': Constants.Apn_Interface_NET}
self._reverse_constants = dict((v, k)
for k, v in self._interface_constants.iteritems())
self._buffer = None
def open(self):
self._cam.Init(self._interface_constants[self.interface],
self.camera_number, self.camera_num2, 0)
self._buffer = N.zeros(self.roi[-1:-3:-1], dtype=N.uint16)
def close(self):
self._cam.Close()
def query_frame(self, expose_time=None, open_shutter=None):
"""
Start an exposure and wait for it to finish.
Pass @expose_time or @open_shutter to override the camera object's
default parameters.
"""
if expose_time is None:
expose_time = self.expose_time
if open_shutter is None:
open_shutter = self.open_shutter
try:
self._cam.Expose(expose_time, open_shutter)
while self._cam.ImagingStatus != Constants.Apn_Status_ImageReady:
pass
self._cam.GetImage(self._buffer.ctypes.data)
finally:
if self._cam.ImagingStatus < 0:
self.reset()
self.frame = N.copy(self._buffer)
def choose_camera(self):
discover = win32com.client.Dispatch('Apogee.CamDiscover')
discover.DlgCheckUsb = True
discover.ShowDialog(True)
if not discover.ValidSelection:
raise ValueError('No camera selected')
self.interface = self._reverse_constants[discover.SelectedInterface]
self.camera_number = discover.SelectedCamIdOne
self.camera_num2 = discover.SelectedCamIdTwo
def reset(self):
self._cam.ResetState()
# if error status persists, raise an exception
if self._cam.ImagingStatus < 0:
raise CameraError('Error not cleared by reset', self.camera_number)
def _resolution_default(self):
return self._cam.ImagingColumns, self._cam.ImagingRows
def _camera_model_default(self):
return self._cam.CameraModel
def _driver_version_default(self):
return self._cam.DriverVersion
def _id_string_default(self):
return 'Apogee {} Driver version: {}'.format(
self.camera_model,
self.driver_version)
def _roi_default(self):
return (self._cam.RoiStartX,
self._cam.RoiStartY,
self._cam.RoiPixelsH,
self._cam.RoiPixelsV)
def _roi_changed(self, value):
x, y, w, h = value
self._cam.RoiStartX = x
self._cam.RoiStartY = y
self._cam.RoiPixelsH = w
self._cam.RoiPixelsV = h
self._buffer = N.zeros((h, w), dtype=N.uint16)
|
[
"traits.api.Float",
"Camera.CameraError",
"numpy.copy",
"traits.api.Int",
"numpy.zeros",
"traits.api.Bool",
"traits.api.Str",
"traitsui.api.Item",
"traits.api.Enum"
] |
[((715, 721), 'traits.api.Int', 'Int', (['(0)'], {}), '(0)\n', (718, 721), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((741, 746), 'traits.api.Str', 'Str', ([], {}), '()\n', (744, 746), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((768, 773), 'traits.api.Str', 'Str', ([], {}), '()\n', (771, 773), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((790, 808), 'traits.api.Enum', 'Enum', (['"""usb"""', '"""net"""'], {}), "('usb', 'net')\n", (794, 808), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((827, 838), 'traits.api.Float', 'Float', (['(0.05)'], {}), '(0.05)\n', (832, 838), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((858, 868), 'traits.api.Bool', 'Bool', (['(True)'], {}), '(True)\n', (862, 868), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((895, 912), 'traitsui.api.Item', 'Item', (['"""interface"""'], {}), "('interface')\n", (899, 912), False, 'from traitsui.api import View, Item\n'), ((922, 943), 'traitsui.api.Item', 'Item', (['"""camera_number"""'], {}), "('camera_number')\n", (926, 943), False, 'from traitsui.api import View, Item\n'), ((953, 972), 'traitsui.api.Item', 'Item', (['"""camera_num2"""'], {}), "('camera_num2')\n", (957, 972), False, 'from traitsui.api import View, Item\n'), ((982, 1001), 'traitsui.api.Item', 'Item', (['"""expose_time"""'], {}), "('expose_time')\n", (986, 1001), False, 'from traitsui.api import View, Item\n'), ((1011, 1031), 'traitsui.api.Item', 'Item', (['"""open_shutter"""'], {}), "('open_shutter')\n", (1015, 1031), False, 'from traitsui.api import View, Item\n'), ((1642, 1685), 'numpy.zeros', 'N.zeros', (['self.roi[-1:-3:-1]'], {'dtype': 'N.uint16'}), '(self.roi[-1:-3:-1], dtype=N.uint16)\n', (1649, 1685), True, 'import numpy as N\n'), ((2468, 2488), 'numpy.copy', 'N.copy', (['self._buffer'], {}), '(self._buffer)\n', (2474, 2488), True, 'import numpy as N\n'), ((3948, 3979), 'numpy.zeros', 'N.zeros', (['(h, w)'], {'dtype': 'N.uint16'}), '((h, w), dtype=N.uint16)\n', (3955, 3979), True, 'import numpy as N\n'), ((3097, 3158), 'Camera.CameraError', 'CameraError', (['"""Error not cleared by reset"""', 'self.camera_number'], {}), "('Error not cleared by reset', self.camera_number)\n", (3108, 3158), False, 'from Camera import Camera, CameraError\n')]
|
# -*- coding: utf-8 -*-
from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, \
get_engines_supply, get_planes_capacity, get_airport_name
from app.common.http_methods_unittests import get_request
from app.common.target_urls import MY_AIRPORT
import unittest
class TestAirportParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.__html_page = get_request(MY_AIRPORT)
def test_country(self):
country = get_country(self.__html_page)
self.assertEqual(u'Égypte', country)
def test_money(self):
country = get_money(self.__html_page)
self.assertEqual(2444908, country)
def test_kerosene_supply(self):
country = get_kerosene_supply(self.__html_page)
self.assertEqual(2009391, country)
def test_kerosene_capacity(self):
country = get_kerosene_capacity(self.__html_page)
self.assertEqual(2500000, country)
def test_engines_supply(self):
engines_supply = get_engines_supply(self.__html_page)
self.assertEqual(1000, engines_supply['5'])
self.assertEqual(2, engines_supply['6'])
def test_planes_capacity(self):
country = get_planes_capacity(self.__html_page)
self.assertEqual(9, country)
def test_airport_name(self):
country = get_airport_name(self.__html_page)
self.assertEqual(u'Roissy aéroport', country)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"app.airport.airports_parsers.get_country",
"app.airport.airports_parsers.get_kerosene_supply",
"app.airport.airports_parsers.get_planes_capacity",
"app.airport.airports_parsers.get_kerosene_capacity",
"app.airport.airports_parsers.get_engines_supply",
"app.airport.airports_parsers.get_money",
"app.common.http_methods_unittests.get_request",
"app.airport.airports_parsers.get_airport_name"
] |
[((1473, 1488), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1486, 1488), False, 'import unittest\n'), ((433, 456), 'app.common.http_methods_unittests.get_request', 'get_request', (['MY_AIRPORT'], {}), '(MY_AIRPORT)\n', (444, 456), False, 'from app.common.http_methods_unittests import get_request\n'), ((504, 533), 'app.airport.airports_parsers.get_country', 'get_country', (['self.__html_page'], {}), '(self.__html_page)\n', (515, 533), False, 'from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, get_engines_supply, get_planes_capacity, get_airport_name\n'), ((624, 651), 'app.airport.airports_parsers.get_money', 'get_money', (['self.__html_page'], {}), '(self.__html_page)\n', (633, 651), False, 'from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, get_engines_supply, get_planes_capacity, get_airport_name\n'), ((750, 787), 'app.airport.airports_parsers.get_kerosene_supply', 'get_kerosene_supply', (['self.__html_page'], {}), '(self.__html_page)\n', (769, 787), False, 'from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, get_engines_supply, get_planes_capacity, get_airport_name\n'), ((888, 927), 'app.airport.airports_parsers.get_kerosene_capacity', 'get_kerosene_capacity', (['self.__html_page'], {}), '(self.__html_page)\n', (909, 927), False, 'from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, get_engines_supply, get_planes_capacity, get_airport_name\n'), ((1032, 1068), 'app.airport.airports_parsers.get_engines_supply', 'get_engines_supply', (['self.__html_page'], {}), '(self.__html_page)\n', (1050, 1068), False, 'from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, get_engines_supply, get_planes_capacity, get_airport_name\n'), ((1225, 1262), 'app.airport.airports_parsers.get_planes_capacity', 'get_planes_capacity', (['self.__html_page'], {}), '(self.__html_page)\n', (1244, 1262), False, 'from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, get_engines_supply, get_planes_capacity, get_airport_name\n'), ((1352, 1386), 'app.airport.airports_parsers.get_airport_name', 'get_airport_name', (['self.__html_page'], {}), '(self.__html_page)\n', (1368, 1386), False, 'from app.airport.airports_parsers import get_country, get_money, get_kerosene_supply, get_kerosene_capacity, get_engines_supply, get_planes_capacity, get_airport_name\n')]
|
import json
class Gabi:
def myFunc(self, game, hand, cards):
print("GABI")
try:
if (hand[0]["rank"] == hand[1]["rank"]):
print("pair, returning 800")
return 800
elif (hand[0]["rank"] in "89TJQKA" and hand[1]["rank"] in "89TJQKA"):
print("high card, returning 600")
return 600
else:
return game_state["current_buy_in"] - game_state["players"][game_state["in_action"]]["bet"]
print("nopair")
except Exception as ex:
print("bad gabi")
print(ex)
finally:
print("GABI END")
def calcBet(self, game):
return 500
def fulsh(self, hand, cards):
allcards = hand + cards
hearts = 0
spades = 0
clubs = 0
diamonds = 0
for card in allcards:
if (card["suit"] == "hearts"):
hearts += 1
elif (card["suit"] == "spades"):
spades += 1
elif (card["suit"] == "clubs"):
clubs += 1
elif (card["suit"] == "diamonds"):
diamonds += 1
        if (hearts >= 4 or spades >= 4 or clubs >= 4 or diamonds >= 4):
return True
return False
def straight(self, hand, cards):
allcards = hand + cards
result = []
for card in allcards:
if (card["rank"] == "J"):
card["rank"] = 11
elif (card["rank"] == "Q"):
card["rank"] = 12
elif (card["rank"] == "K"):
card["rank"] = 13
elif (card["rank"] == "A"):
card["rank"] = 14
else:
card["rank"] = int(card["rank"])
result.append(card["rank"])
result.sort()
j = 0
for i in range(0, len(result) - 1):
if (result[i] + 1 == result[i + 1]):
j+=1
if (j > 3):
return True
else:
return False
if __name__ == '__main__':
json_data=open("sample.json").read()
data = json.loads(json_data)
asd = data["current_buy_in"] - data["players"][data["in_action"]]["bet"]
print(Gabi().fulsh([{'rank': '3', 'suit': 'hearts'},{'rank': 'K','suit': 'hearts'},{'rank': '3', 'suit': 'hearts'},{'rank': 'K','suit': 'hearts'},{'rank': '3', 'suit': 'hearts'},{'rank': 'K','suit': 'spades'}], []))
|
[
"json.loads"
] |
[((2138, 2159), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (2148, 2159), False, 'import json\n')]
|
"""Build and install the windspharm package."""
# Copyright (c) 2012-2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os.path
from setuptools import setup
import versioneer
packages = ['windspharm',
'windspharm.examples',
'windspharm.tests']
package_data = {
'windspharm.examples': ['example_data/*'],
'windspharm.tests': ['data/regular/*.npy', 'data/gaussian/*.npy']}
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
long_description = f.read()
setup(name='windspharm',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='vector wind analysis in spherical coordinates',
author='<NAME>',
author_email='<EMAIL>',
url='http://ajdawson.github.com/windspharm/',
long_description=long_description,
long_description_content_type='text/markdown',
packages=packages,
package_data=package_data,
install_requires=['numpy', 'pyspharm >= 1.0.8'],)
|
[
"versioneer.get_version",
"versioneer.get_cmdclass"
] |
[((1579, 1603), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (1601, 1603), False, 'import versioneer\n'), ((1620, 1645), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (1643, 1645), False, 'import versioneer\n')]
|
"""tests for vak.cli.predict module"""
import pytest
import vak.cli.predict
import vak.config
import vak.constants
import vak.paths
from . import cli_asserts
from ..test_core.test_predict import predict_output_matches_expected
@pytest.mark.parametrize(
"audio_format, spect_format, annot_format",
[
("cbin", None, "notmat"),
("wav", None, "birdsong-recognition-dataset"),
],
)
def test_predict(
audio_format, spect_format, annot_format, specific_config, tmp_path, model, device
):
output_dir = tmp_path.joinpath(
f"test_predict_{audio_format}_{spect_format}_{annot_format}"
)
output_dir.mkdir()
options_to_change = [
{"section": "PREDICT", "option": "output_dir", "value": str(output_dir)},
{"section": "PREDICT", "option": "device", "value": device},
]
toml_path = specific_config(
config_type="predict",
model=model,
audio_format=audio_format,
annot_format=annot_format,
options_to_change=options_to_change,
)
vak.cli.predict.predict(toml_path)
cfg = vak.config.parse.from_toml_path(toml_path)
assert predict_output_matches_expected(output_dir, cfg.predict.annot_csv_filename)
assert cli_asserts.log_file_created(command="predict", output_path=output_dir)
|
[
"pytest.mark.parametrize"
] |
[((232, 379), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""audio_format, spect_format, annot_format"""', "[('cbin', None, 'notmat'), ('wav', None, 'birdsong-recognition-dataset')]"], {}), "('audio_format, spect_format, annot_format', [(\n 'cbin', None, 'notmat'), ('wav', None, 'birdsong-recognition-dataset')])\n", (255, 379), False, 'import pytest\n')]
|
import json
import logging
import config as cfg
from modules.zabbix_sender import send_to_zabbix
logger = logging.getLogger(__name__)
"""zabbixにDevice LLDデータを送信します。
result = {"/dev/sda": {"model": EXAMPLE SSD 250, "POWER_CYCLE": 123 ...}}
@param result the data to send
@param discoveryKey zabbix discovery key. ex) megacli.lld.adapter
@param discoveryPHName discovery placeholder for values ex) SASADDR
"""
def send_device_discovery(result, discoveryKey, discoveryPHName):
logger.info("Sending device discovery to zabbix")
discovery_result = []
for key in result:
discovery_result.append({discoveryPHName: key})
data = {"request": "sender data", "data":[]}
valueStr = json.dumps({"data": discovery_result})
one_data = {"host": cfg.ZABBIX_HOST, "key": discoveryKey, "value": f"{valueStr}"}
data["data"].append(one_data)
result = send_to_zabbix(data)
logger.info(result)
return None
"""interpriterで解釈出来たデータを送信する。
smartctlが解釈してくれたもの+独自に解釈したデータ
data = {
"host1": {
"item1": 1234,
"item2": "value"
},
"host2": {
"item1": 5678,
"item2": "value"
}
}
"""
def send_data(data):
logger.info("Send data to zabbix")
results = []
for mainkey in data:
detail = data[mainkey] # discovery key
for key in detail:
results.append({
"host": cfg.ZABBIX_HOST,
"key": key,
"value": detail[key],
})
sender_data = {"request": "sender data", "data": results}
result = send_to_zabbix(sender_data)
logger.info(result)
return None
|
[
"modules.zabbix_sender.send_to_zabbix",
"logging.getLogger",
"json.dumps"
] |
[((108, 135), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (125, 135), False, 'import logging\n'), ((682, 720), 'json.dumps', 'json.dumps', (["{'data': discovery_result}"], {}), "({'data': discovery_result})\n", (692, 720), False, 'import json\n'), ((849, 869), 'modules.zabbix_sender.send_to_zabbix', 'send_to_zabbix', (['data'], {}), '(data)\n', (863, 869), False, 'from modules.zabbix_sender import send_to_zabbix\n'), ((1480, 1507), 'modules.zabbix_sender.send_to_zabbix', 'send_to_zabbix', (['sender_data'], {}), '(sender_data)\n', (1494, 1507), False, 'from modules.zabbix_sender import send_to_zabbix\n')]
|
# coding=utf-8
"""
Provides a common interface for executing commands.
"""
import sys
import subprocess
import doctor.report as report
from doctor.report import supports_color
def get_argv(cmd: str) -> list:
""" Return a list of arguments from a fully-formed command line. """
return cmd.strip().split(' ')
def display(cmd: str):
""" Emit a diagnostic message that looks like the execution of a command line. """
diagnostic = f'$ {cmd}'
diagnostic = f'\x1b[0;37m{diagnostic}\x1b[0m' if supports_color(sys.stderr) else diagnostic
report.information(diagnostic, wrapped=False)
def execute(cmd: str, show_argv: bool=False, show_output: bool=False) -> int:
""" Execute a command-line process and return exit code.
If show_argv is True, display the executed command with parameters/arguments.
If show_output is True, display the resulting output from the executed command.
The resulting output of the executed command is not redirected (unless show_output is False,
in which case it is quelched), which means it might be printed on either stdout or stderr
depending on the executed command.
"""
argv = get_argv(cmd)
if show_argv:
display(cmd)
result = subprocess.run(
argv,
stdout=sys.stdout if show_output else subprocess.DEVNULL,
stderr=sys.stderr if show_output else subprocess.DEVNULL)
return result.returncode
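# Added usage sketch (the command line below is hypothetical):
#   code = execute('ls -l', show_argv=True, show_output=True)
# prints the diagnostic "$ ls -l", streams the command's output to stdout/stderr,
# and returns its exit code.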
|
[
"subprocess.run",
"doctor.report.supports_color",
"doctor.report.information"
] |
[((564, 609), 'doctor.report.information', 'report.information', (['diagnostic'], {'wrapped': '(False)'}), '(diagnostic, wrapped=False)\n', (582, 609), True, 'import doctor.report as report\n'), ((1237, 1378), 'subprocess.run', 'subprocess.run', (['argv'], {'stdout': '(sys.stdout if show_output else subprocess.DEVNULL)', 'stderr': '(sys.stderr if show_output else subprocess.DEVNULL)'}), '(argv, stdout=sys.stdout if show_output else subprocess.\n DEVNULL, stderr=sys.stderr if show_output else subprocess.DEVNULL)\n', (1251, 1378), False, 'import subprocess\n'), ((516, 542), 'doctor.report.supports_color', 'supports_color', (['sys.stderr'], {}), '(sys.stderr)\n', (530, 542), False, 'from doctor.report import supports_color\n')]
|
"""
Drive stepper motor 28BYJ-48 using ULN2003
"""
from machine import Pin
from time import sleep_ms
# define pins for ULN2003
IN1 = Pin(16, Pin.OUT)
IN2 = Pin(17, Pin.OUT)
IN3 = Pin(5, Pin.OUT)
IN4 = Pin(18, Pin.OUT)
# half-step mode
# counter clockwise step sequence
seq_ccw = [[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 1],
[1, 0, 0, 1]]
# clockwise step sequence
seq_cw = seq_ccw[::-1]
delay = 1 # ms, delay between steps
# one clockwise revolution (4096 steps)
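# (added note: the 28BYJ-48 rotor takes 64 half-steps per internal revolution and the
#  gearbox ratio is roughly 64:1, hence ~4096 half-steps per output-shaft revolution)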
for i in range(4096):
step = i % 8
IN1.value(seq_cw[step][0])
IN2.value(seq_cw[step][1])
IN3.value(seq_cw[step][2])
IN4.value(seq_cw[step][3])
    sleep_ms(delay)
# one counterclockwise revolution (4096 steps)
for i in range(4096):
step = i % 8
IN1.value(seq_ccw[step][0])
IN2.value(seq_ccw[step][1])
IN3.value(seq_ccw[step][2])
IN4.value(seq_ccw[step][3])
    sleep_ms(delay)
|
[
"time.sleep_ms",
"machine.Pin"
] |
[((135, 151), 'machine.Pin', 'Pin', (['(16)', 'Pin.OUT'], {}), '(16, Pin.OUT)\n', (138, 151), False, 'from machine import Pin\n'), ((158, 174), 'machine.Pin', 'Pin', (['(17)', 'Pin.OUT'], {}), '(17, Pin.OUT)\n', (161, 174), False, 'from machine import Pin\n'), ((181, 196), 'machine.Pin', 'Pin', (['(5)', 'Pin.OUT'], {}), '(5, Pin.OUT)\n', (184, 196), False, 'from machine import Pin\n'), ((204, 220), 'machine.Pin', 'Pin', (['(18)', 'Pin.OUT'], {}), '(18, Pin.OUT)\n', (207, 220), False, 'from machine import Pin\n'), ((769, 780), 'time.sleep_ms', 'sleep_ms', (['(1)'], {}), '(1)\n', (777, 780), False, 'from time import sleep_ms\n'), ((1000, 1011), 'time.sleep_ms', 'sleep_ms', (['(1)'], {}), '(1)\n', (1008, 1011), False, 'from time import sleep_ms\n')]
|
import vectorincrement
import os
import gin
import sparse_causal_model_learner_rl.learners.rl_learner as learner
import sparse_causal_model_learner_rl.learners.abstract_learner as abstract_learner
import sparse_causal_model_learner_rl.config as config
import pytest
from sparse_causal_model_learner_rl.sacred_gin_tune.sacred_wrapper import load_config_files
def test_learn_vectorincrement():
ve_config_path = os.path.join(os.path.dirname(vectorincrement.__file__), 'config', 've5.gin')
learner_config_path = os.path.join(os.path.dirname(learner.__file__), '..', 'configs', 'test.gin')
print(ve_config_path, learner_config_path)
load_config_files([ve_config_path, learner_config_path])
l = learner.CausalModelLearnerRL(config.Config())
l.train()
gin.clear_config()
class EmptyLearner(abstract_learner.AbstractLearner):
def maybe_write_artifacts(self, path_epoch, add_artifact_local):
pass
@property
def _context_subclass(self):
return {}
def collect_steps(self):
pass
def __repr__(self):
return ""
def test_abstract_learner_create():
f = os.path.join(os.path.dirname(abstract_learner.__file__), '..', 'configs', 'base_learner.gin')
load_config_files([f])
l = EmptyLearner(config.Config())
l.train()
gin.clear_config()
@pytest.fixture(autouse=True)
def clean_gin():
gin.clear_config()
yield
gin.clear_config()
|
[
"os.path.dirname",
"pytest.fixture",
"gin.clear_config",
"sparse_causal_model_learner_rl.sacred_gin_tune.sacred_wrapper.load_config_files",
"sparse_causal_model_learner_rl.config.Config"
] |
[((1376, 1404), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1390, 1404), False, 'import pytest\n'), ((662, 718), 'sparse_causal_model_learner_rl.sacred_gin_tune.sacred_wrapper.load_config_files', 'load_config_files', (['[ve_config_path, learner_config_path]'], {}), '([ve_config_path, learner_config_path])\n', (679, 718), False, 'from sparse_causal_model_learner_rl.sacred_gin_tune.sacred_wrapper import load_config_files\n'), ((796, 814), 'gin.clear_config', 'gin.clear_config', ([], {}), '()\n', (812, 814), False, 'import gin\n'), ((1271, 1293), 'sparse_causal_model_learner_rl.sacred_gin_tune.sacred_wrapper.load_config_files', 'load_config_files', (['[f]'], {}), '([f])\n', (1288, 1293), False, 'from sparse_causal_model_learner_rl.sacred_gin_tune.sacred_wrapper import load_config_files\n'), ((1353, 1371), 'gin.clear_config', 'gin.clear_config', ([], {}), '()\n', (1369, 1371), False, 'import gin\n'), ((1428, 1446), 'gin.clear_config', 'gin.clear_config', ([], {}), '()\n', (1444, 1446), False, 'import gin\n'), ((1463, 1481), 'gin.clear_config', 'gin.clear_config', ([], {}), '()\n', (1479, 1481), False, 'import gin\n'), ((439, 480), 'os.path.dirname', 'os.path.dirname', (['vectorincrement.__file__'], {}), '(vectorincrement.__file__)\n', (454, 480), False, 'import os\n'), ((543, 576), 'os.path.dirname', 'os.path.dirname', (['learner.__file__'], {}), '(learner.__file__)\n', (558, 576), False, 'import os\n'), ((759, 774), 'sparse_causal_model_learner_rl.config.Config', 'config.Config', ([], {}), '()\n', (772, 774), True, 'import sparse_causal_model_learner_rl.config as config\n'), ((1185, 1227), 'os.path.dirname', 'os.path.dirname', (['abstract_learner.__file__'], {}), '(abstract_learner.__file__)\n', (1200, 1227), False, 'import os\n'), ((1316, 1331), 'sparse_causal_model_learner_rl.config.Config', 'config.Config', ([], {}), '()\n', (1329, 1331), True, 'import sparse_causal_model_learner_rl.config as config\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author zengxiaohui
# Datetime: 4/29/2021 8:38 PM
# @File:obj_utils
from python_developer_tools.python.string_utils import str_is_null
def obj_is_null(obj):
"""判断对象是否为空"""
if obj is None:
return True
if isinstance(obj, list) and len(obj) == 0:
return True
if isinstance(obj, str):
        return str_is_null(obj)
    return False
|
[
"python_developer_tools.python.string_utils.str_is_null"
] |
[((376, 392), 'python_developer_tools.python.string_utils.str_is_null', 'str_is_null', (['obj'], {}), '(obj)\n', (387, 392), False, 'from python_developer_tools.python.string_utils import str_is_null\n')]
|
"""
The pypositioning.system.load_files.py module contains functions for loading measurement results from various
types of files. The currently available functions load **.psd** files collected with the TI Packet Sniffer and
results obtained using the IONIS localization system.
Copyright (C) 2020 <NAME>
"""
import numpy as np
import pandas as pd
def load_ionis_file(filepath, normalize_ts=False):
""" Load measurement file from IONIS system
Parameters
----------
filepath: str
path to measurement file
normalize_ts: bool
if True set ts base to 0 (rounds ts to full seconds)
Returns
-------
ble_df: DataFrame
data frame with ble rssi results (contains whole ble packets received by the anchors)
uwb_df: Dataframe
data frame with uwb-based toa results
ts_0: float
timestamp of the first received packet
"""
# open psd file
f = open(filepath, 'r')
ble_res = []
uwb_res = []
# split and decode each line
for line in f:
s = line.split('\t')
# if int(s[3]) == 19:
# ... # the packet is empty
if int(s[5]) * 12 + int(s[6]) * 8 + 19 > int(s[3]):
print("faulty packet, ts: " + s[0])
else:
# get ble and uwb packets number
ble_n = int(s[5])
uwb_n = int(s[6])
# for each ble packet
for k in range(ble_n):
bps = 6 + k * 8
# append array [ts, an_id, an_sqn, an_pressure, BLE packet contents]
ble_res.append(s[:3] + [s[4]] + s[bps + 1:bps + 9])
for j in range(uwb_n):
ups = 6 + ble_n * 8 + j * 4
# append array [ts, an_id, an_sqn, an_pressure, BLE packet contents]
uwb_res.append(s[:3] + s[ups + 1:ups + 5])
# reshape the arrays
ble_res = np.array(ble_res)
uwb_res = np.array(uwb_res)
if ble_res.size > 0:
ble_df = pd.DataFrame(data=ble_res,
columns=['ts', 'an_id', 'an_sqn', 'an_p', 'rx_id', 'tag_id', 'ble_ts', 'rssi',
'pres', 'volt', 'steps', 'alert'])
ble_df = ble_df.astype(dtype={'ts': 'float', 'an_id': 'int32', 'an_sqn': 'int32', 'an_p': 'int32',
'rx_id': 'int32', 'tag_id': 'int32', 'ble_ts': 'int32',
'rssi': 'float', 'pres': 'int32', 'volt': 'int32',
'steps': 'int32', 'alert': 'int32'})
ble_df.loc[ble_df['rssi'] == 0, 'rssi'] = np.nan
else:
ble_df = None
if uwb_res.size > 0:
uwb_df = pd.DataFrame(data=uwb_res, columns=['ts', 'an_id', 'an_sqn', 'rx_id', 'tag_id', 'uwb_sqn', 'toa'])
uwb_df = uwb_df.astype({'ts': 'float', 'an_id': 'int32', 'an_sqn': 'int32',
'rx_id': 'int32', 'tag_id': 'int32', 'uwb_sqn': 'int32', 'toa': 'float'})
uwb_df['toa'] = uwb_df['toa'].values * 15.65e-12
else:
uwb_df = None
if normalize_ts:
ts_min = 0
if (uwb_res.size > 0 and ble_res.size > 0):
ts_min = np.minimum(ble_df.ts.min(), uwb_df.ts.min())
ble_df.ts = np.rint((ble_df.ts - ts_min).values / 1000)
uwb_df.ts = np.rint((uwb_df.ts - ts_min).values / 1000)
elif uwb_res.size > 0:
ts_min = uwb_df.ts.min()
uwb_df.ts = np.rint((uwb_df.ts - ts_min).values / 1000)
print('no ble results in a file - normalizing uwb ts only')
elif ble_res.size > 0:
ts_min = ble_df.ts.min()
ble_df.ts = np.rint((ble_df.ts - ts_min).values / 1000)
print('no uwb results in a file - normalizing ble ts only')
return ble_df, uwb_df, ts_min / 1000
return ble_df, uwb_df, 0
def synchronize_toa_ionis(m_uwb, an, an_r):
""" Synchronize toa values according to IONIS synchronization scheme
Parameters
----------
m_uwb: DataFrame
data frame with uwb measurement results
an: ndarray
anchor nodes coordinates [id,x,y,z]
an_r: ndarray
reference anchor node coordinates [x,y,z]
Returns
-------
m_uwb: DataFrame
m_uwb data frame with toa values synchronized
"""
# initialize array with empty rows for missing anchors
an_f = np.empty((int(an[:, 0].max()), 3))
an_f[:] = np.NaN
for a in an:
an_f[int(a[0]) - 1, :] = a[1:]
m_uwb["toa"] = m_uwb.toa + np.linalg.norm(an_f[m_uwb.an_id - 1] - an_r, axis=1) / 3e8
return m_uwb
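# Added note: synchronize_toa_ionis shifts each anchor's TOA by the propagation delay
# ||an_i - an_ref|| / c (with c = 3e8 m/s) between that anchor and the reference anchor.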
def distribute_packets_ionis(df):
""" Distribute packets that could be delayed and came to the system controller at the same time
Parameters
----------
df: DataFrame
dataframe containing measurement results with timestamps and sqns. It must include columns:
[ts, an_sqn, an_id]. Timestamp must be rounded to full seconds (might be float)
Returns
-------
df_d: DataFrame
dataframe, where the packet ts were corrected to the reception times, which would occur
without the delay
"""
# copy the dataframe
df_d = df.copy()
# get unique anchor ids
anchor_ids = df.an_id.unique()
# for each anchor search for delayed packets and distribute them
for an_id in anchor_ids:
mask_an_id = df.an_id == an_id
uts = df[mask_an_id].ts.unique()
for i in range(uts.size):
ts = df[mask_an_id & (df.ts == uts[i])]
an_sqns = ts.an_sqn.unique()
            # if more than one anchor sqn shares this timestamp, packets were delayed
if an_sqns.size > 1:
# find last properly received packet
pi = 1
while df[mask_an_id & (df.ts == uts[i - pi])].an_sqn.unique().size > 1:
pi = pi + 1
prev_ts = uts[i - pi]
prev_an_sqn = df_d[(df_d.an_id == an_id) & (df_d.ts == uts[i - pi])].an_sqn.values[0]
# correct timestamps
tse = distribute_packet_batch(ts.ts, ts.an_sqn, prev_ts, prev_an_sqn)
df_d.ts[(df_d.an_id == an_id) & (df_d.ts == uts[i])] = tse
return df_d
def distribute_packet_batch(ts, an_sqn, ts_p, an_sqn_p):
"""Correct timestamps of the packets, which were received in a batch due to delay introduced in the WiFi interface.
Parameters
----------
ts: array_like
timestamps of packets received in a batch [in seconds]
an_sqn: array_like
anchor sqns of packets received in a batch [0-255]
ts_p: float
the timestamp of the last properly received packet
an_sqn_p: int
the anchor sqn of the last properly received packet
Returns
-------
tse: ndarray
timestamps corrected to the reception times, which would occur without the delay
"""
# empty list for collected packets
tse = []
for t, ans in zip(ts, an_sqn):
# check if anchor sqn is higher than the previous one or the counter has turned
if ans >= an_sqn_p:
te = ts_p + ans - an_sqn_p
else:
te = ts_p + (256 + ans - an_sqn_p)
tse.append(te)
return np.array(tse)
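# Added illustrative example (hypothetical values): three packets with anchor sqns
# 254, 255 and 0 arrive in one delayed batch; the last properly received packet had
# ts_p=100 s and an_sqn_p=252, so
#   distribute_packet_batch([120, 120, 120], [254, 255, 0], 100, 252)
# returns array([102, 103, 104]) - the reception times that would occur without the delay.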
def rearrange_timestamps_ble(m_ble, tag_id, packet_rate, distribute_delayed=False):
"""Change timestamps values so that the consecutive packets sent at different times do not have the same ts.
The timestamps are changed as follows: \
new_ts = ts + 1/packet_rate * N \
where N is the sequential number of BLE packet inside WiFi frame.
Parameters
----------
m_ble: DataFrame
dataframe with measurement results
tag_id: int
tag identifier
packet_rate: float
packet rate set in the systems [packets per second]
distribute_delayed: bool, optional
if True call distribute_packets_ionis
Returns
-------
m_b: DataFrame
Input m_ble DataFrame with rearranged timestamps.
"""
# filter tag id
m_b = m_ble[m_ble.tag_id == tag_id]
if distribute_delayed: # distribute delayed packets
m_b = distribute_packets_ionis(m_b)
# group and bin by BLE ts
grouped = m_b.groupby(by=['ts', 'an_id', 'an_sqn', 'tag_id'])
bins = []
for n, g in grouped:
bins.append(pd.cut(g.ble_ts, packet_rate, labels=False))
m_b['bin'] = pd.concat(bins)
# group and average power per BLE receiver
grouped = m_b.groupby(by=['ts', 'an_id', 'tag_id', 'bin'])
m_b = grouped.agg({'rssi': log_mean})
m_b = m_b.reset_index()
# get ts with 1/rate
m_b['ts'] = m_b['ts'] + m_b['bin'] / packet_rate
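    # Added note: e.g. with packet_rate=4, BLE packets binned 0..3 inside one WiFi frame
    # get offsets of 0, 0.25, 0.5 and 0.75 s added to the frame timestamp.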
return m_b
def rearrange_timestamps_uwb(m_uwb, tag_id, packet_rate, distribute_delayed=False):
"""Change timestamps values so that the consecutive packets sent at different times do not have the same ts.
The timestamps are changed as follows: \
new_ts = ts + 1/packet_rate * N \
where N is the sequential number of UWB packet inside WiFi frame.
Parameters
----------
m_uwb: DataFrame
dataframe with measurement results
tag_id: int
tag identifier
packet_rate: float
packet rate set in the systems [packets per second]
distribute_delayed: bool, optional
if True call distribute_packets_ionis
Returns
-------
m_u: DataFrame
Input m_uwb DataFrame with rearranged timestamps.
"""
# filter tag id
m_u = m_uwb[m_uwb.tag_id == tag_id].copy()
if distribute_delayed: # distribute delayed packets
m_u = distribute_packets_ionis(m_u)
# group and bin by reception ts (in this case toa value)
grouped = m_u.groupby(by=['ts', 'an_sqn'])
bins = []
for n, g in grouped:
bins.append(pd.cut(g.toa, packet_rate, labels=False))
m_u['bin'] = pd.concat(bins)
# get ts with 1/rate
m_u['ts'] = m_u['ts'] + m_u['bin'] / packet_rate
return m_u
def measurement_array(m_df, mtype, data_frame=False):
"""Create measurement array [ts, meas values...]
Parameters
----------
m_df: DataFrame
measurement dataframe
mtype: str
measurement type: 'ble', 'uwb'
data_frame: bool
return dataframe, None tuple if true
Returns
-------
array: ndarray
measurement array in format [ts, mx, my, mz ...]
an_ids: ndarray
anchor_ids: [x,y,z ...]
df: DataFrame, optional
measurement array stored as dataframe (returned when data_frame==True)
"""
if mtype == 'uwb':
m = m_df[['ts', 'uwb_sqn', 'toa', 'an_id']].copy()
elif mtype == 'ble':
m = m_df[['ts', 'rssi', 'an_id']].copy()
else:
print("Unknown type")
return None, None
df = None
# get unique anchor ids
anchor_ids = np.sort(m.an_id.unique())
# create array
if mtype == 'uwb':
for i in anchor_ids:
mp = m[m.an_id == i].rename(columns={'toa': 'toa_' + str(i)}).drop(columns='an_id')
if df is None:
df = mp
else:
df = df.merge(mp, how='outer', on=['ts', 'uwb_sqn'])
df = df.sort_values(['ts', 'uwb_sqn'], ascending=[True, True]).reset_index(drop=True)
df = df.drop(columns='uwb_sqn')
elif mtype == 'ble':
for i in anchor_ids:
mp = m[m.an_id == i].rename(columns={'rssi': 'rssi_' + str(i)}).drop(columns='an_id')
if df is None:
df = mp
else:
df = df.merge(mp, how='outer', on=['ts'])
df = df.sort_values(['ts'], ascending=[True]).reset_index(drop=True)
array = df.values
anchor_ids = np.r_[0, anchor_ids] # add 0 for ts column
if data_frame:
return array, anchor_ids, df
return array, anchor_ids
def hybrid_array(dfs, on='ts', how='outer'):
""" Create a hybrid measurement array
Parameters
----------
dfs: iterable
DataFrames which would be merged into a hybrid frame
on: str, default: 'ts'
on which column the frames will be merged
how: str, default: 'outer'
how the frames will be merged
Returns
-------
m: ndarray
measurement array in format [ts, results...]
m_type: ndarray
type of data in each of the columns e.g. ['ts', 'rssi', 'toa]
m_id: ndarray
anchor ids of the columns df. Default id is 0 - for 'ts' and other parameter
not associated with any particular anchor.
df: DataFrame
hybrid DataFrame
"""
df = dfs[0]
for d in dfs[1:]:
df = df.merge(d, on=on, how=how)
m_type= np.array([x.split('_')[0] for x in df.columns[:]])
m_id= np.array([x.split('_')[1] if '_' in x else 0 for x in df.columns[:] ]).astype('int')
return df.values, m_type, m_id, df
def log_mean(v_db, axis=None):
""" Calculate average for values in log scale by converting to linear and back to log.
Parameters
----------
v_db: ndarray
values in log scale
axis: {int, None}, optional
axis along which the mean will be calculated
Returns
-------
avg: {ndarray, double}
mean value
"""
v_lin = 10 ** (v_db / 10) # Power in mW
l_mean = np.nanmean(v_lin, axis=axis)
db_mean = 10 * np.log10(l_mean)
return db_mean
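# Added worked example (hypothetical values): averaging -90 dBm and -80 dBm in linear
# scale gives (1e-9 + 1e-8) / 2 = 5.5e-9 mW, i.e. 10*log10(5.5e-9), about -82.6 dB,
# noticeably higher than the -85 dB a plain arithmetic mean of the dB values would give.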
|
[
"pandas.DataFrame",
"pandas.cut",
"numpy.rint",
"numpy.array",
"numpy.linalg.norm",
"numpy.log10",
"pandas.concat",
"numpy.nanmean"
] |
[((1886, 1903), 'numpy.array', 'np.array', (['ble_res'], {}), '(ble_res)\n', (1894, 1903), True, 'import numpy as np\n'), ((1918, 1935), 'numpy.array', 'np.array', (['uwb_res'], {}), '(uwb_res)\n', (1926, 1935), True, 'import numpy as np\n'), ((7188, 7201), 'numpy.array', 'np.array', (['tse'], {}), '(tse)\n', (7196, 7201), True, 'import numpy as np\n'), ((8358, 8373), 'pandas.concat', 'pd.concat', (['bins'], {}), '(bins)\n', (8367, 8373), True, 'import pandas as pd\n'), ((9834, 9849), 'pandas.concat', 'pd.concat', (['bins'], {}), '(bins)\n', (9843, 9849), True, 'import pandas as pd\n'), ((13234, 13262), 'numpy.nanmean', 'np.nanmean', (['v_lin'], {'axis': 'axis'}), '(v_lin, axis=axis)\n', (13244, 13262), True, 'import numpy as np\n'), ((1979, 2123), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ble_res', 'columns': "['ts', 'an_id', 'an_sqn', 'an_p', 'rx_id', 'tag_id', 'ble_ts', 'rssi',\n 'pres', 'volt', 'steps', 'alert']"}), "(data=ble_res, columns=['ts', 'an_id', 'an_sqn', 'an_p',\n 'rx_id', 'tag_id', 'ble_ts', 'rssi', 'pres', 'volt', 'steps', 'alert'])\n", (1991, 2123), True, 'import pandas as pd\n'), ((2687, 2789), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'uwb_res', 'columns': "['ts', 'an_id', 'an_sqn', 'rx_id', 'tag_id', 'uwb_sqn', 'toa']"}), "(data=uwb_res, columns=['ts', 'an_id', 'an_sqn', 'rx_id',\n 'tag_id', 'uwb_sqn', 'toa'])\n", (2699, 2789), True, 'import pandas as pd\n'), ((13282, 13298), 'numpy.log10', 'np.log10', (['l_mean'], {}), '(l_mean)\n', (13290, 13298), True, 'import numpy as np\n'), ((3249, 3292), 'numpy.rint', 'np.rint', (['((ble_df.ts - ts_min).values / 1000)'], {}), '((ble_df.ts - ts_min).values / 1000)\n', (3256, 3292), True, 'import numpy as np\n'), ((3317, 3360), 'numpy.rint', 'np.rint', (['((uwb_df.ts - ts_min).values / 1000)'], {}), '((uwb_df.ts - ts_min).values / 1000)\n', (3324, 3360), True, 'import numpy as np\n'), ((4524, 4576), 'numpy.linalg.norm', 'np.linalg.norm', (['(an_f[m_uwb.an_id - 1] - an_r)'], {'axis': '(1)'}), '(an_f[m_uwb.an_id - 1] - an_r, axis=1)\n', (4538, 4576), True, 'import numpy as np\n'), ((8296, 8339), 'pandas.cut', 'pd.cut', (['g.ble_ts', 'packet_rate'], {'labels': '(False)'}), '(g.ble_ts, packet_rate, labels=False)\n', (8302, 8339), True, 'import pandas as pd\n'), ((9757, 9797), 'pandas.cut', 'pd.cut', (['g.toa', 'packet_rate'], {'labels': '(False)'}), '(g.toa, packet_rate, labels=False)\n', (9763, 9797), True, 'import pandas as pd\n'), ((3454, 3497), 'numpy.rint', 'np.rint', (['((uwb_df.ts - ts_min).values / 1000)'], {}), '((uwb_df.ts - ts_min).values / 1000)\n', (3461, 3497), True, 'import numpy as np\n'), ((3663, 3706), 'numpy.rint', 'np.rint', (['((ble_df.ts - ts_min).values / 1000)'], {}), '((ble_df.ts - ts_min).values / 1000)\n', (3670, 3706), True, 'import numpy as np\n')]
|
import ChessFuntions
import pprint
game = ChessFuntions.Chessgame()
game.setup()
game.wereToMove(2, 1)
|
[
"ChessFuntions.Chessgame"
] |
[((42, 67), 'ChessFuntions.Chessgame', 'ChessFuntions.Chessgame', ([], {}), '()\n', (65, 67), False, 'import ChessFuntions\n')]
|
import logging
import inspect
import mechanize
log = logging.getLogger(__name__)
class Service(object):
"""
The superclass of all services.
When creating a service, inherit from this class
and implement the following methods as necessary:
__init__
authenticate
check_<attribute>
modify_<attribute>
"""
_NO_DOCSTRING = "No documentation provided."
_authenticated = False
_cache = {}
def __init__(self, *args, **kwargs):
raise NotImplementedError()
def authenticate(self, *args, **kwargs):
raise NotImplementedError()
@classmethod
def documentation(cls):
doc = inspect.getdoc(cls)
base = [doc or cls._NO_DOCSTRING, "Attributes:"]
name = lambda: "=== %s ===" % cls.__name__
return name() + "\n" + "\n\t".join(base
+ ["\t%s %s" % (name, doc or "")
for name, doc in cls.attributes_with_docstrings()]
)
@classmethod
def attributes(cls):
methods = dir(cls)
return sorted([x.replace('check_', '')
for x in methods if x.startswith('check_')
and (x.replace('check_', 'modify_') in methods)])
@classmethod
def attributes_with_docstrings(cls):
return [(x, inspect.getdoc(getattr(cls, "check_" + x))
or inspect.getdoc(getattr(cls, "modify_" + x)))
for x in cls.attributes()]
def __match(self, check, value, takes_arguments):
if takes_arguments:
r = check(value)
if r in [True, False]:
return r
else:
raise RuntimeError(
"Value returned from %s.%s not True or False" %
(check.im_class.__name__, check.__name__)
)
else:
return check() == value
def ensure(self, key, value):
name = self.__class__.__name__
log = logging.getLogger("service.%s" % name)
check = getattr(self, "check_" + key.lower(), None)
modify = getattr(self, "modify_" + key.lower(), None)
if check and modify:
log.info("Checking %s on %s...", key, name)
takes_arguments = len(inspect.getargspec(check).args[1:]) > 0
match = lambda: self.__match(check, value, takes_arguments)
if not match():
log.info("Did not find expected value '%s'.", value)
log.info("Updating %s on %s...", key, name)
modify(value)
if not match():
raise RuntimeError("Value of %s on %s has not changed "
"after modification. Please verify.",
key, name)
else:
log.info("Success! Updated %s on %s.", key, name)
return True
elif check and not modify:
log.warning("Missing modifier for %s on %s.", key, name)
elif modify and not check:
log.warning("Missing checker for %s on %s.", key, name)
else: # this property does not exist on this service
return None
def cached_property(fn):
"""
Decorator that turns the given method into a cached property.
To clear the cache, delete self._cache[fn].
The preferred way of clearing the cache is by using an
"@invalidates_cache" decorator on another method.
"""
def wrapped(self, *args, **kwargs):
if fn not in self._cache:
self._cache[fn] = fn(self, *args, **kwargs)
return self._cache[fn]
return property(wrapped)
def invalidates_cache(fn):
"""
Clears all cached properties after the decorated function is called.
Useful when changing external (third-party) state that requires
reverification. (e.g.: decorate a "modify_something" method with this.)
"""
def wrapped(self, *args, **kwargs):
r = fn(self, *args, **kwargs)
self._cache = {}
return r
return wrapped
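# Minimal usage sketch (added; ExampleService and its helper calls are hypothetical):
#
#   class ExampleService(Service):
#       @cached_property
#       def profile(self):
#           return self._fetch_profile()   # remote call runs once, then self._cache serves it
#
#       @invalidates_cache
#       def modify_email(self, value):
#           self._push_email(value)        # changing remote state clears the cache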
def with_new_browser(fn):
"""
Forces a new browser object to be created before running the wrapped
function.
"""
def wrapped(self, *args, **kwargs):
self.browser = mechanize.Browser()
return fn(self, *args, **kwargs)
return wrapped
def requires_authentication(fn):
"""
Decorator that forces the "authenticate" method of a service
to have been called before the given method.
"""
def wrapped(self, *args, **kwargs):
if not self._authenticated:
self.browser = self.authenticate(mechanize.Browser())
self._authenticated = True
return fn(self, *args, **kwargs)
return wrapped
|
[
"inspect.getargspec",
"mechanize.Browser",
"inspect.getdoc",
"logging.getLogger"
] |
[((53, 80), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (70, 80), False, 'import logging\n'), ((669, 688), 'inspect.getdoc', 'inspect.getdoc', (['cls'], {}), '(cls)\n', (683, 688), False, 'import inspect\n'), ((1965, 2003), 'logging.getLogger', 'logging.getLogger', (["('service.%s' % name)"], {}), "('service.%s' % name)\n", (1982, 2003), False, 'import logging\n'), ((4243, 4262), 'mechanize.Browser', 'mechanize.Browser', ([], {}), '()\n', (4260, 4262), False, 'import mechanize\n'), ((4609, 4628), 'mechanize.Browser', 'mechanize.Browser', ([], {}), '()\n', (4626, 4628), False, 'import mechanize\n'), ((2245, 2270), 'inspect.getargspec', 'inspect.getargspec', (['check'], {}), '(check)\n', (2263, 2270), False, 'import inspect\n')]
|
# A collection of various tools to help estimate and analyze the tail exponent.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from FatTailedTools.plotting import plot_survival_function
from FatTailedTools.survival import get_survival_function
def fit_alpha_linear(series, tail_start_mad=2.5, plot=True, return_loc=False):
'''
Estimates the tail parameter by fitting a linear function to the log-log tail of the survival function.
'tail_start_mad' defines where the tail starts in terms of the mean absolute deviation (typically between 2-4 MADs).
    The estimated location of the Pareto (with the estimated tail exponent) will also be returned if 'return_loc' is True.
'''
# Get survival function values
if plot:
survival, ax = plot_survival_function(series, tail_zoom=False)
else:
survival = get_survival_function(series)
# Estimate tail start (= everything beyond 'tail_start_mad' mean absolute deviations)
tail_start = get_tail_start(series, tail_start_mad)
# Get tail
survival_tail = np.log10(survival.loc[survival['Values'] >= tail_start].iloc[:-1])
# Fit the tail
tail_fit = np.polyfit(survival_tail['Values'], survival_tail['P'], 1)
lin_func = np.poly1d(tail_fit)
# Get tail parameter and location/scale
tail = -tail_fit[0]
location = (1 - tail_fit[1]) / tail_fit[0]
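    # Added note: a Pareto-like tail is a straight line on the log-log survival plot,
    # so the fitted slope gives -alpha and the intercept pins down the location parameter.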
# Get MSE (mean squared error)
    mse_error = np.mean(np.square(np.subtract(lin_func(survival_tail['Values']), survival_tail['P'])))
# Plot the fit
if plot:
ax.plot(10**survival_tail['Values'], 10**lin_func(survival_tail['Values']), 'r');
ax.legend(['Fit (MSE = {:.2f})'.format(mse_error), 'Data']);
plt.title('Tail exponent fitted to tail (alpha = {:.2f}, loc = {:.2f})'.format(tail, location));
# Construct result
    result = (tail, location) if return_loc else tail
return result
def get_tail_start(series, tail_start_mad):
'''
Returns the start of the tail of 'series' based on 'tail_start_mad'.
'tail_start_mad' defines where the tail starts in terms of the mean absolute deviation (typically between 2-4 MADs).
'''
return tail_start_mad * series.abs().mad()
from scipy.stats import t
def fit_alpha(series, plot=True):
'''
    Estimates the tail parameter by fitting a Student-T to the data.
If the passed data is from a one-sided distribution, it will first be mirrored at 0 to make it symmetrical.
'''
# Is the data only one-sided?
if (series.dropna() < 0).sum() * (series.dropna() > 0).sum() == 0:
# ... then construct a two-sided distribution
series = pd.concat([-series.dropna().abs(), series.dropna().abs()])
# Fit the distribution
params = t.fit(series.dropna())
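    # Added note: scipy.stats.t.fit returns (df, loc, scale); the degrees of freedom of a
    # Student-t distribution equal its power-law tail exponent, hence params[0] below.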
if plot:
_, ax = plot_survival_function(series, distribution=(t, params));
plt.title('Tail exponent estimated from fitting (alpha = {:.2f})'.format(params[0]));
return params[0]
import seaborn as sns
def fit_alpha_subsampling(series, frac=0.7, n_subsets=100, n_tail_start_samples=1, plot=True, return_loc=False):
'''
Estimates the tail parameter by fitting a linear function to the log-log tail of the survival function.
Uses 'n_subsets' subsamples to average results over subsets with a fraction 'frac' of samples kept.
If return_loc is True, also returns where the tail of the distribution is assumed to start (using random subsampling with 'n_tail_start_samples' samples per subset).
'''
# Set up lists
_results_both = []
_results_left = []
_results_right = []
# Subsample and fit
for subsample in [series.sample(frac=frac) for i in range(n_subsets)]:
for tail_start_mad in np.random.normal(2.5, 0.5, n_tail_start_samples):
_results_both.append(subsample.abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
_results_left.append(subsample.where(subsample < 0).abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
_results_right.append(subsample.where(subsample >= 0).abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
# Assemble into DataFrame
alphas = pd.DataFrame.from_records(np.hstack([_results_both, _results_left, _results_right]), columns=pd.MultiIndex.from_product([['Both', 'Left', 'Right'], ['Tail Exponent', 'Location']]))
# Plot
if plot:
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle('Tail exponents for {} with random subsamples'.format(series.name))
for idx, name in enumerate(['Both', 'Left', 'Right']):
sns.histplot(data=alphas[(name, 'Tail Exponent')], color=['C7', 'C3', 'C0'][idx], stat='probability', bins=10, ax=ax[idx]);
ax[idx].set_title('Median = {:.1f} | Mean = {:.1f} ({})'.format(alphas[(name, 'Tail Exponent')].median(), alphas[(name, 'Tail Exponent')].mean(), ['both', 'left', 'right'][idx]));
ax[idx].set_xlabel('Tail exponent ({})'.format(['both', 'left', 'right'][idx]));
plt.show();
# Also plot locations if return_loc
if return_loc:
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle('Locations for {} with random subsamples'.format(series.name))
for idx, name in enumerate(['Both', 'Left', 'Right']):
sns.histplot(data=alphas[(name, 'Location')], color=['C7', 'C3', 'C0'][idx], stat='probability', bins=10, ax=ax[idx]);
ax[idx].set_title('Median = {:.1f} | Mean = {:.1f} ({})'.format(alphas[(name, 'Location')].median(), alphas[(name, 'Location')].mean(), ['both', 'left', 'right'][idx]));
ax[idx].set_xlabel('Location ({})'.format(['both', 'left', 'right'][idx]));
plt.show();
# Construct result
result = alphas if return_loc else alphas.loc[:, (slice(None), 'Tail Exponent')]
return result
|
[
"numpy.poly1d",
"seaborn.histplot",
"matplotlib.pyplot.show",
"numpy.polyfit",
"FatTailedTools.survival.get_survival_function",
"numpy.hstack",
"pandas.MultiIndex.from_product",
"FatTailedTools.plotting.plot_survival_function",
"numpy.random.normal",
"numpy.log10",
"matplotlib.pyplot.subplots"
] |
[((1093, 1159), 'numpy.log10', 'np.log10', (["survival.loc[survival['Values'] >= tail_start].iloc[:-1]"], {}), "(survival.loc[survival['Values'] >= tail_start].iloc[:-1])\n", (1101, 1159), True, 'import numpy as np\n'), ((1199, 1257), 'numpy.polyfit', 'np.polyfit', (["survival_tail['Values']", "survival_tail['P']", '(1)'], {}), "(survival_tail['Values'], survival_tail['P'], 1)\n", (1209, 1257), True, 'import numpy as np\n'), ((1273, 1292), 'numpy.poly1d', 'np.poly1d', (['tail_fit'], {}), '(tail_fit)\n', (1282, 1292), True, 'import numpy as np\n'), ((795, 842), 'FatTailedTools.plotting.plot_survival_function', 'plot_survival_function', (['series'], {'tail_zoom': '(False)'}), '(series, tail_zoom=False)\n', (817, 842), False, 'from FatTailedTools.plotting import plot_survival_function\n'), ((872, 901), 'FatTailedTools.survival.get_survival_function', 'get_survival_function', (['series'], {}), '(series)\n', (893, 901), False, 'from FatTailedTools.survival import get_survival_function\n'), ((2873, 2929), 'FatTailedTools.plotting.plot_survival_function', 'plot_survival_function', (['series'], {'distribution': '(t, params)'}), '(series, distribution=(t, params))\n', (2895, 2929), False, 'from FatTailedTools.plotting import plot_survival_function\n'), ((3831, 3879), 'numpy.random.normal', 'np.random.normal', (['(2.5)', '(0.5)', 'n_tail_start_samples'], {}), '(2.5, 0.5, n_tail_start_samples)\n', (3847, 3879), True, 'import numpy as np\n'), ((4419, 4476), 'numpy.hstack', 'np.hstack', (['[_results_both, _results_left, _results_right]'], {}), '([_results_both, _results_left, _results_right])\n', (4428, 4476), True, 'import numpy as np\n'), ((4638, 4673), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (4650, 4673), True, 'import matplotlib.pyplot as plt\n'), ((5299, 5309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5307, 5309), True, 'import matplotlib.pyplot as plt\n'), ((4486, 4576), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Both', 'Left', 'Right'], ['Tail Exponent', 'Location']]"], {}), "([['Both', 'Left', 'Right'], ['Tail Exponent',\n 'Location']])\n", (4512, 4576), True, 'import pandas as pd\n'), ((4869, 4994), 'seaborn.histplot', 'sns.histplot', ([], {'data': "alphas[name, 'Tail Exponent']", 'color': "['C7', 'C3', 'C0'][idx]", 'stat': '"""probability"""', 'bins': '(10)', 'ax': 'ax[idx]'}), "(data=alphas[name, 'Tail Exponent'], color=['C7', 'C3', 'C0'][\n idx], stat='probability', bins=10, ax=ax[idx])\n", (4881, 4994), True, 'import seaborn as sns\n'), ((5418, 5453), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (5430, 5453), True, 'import matplotlib.pyplot as plt\n'), ((6090, 6100), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6098, 6100), True, 'import matplotlib.pyplot as plt\n'), ((5664, 5783), 'seaborn.histplot', 'sns.histplot', ([], {'data': "alphas[name, 'Location']", 'color': "['C7', 'C3', 'C0'][idx]", 'stat': '"""probability"""', 'bins': '(10)', 'ax': 'ax[idx]'}), "(data=alphas[name, 'Location'], color=['C7', 'C3', 'C0'][idx],\n stat='probability', bins=10, ax=ax[idx])\n", (5676, 5783), True, 'import seaborn as sns\n')]
|
import types
import warnings
from collections.abc import Iterable
from inspect import getfullargspec
import numpy as np
class _DatasetApply:
"""
Helper class to apply function to
`pysprint.core.bases.dataset.Dataset` objects.
"""
def __init__(
self,
obj,
func,
axis=None,
args=None,
kwargs=None
):
self.obj = obj
self.args = args or ()
self.kwargs = kwargs or {}
self.f = func
self.axis = axis
if self.axis == "x" or self.axis == 0:
self.target = "x"
elif self.axis == "y" or self.axis == 1:
self.target = "y"
else:
raise ValueError("Axis must be 'x', 'y', '0' or '1'.")
self.shape = len(getattr(self.obj, self.target))
def perform(self):
"""
Apply the specified function.
"""
if isinstance(self.f, str):
func = getattr(self.obj, self.f)
sig = getfullargspec(func)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
# Let's assume we don't mess up the shape internally
func(*self.args, **self.kwargs)
return self.obj # we need to return this because of `inplacify` deco.
elif isinstance(self.f, np.ufunc):
target = getattr(self.obj, self.target)
retval = self.f(target, *self.args, **self.kwargs)
value = self._validate(retval)
setattr(self.obj, self.target, value)
if self.target == "y":
setattr(self.obj, "y_norm", value)
return value
elif isinstance(self.f, types.FunctionType):
sig = getfullargspec(self.f)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
# we can safely vectorize it here
self.f = np.vectorize(self.f)
target = getattr(self.obj, self.target)
retval = self.f(target, *self.args, **self.kwargs)
value = self._validate(retval)
setattr(self.obj, self.target, value)
if self.target == "y":
setattr(self.obj, "y_norm", value)
return value
def _validate(self, val):
if isinstance(val, (Iterable, np.ndarray)):
val = np.asarray(val, dtype=np.float64)
if val.ndim != 1:
val = np.concatenate(val).ravel()
warnings.warn("Function return value was flattened.")
if len(val) != len(np.unique(val)):
if len(np.unique(val)) == self.shape:
return val
else:
if self.target == "x":
raise ValueError(
f"Function returned duplicated values which is not allowed when"
" modifying the x axis. After filtering to unique values "
f"a {len(np.unique(val))}-length array was produced, "
f"but {self.shape} was expected."
)
return val
if len(val) != self.shape:
retval = self._broadcast(val)
return retval
return val
else:
raise TypeError("Function should return a number or Iterable type.")
def _broadcast(self, val):
if len(val) > self.shape:
return val[:self.shape]
elif len(val) < self.shape:
if not self.shape % len(val) == 0:
raise ValueError("Cannot broadcast safely to the desired shape.")
else:
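                # Added note: each element is repeated shape // len(val) times,
                # e.g. a length-2 result is tiled to [a, a, a, b, b, b] for shape 6.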
                return np.repeat(val, self.shape // len(val))
|
[
"numpy.vectorize",
"inspect.getfullargspec",
"numpy.concatenate",
"numpy.asarray",
"warnings.warn",
"numpy.unique"
] |
[((1015, 1035), 'inspect.getfullargspec', 'getfullargspec', (['func'], {}), '(func)\n', (1029, 1035), False, 'from inspect import getfullargspec\n'), ((2363, 2396), 'numpy.asarray', 'np.asarray', (['val'], {'dtype': 'np.float64'}), '(val, dtype=np.float64)\n', (2373, 2396), True, 'import numpy as np\n'), ((2494, 2547), 'warnings.warn', 'warnings.warn', (['"""Function return value was flattened."""'], {}), "('Function return value was flattened.')\n", (2507, 2547), False, 'import warnings\n'), ((1748, 1770), 'inspect.getfullargspec', 'getfullargspec', (['self.f'], {}), '(self.f)\n', (1762, 1770), False, 'from inspect import getfullargspec\n'), ((1921, 1941), 'numpy.vectorize', 'np.vectorize', (['self.f'], {}), '(self.f)\n', (1933, 1941), True, 'import numpy as np\n'), ((2580, 2594), 'numpy.unique', 'np.unique', (['val'], {}), '(val)\n', (2589, 2594), True, 'import numpy as np\n'), ((2450, 2469), 'numpy.concatenate', 'np.concatenate', (['val'], {}), '(val)\n', (2464, 2469), True, 'import numpy as np\n'), ((2620, 2634), 'numpy.unique', 'np.unique', (['val'], {}), '(val)\n', (2629, 2634), True, 'import numpy as np\n'), ((3006, 3020), 'numpy.unique', 'np.unique', (['val'], {}), '(val)\n', (3015, 3020), True, 'import numpy as np\n')]
|
# COUNTING EVEN NUMBERS: create a program that shows on screen all the even numbers between 1 and 50.
from time import sleep
print('EVEN NUMBERS BETWEEN 2 AND 50\n')
sleep(1)
for i in range(2, 51, 2):
print(i)
|
[
"time.sleep"
] |
[((157, 165), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (162, 165), False, 'from time import sleep\n')]
|
import os
import pytest
from sqlalchemy.inspection import inspect
from sqlalchemy_utils.functions import drop_database
from alembic import config
from dbutils import conn_uri_factory, DbConnection
def pytest_addoption(parser):
"""
Custom command line options required for test runs
"""
parser.addoption('--name', action='store', default=None,
help='Schema [transactional | warehouse] to operate on')
parser.addoption('--adapter', action='store', default='sqlite',
help='SQLAlchemy database connection adapter')
parser.addoption('--conf', action='store',
default=os.path.join(os.path.dirname(__file__),
'../alembic.ini'),
help="Alembic config INI path")
parser.addoption('--dbconf', action='store',
default=os.path.join(os.path.dirname(__file__),
'../conf/db.yml'),
help='Database connection parameters YAML path')
@pytest.fixture(scope='session')
def schema_name(request):
return request.config.getoption('--name')
@pytest.fixture(scope='session')
def alembic_cfg(request):
return request.config.getoption('--conf')
@pytest.fixture(scope='session')
def db_conf(request):
return request.config.getoption('--dbconf')
@pytest.fixture(scope='session')
def db_adapter(request):
return request.config.getoption('--adapter')
@pytest.fixture(scope='session')
def alchemy_url(db_conf, db_adapter, schema_name, request):
return conn_uri_factory(db_conf, db_adapter, schema_name)
@pytest.fixture(scope='session', autouse=True)
def db_setup(alembic_cfg, schema_name, db_conf, db_adapter, alchemy_url, request):
# run all db migrations
config.main(['-c', alembic_cfg, '-n', schema_name,
'-x', 'dbconf={0}'.format(db_conf),
'-x', 'adapter={0}'.format(db_adapter),
'upgrade', 'head'])
def db_drop():
# db teardown - drop all
config.main(['-c', alembic_cfg, '-n', schema_name,
'-x', 'dbconf={0}'.format(db_conf),
'-x', 'adapter={0}'.format(db_adapter),
'downgrade', 'base'])
# drop db incl. alembic tables
drop_database(alchemy_url)
request.addfinalizer(db_drop)
@pytest.fixture(scope='session')
def db_conn(alchemy_url):
return DbConnection(alchemy_url)
@pytest.fixture(scope='session')
def db_inspector(db_conn):
return inspect(db_conn.engine)
@pytest.fixture(scope='session')
def alembic_tables():
"""
Tables created by Alembic to track migrations.
Fixture is maintained to differentiate alembic tables
from application tables when `inspector.get_table_names()`
for tests
"""
tbl_list = ['alembic_version']
return tbl_list
@pytest.fixture(scope='session')
def db_tables(schema_name):
"""
Manifest of all application tables expected
in database when all migrations are run
"""
if schema_name.lower() == 'transactional':
tbl_list = ['participant', 'program',
'participant_program', 'program_provider',
'provider', 'outcome',
'exit_type', 'wage', 'entity_type']
else:
tbl_list = []
# sort table list - easier test comparison
tbl_list.sort()
return tbl_list
|
[
"sqlalchemy.inspection.inspect",
"os.path.dirname",
"pytest.fixture",
"dbutils.DbConnection",
"dbutils.conn_uri_factory",
"sqlalchemy_utils.functions.drop_database"
] |
[((1059, 1090), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1073, 1090), False, 'import pytest\n'), ((1166, 1197), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1180, 1197), False, 'import pytest\n'), ((1273, 1304), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1287, 1304), False, 'import pytest\n'), ((1378, 1409), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1392, 1409), False, 'import pytest\n'), ((1487, 1518), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1501, 1518), False, 'import pytest\n'), ((1644, 1689), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (1658, 1689), False, 'import pytest\n'), ((2388, 2419), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2402, 2419), False, 'import pytest\n'), ((2486, 2517), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2500, 2517), False, 'import pytest\n'), ((2583, 2614), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2597, 2614), False, 'import pytest\n'), ((2898, 2929), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2912, 2929), False, 'import pytest\n'), ((1590, 1640), 'dbutils.conn_uri_factory', 'conn_uri_factory', (['db_conf', 'db_adapter', 'schema_name'], {}), '(db_conf, db_adapter, schema_name)\n', (1606, 1640), False, 'from dbutils import conn_uri_factory, DbConnection\n'), ((2457, 2482), 'dbutils.DbConnection', 'DbConnection', (['alchemy_url'], {}), '(alchemy_url)\n', (2469, 2482), False, 'from dbutils import conn_uri_factory, DbConnection\n'), ((2556, 2579), 'sqlalchemy.inspection.inspect', 'inspect', (['db_conn.engine'], {}), '(db_conn.engine)\n', (2563, 2579), False, 'from sqlalchemy.inspection import inspect\n'), ((2323, 2349), 'sqlalchemy_utils.functions.drop_database', 'drop_database', (['alchemy_url'], {}), '(alchemy_url)\n', (2336, 2349), False, 'from sqlalchemy_utils.functions import drop_database\n'), ((666, 691), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (681, 691), False, 'import os\n'), ((898, 923), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (913, 923), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
# HarmonicNet.
# Copyright (C) 2021 <NAME>, <NAME>, S.Koppers, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please refer to the documentation for more information about the software
# as well as for installation instructions.
#
# If you use this application for your work, please cite the following
# publication:
#
# <NAME>, <NAME>, S.Koppers, <NAME>,
# "Spherical Harmonics for Shape-Constrained 3D Cell Segmentation", ISBI, 2021.
#
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import pytorch_lightning as pl
import json
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from torch.utils.data import DataLoader
from dataloader.harmonic_dataloader import MeristemH5Dataset
from ThirdParty.radam import RAdam
class HarmonicNet_module(nn.Module):
"""Implementation of the 3D U-Net architecture.
"""
def __init__(self, in_channels, coefficients, feat_channels=16, norm_method='instance', **kwargs):
super(HarmonicNet_module, self).__init__()
self.in_channels = in_channels
self.coefficients = coefficients
self.feat_channels = feat_channels
self.norm_method = norm_method # instance | batch | none
if self.norm_method == 'instance':
self.norm = nn.InstanceNorm3d
elif self.norm_method == 'batch':
self.norm = nn.BatchNorm3d
elif self.norm_method == 'none':
self.norm = nn.Identity
else:
raise ValueError('Unknown normalization method "{0}". Choose from "instance|batch|none".'.format(self.norm_method))
# Define layer instances
self.conv_in = nn.Sequential(
nn.Conv3d(in_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
nn.Conv3d(feat_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
self.norm(feat_channels)
)
self.conv_pre = nn.Sequential(
nn.Conv3d(feat_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
nn.Conv3d(feat_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
self.norm(feat_channels)
)
self.down1 = nn.Sequential(
nn.Conv3d(feat_channels, feat_channels*4, kernel_size=4, padding=1, stride=2),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.down1_conv = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.down2 = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*8, kernel_size=4, padding=1, stride=2),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.down2_conv = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.down3 = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*16, kernel_size=4, padding=1, stride=2),
nn.PReLU(feat_channels*16),
self.norm(feat_channels*16)
)
self.down3_conv = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*16, kernel_size=3, padding=1),
nn.PReLU(feat_channels*16),
nn.Conv3d(feat_channels*16, feat_channels*16, kernel_size=3, padding=1),
nn.PReLU(feat_channels*16),
self.norm(feat_channels*16)
)
self.up1 = nn.Sequential(
nn.ConvTranspose3d(feat_channels*16, feat_channels*8, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.up1_conv = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*8, kernel_size=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.up2 = nn.Sequential(
nn.ConvTranspose3d(feat_channels*8, feat_channels*4, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.up2_conv = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*4, kernel_size=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.det1 = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, 1, kernel_size=1),
nn.Sigmoid()
)
self.shape1 = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*16, kernel_size=3, padding=1),
nn.PReLU(feat_channels*16),
nn.Conv3d(feat_channels*16, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, coefficients, kernel_size=1)
)
self.det2 = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, 1, kernel_size=1),
nn.Sigmoid()
)
self.shape2 = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, coefficients, kernel_size=1)
)
self.det3 = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, 1, kernel_size=1),
nn.Sigmoid()
)
self.shape3 = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, coefficients, kernel_size=1)
)
def forward(self, img):
conv_pre1 = self.conv_in(img)
conv_pre2 = self.conv_pre(conv_pre1)
conv_pre3 = self.conv_pre(conv_pre2+conv_pre1)
conv_pre4 = self.conv_pre(conv_pre3+conv_pre2)
conv_pre5 = self.conv_pre(conv_pre4+conv_pre3)
down1 = self.down1(conv_pre5)
down1_conv = self.down1_conv(down1)
down2 = self.down2(down1_conv+down1)
down2_conv = self.down2_conv(down2)
down3 = self.down3(down2_conv+down2)
down3_conv = self.down3_conv(down3)
up1 = self.up1(down3_conv+down3)
up1_conv = self.up1_conv(torch.cat((up1,down2_conv),1))
up2 = self.up2(up1_conv+up1)
up2_conv = self.up2_conv(torch.cat((up2,down1_conv),1))
det1 = self.det1(down3_conv)
shape1 = self.shape1(down3_conv)
out1 = torch.cat((det1,shape1),1)
det2 = self.det2(up1_conv)
shape2 = self.shape2(up1_conv)
out2 = torch.cat((det2,shape2),1)
det3 = self.det3(up2_conv)
shape3 = self.shape3(up2_conv)
out3 = torch.cat((det3,shape3),1)
return out1, out2, out3
class HarmonicNet(pl.LightningModule):
def __init__(self, hparams):
super(HarmonicNet, self).__init__()
if type(hparams) is dict:
hparams = Namespace(**hparams)
self.hparams = hparams
self.augmentation_dict = {}
# get the number of coefficients
self.num_coefficients = int((self.hparams.sh_order+1)**2)
# networks
self.network = HarmonicNet_module(in_channels=hparams.in_channels, coefficients=self.num_coefficients, feat_channels=hparams.feat_channels, norm_method=hparams.norm_method)
# cache for generated images
self.last_predictions = None
self.last_imgs = None
self.last_masks = None
def forward(self, z):
return self.network(z)
def load_pretrained(self, pretrained_file, strict=True, verbose=True):
# Load the state dict
state_dict = torch.load(pretrained_file)['state_dict']
# Make sure to have a weight dict
if not isinstance(state_dict, dict):
state_dict = dict(state_dict)
# Get parameter dict of current model
param_dict = dict(self.network.named_parameters())
layers = []
for layer in param_dict:
if strict and not 'network.'+layer in state_dict:
if verbose:
print('Could not find weights for layer "{0}"'.format(layer))
continue
try:
param_dict[layer].data.copy_(state_dict['network.'+layer].data)
layers.append(layer)
except (RuntimeError, KeyError) as e:
print('Error at layer {0}:\n{1}'.format(layer, e))
self.network.load_state_dict(param_dict)
if verbose:
print('Loaded weights for the following layers:\n{0}'.format(layers))
def loss_centroid(self, y_hat, y):
loss = F.l1_loss(y_hat, y, reduction='none')
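        # Added note: foreground voxels are weighted by the background voxel count and
        # vice versa (clamped to [1, numel]), balancing the sparse centroid targets.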
weight = y*torch.clamp(torch.sum(y<0.5),1,y.numel()) + (1-y)*torch.clamp(torch.sum(y>0.5),1,y.numel())
weight = torch.div(weight, y.numel())
loss = torch.mul(loss, weight)
loss = torch.sum(loss)
loss = torch.div(loss, torch.clamp(torch.sum(weight), 1, y.numel()))
return loss
def loss_encoding(self, y_hat, y, mask):
loss = F.l1_loss(y_hat, y, reduction='none')
loss = torch.mul(loss, mask)
loss = torch.sum(loss)
loss = torch.div(loss, torch.clamp(torch.sum(mask)*self.num_coefficients, self.num_coefficients, y.numel()))
return loss
def training_step(self, batch, batch_idx):
# Get image ans mask of current batch
self.last_imgs = batch['image']
self.centroid_small = batch['stride{0}/centroid_map'.format(self.hparams.strides[2])]
self.encoding_small = batch['stride{0}/encoding_map'.format(self.hparams.strides[2])]
self.centroid_medium = batch['stride{0}/centroid_map'.format(self.hparams.strides[1])]
self.encoding_medium = batch['stride{0}/encoding_map'.format(self.hparams.strides[1])]
self.centroid_large = batch['stride{0}/centroid_map'.format(self.hparams.strides[0])]
self.encoding_large = batch['stride{0}/encoding_map'.format(self.hparams.strides[0])]
# generate images
self.pred_small, self.pred_medium, self.pred_large = self.forward(self.last_imgs)
# get the centroid losses
loss_centroid_small = self.loss_centroid(self.pred_small[:,0:1,...], self.centroid_small)
loss_centroid_medium = self.loss_centroid(self.pred_medium[:,0:1,...], self.centroid_medium)
loss_centroid_large = self.loss_centroid(self.pred_large[:,0:1,...], self.centroid_large)
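# Combine the three output scales, weighting the finer-resolution predictions more heavily (1/6, 2/6, 3/6).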
loss_centroid = (1/6 * loss_centroid_small + 2/6 * loss_centroid_medium + 3/6 * loss_centroid_large)
# get the encoding losses
loss_encoding_small = self.loss_encoding(self.pred_small[:,1:,...], self.encoding_small, self.centroid_small)
loss_encoding_medium = self.loss_encoding(self.pred_medium[:,1:,...], self.encoding_medium, self.centroid_medium)
loss_encoding_large = self.loss_encoding(self.pred_large[:,1:,...], self.encoding_large, self.centroid_large)
loss_encoding = (1/6 * loss_encoding_small + 2/6 * loss_encoding_medium + 3/6 * loss_encoding_large)
loss = loss_centroid * self.hparams.centroid_weight + \
loss_encoding/self.num_coefficients * self.hparams.encoding_weight
tqdm_dict = {'centroid_loss': loss_centroid, 'encoding_loss':loss_encoding, 'epoch': self.current_epoch}
output = OrderedDict({
'loss': loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
if torch.isnan(loss):
print('Image minmax: {0}/{1}'.format(self.last_imgs.min(), self.last_imgs.max()))
print('Cent_small:{0}, Cent_medium:{1}, Cent_large:{2}, Enc_small:{3}, Enc_medium:{4}, Enc_large:{5}'.format(self.centroid_small.min(),\
self.centroid_medium.min(),\
self.centroid_large.min(),\
self.encoding_small.min(),\
self.encoding_medium.min(),\
self.encoding_large.min()))
print('Cent_small:{0}, Cent_medium:{1}, Cent_large:{2}, Enc_small:{3}, Enc_medium:{4}, Enc_large:{5}'.format(loss_centroid_small,\
loss_centroid_medium,\
loss_centroid_large,\
loss_encoding_small,\
loss_encoding_medium,\
loss_encoding_large))
return output
def test_step(self, batch, batch_idx):
test_imgs = batch['image']
centroid_small = batch['stride{0}/centroid_map'.format(self.hparams.strides[2])]
encoding_small = batch['stride{0}/encoding_map'.format(self.hparams.strides[2])]
centroid_medium = batch['stride{0}/centroid_map'.format(self.hparams.strides[1])]
encoding_medium = batch['stride{0}/encoding_map'.format(self.hparams.strides[1])]
centroid_large = batch['stride{0}/centroid_map'.format(self.hparams.strides[0])]
encoding_large = batch['stride{0}/encoding_map'.format(self.hparams.strides[0])]
pred_small, pred_medium, pred_large = self.forward(test_imgs)
loss_centroid_small = self.loss_centroid(pred_small[:,0:1,...], centroid_small)
loss_centroid_medium = self.loss_centroid(pred_medium[:,0:1,...], centroid_medium)
loss_centroid_large = self.loss_centroid(pred_large[:,0:1,...], centroid_large)
loss_centroid = (loss_centroid_small + loss_centroid_medium + loss_centroid_large) / 3
# get the encoding losses
loss_encoding_small = self.loss_encoding(pred_small[:,1:,...], encoding_small, centroid_small)
loss_encoding_medium = self.loss_encoding(pred_medium[:,1:,...], encoding_medium, centroid_medium)
loss_encoding_large = self.loss_encoding(pred_large[:,1:,...], encoding_large, centroid_large)
loss_encoding = (loss_encoding_small + loss_encoding_medium + loss_encoding_large) / 3
loss = self.hparams.centroid_weight * loss_centroid + \
self.hparams.encoding_weight * loss_encoding
return {'test_loss': loss}
def test_end(self, outputs):
avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
tensorboard_logs = {'test_loss': avg_loss}
return {'avg_test_loss': avg_loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
val_imgs = batch['image']
centroid_small = batch['stride{0}/centroid_map'.format(self.hparams.strides[2])]
encoding_small = batch['stride{0}/encoding_map'.format(self.hparams.strides[2])]
centroid_medium = batch['stride{0}/centroid_map'.format(self.hparams.strides[1])]
encoding_medium = batch['stride{0}/encoding_map'.format(self.hparams.strides[1])]
centroid_large = batch['stride{0}/centroid_map'.format(self.hparams.strides[0])]
encoding_large = batch['stride{0}/encoding_map'.format(self.hparams.strides[0])]
pred_small, pred_medium, pred_large = self.forward(val_imgs)
loss_centroid_small = self.loss_centroid(pred_small[:,0:1,...], centroid_small)
loss_centroid_medium = self.loss_centroid(pred_medium[:,0:1,...], centroid_medium)
loss_centroid_large = self.loss_centroid(pred_large[:,0:1,...], centroid_large)
loss_centroid = (loss_centroid_small + loss_centroid_medium + loss_centroid_large) / 3
# get the encoding losses
loss_encoding_small = self.loss_encoding(pred_small[:,1:,...], encoding_small, centroid_small)
loss_encoding_medium = self.loss_encoding(pred_medium[:,1:,...], encoding_medium, centroid_medium)
loss_encoding_large = self.loss_encoding(pred_large[:,1:,...], encoding_large, centroid_large)
loss_encoding = (loss_encoding_small + loss_encoding_medium + loss_encoding_large) / 3
loss = self.hparams.centroid_weight * loss_centroid + \
self.hparams.encoding_weight * loss_encoding
return {'val_loss': loss}
def validation_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss}
return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
def configure_optimizers(self):
opt = RAdam(self.network.parameters(), lr=self.hparams.learning_rate)
return [opt], []
def train_dataloader(self):
if self.hparams.train_list is None:
return None
else:
dataset = MeristemH5Dataset(self.hparams.train_list, self.hparams.data_root, patch_size=self.hparams.patch_size,\
image_group=self.hparams.image_groups[0], strides=self.hparams.strides, norm_method=self.hparams.data_norm,\
sh_order=self.hparams.sh_order)
return DataLoader(dataset, batch_size=self.hparams.batch_size, shuffle=True, drop_last=True)
def test_dataloader(self):
if self.hparams.test_list is None:
return None
else:
dataset = MeristemH5Dataset(self.hparams.test_list, self.hparams.data_root, patch_size=self.hparams.patch_size,\
image_group=self.hparams.image_groups[0], strides=self.hparams.strides, norm_method=self.hparams.data_norm, \
sh_order=self.hparams.sh_order)
return DataLoader(dataset, batch_size=self.hparams.batch_size)
def val_dataloader(self):
if self.hparams.val_list is None:
return None
else:
dataset = MeristemH5Dataset(self.hparams.val_list, self.hparams.data_root, patch_size=self.hparams.patch_size,\
image_group=self.hparams.image_groups[0], strides=self.hparams.strides, norm_method=self.hparams.data_norm,\
sh_order=self.hparams.sh_order)
return DataLoader(dataset, batch_size=self.hparams.batch_size)
def on_epoch_end(self):
# log sampled images
z_slice_small = int(self.pred_small.shape[2]//2)
grid_small = torch.cat((self.pred_small[:,0:1,z_slice_small,:,:], self.centroid_small[:,0:1,z_slice_small,:,:]), 0)
prediction_grid = torchvision.utils.make_grid(grid_small)
self.logger.experiment.add_image('centroids_small', prediction_grid, self.current_epoch)
z_slice_medium = int(self.pred_medium.shape[2]//2)
grid_small = torch.cat((self.pred_medium[:,0:1,z_slice_medium,:,:], self.centroid_medium[:,0:1,z_slice_medium,:,:]), 0)
prediction_grid = torchvision.utils.make_grid(grid_small)
self.logger.experiment.add_image('centroids_medium', prediction_grid, self.current_epoch)
z_slice_large = int(self.pred_large.shape[2]//2)
grid_small = torch.cat((self.pred_large[:,0:1,z_slice_large,:,:], self.centroid_large[:,0:1,z_slice_large,:,:]), 0)
prediction_grid = torchvision.utils.make_grid(grid_small)
self.logger.experiment.add_image('centroids_large', prediction_grid, self.current_epoch)
z_slice_raw = int(self.last_imgs.shape[2]//2)
img_grid = torchvision.utils.make_grid(self.last_imgs[:,0,z_slice_raw,:,:])
self.logger.experiment.add_image('raw_images', img_grid, self.current_epoch)
def set_augmentations(self, augmentation_dict_file):
self.augmentation_dict = json.load(open(augmentation_dict_file))
@staticmethod
def add_model_specific_args(parent_parser):
"""
Parameters you define here will be available to your model through self.hparams
"""
parser = ArgumentParser(parents=[parent_parser])
# network params
parser.add_argument('--in_channels', default=1, type=int)
parser.add_argument('--feat_channels', default=16, type=int)
parser.add_argument('--patch_size', default=(64,128,128), type=int, nargs='+')
parser.add_argument('--norm_method', default='instance', type=str)
# data
parser.add_argument('--data_norm', default='percentile', type=str)
parser.add_argument('--data_root', default=r'D:\LfB\pytorchRepo\data\PNAS', type=str)
parser.add_argument('--train_list', default=r'D:\LfB\pytorchRepo\data\PNAS_harmonic_plant_split1_train.csv', type=str)
parser.add_argument('--test_list', default=r'D:\LfB\pytorchRepo\data\PNAS_harmonic_plant_split1_test.csv', type=str)
parser.add_argument('--val_list', default=r'D:\LfB\pytorchRepo\data\PNAS_harmonic_plant_split1_val.csv', type=str)
parser.add_argument('--image_groups', default=('data/image',), type=str, nargs='+')
parser.add_argument('--strides', default=(2,4,8), type=int, nargs='+')
parser.add_argument('--sh_order', default=5, type=int)
# training params (opt)
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--learning_rate', default=0.001, type=float)
parser.add_argument('--centroid_weight', default=0.50, type=float)
parser.add_argument('--encoding_weight', default=0.50, type=float)
return parser
|
[
"argparse.Namespace",
"torch.nn.PReLU",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.stack",
"torch.nn.Conv3d",
"torch.nn.ConvTranspose3d",
"torch.load",
"torch.nn.functional.l1_loss",
"torch.cat",
"torch.mul",
"torchvision.utils.make_grid",
"collections.OrderedDict",
"torch.sum",
"torch.isnan",
"dataloader.harmonic_dataloader.MeristemH5Dataset",
"torch.nn.Sigmoid"
] |
[((9704, 9732), 'torch.cat', 'torch.cat', (['(det1, shape1)', '(1)'], {}), '((det1, shape1), 1)\n', (9713, 9732), False, 'import torch\n'), ((9829, 9857), 'torch.cat', 'torch.cat', (['(det2, shape2)', '(1)'], {}), '((det2, shape2), 1)\n', (9838, 9857), False, 'import torch\n'), ((9954, 9982), 'torch.cat', 'torch.cat', (['(det3, shape3)', '(1)'], {}), '((det3, shape3), 1)\n', (9963, 9982), False, 'import torch\n'), ((12027, 12064), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['y_hat', 'y'], {'reduction': '"""none"""'}), "(y_hat, y, reduction='none')\n", (12036, 12064), True, 'import torch.nn.functional as F\n'), ((12237, 12260), 'torch.mul', 'torch.mul', (['loss', 'weight'], {}), '(loss, weight)\n', (12246, 12260), False, 'import torch\n'), ((12276, 12291), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (12285, 12291), False, 'import torch\n'), ((12454, 12491), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['y_hat', 'y'], {'reduction': '"""none"""'}), "(y_hat, y, reduction='none')\n", (12463, 12491), True, 'import torch.nn.functional as F\n'), ((12507, 12528), 'torch.mul', 'torch.mul', (['loss', 'mask'], {}), '(loss, mask)\n', (12516, 12528), False, 'import torch\n'), ((12545, 12560), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (12554, 12560), False, 'import torch\n'), ((14842, 14914), 'collections.OrderedDict', 'OrderedDict', (["{'loss': loss, 'progress_bar': tqdm_dict, 'log': tqdm_dict}"], {}), "({'loss': loss, 'progress_bar': tqdm_dict, 'log': tqdm_dict})\n", (14853, 14914), False, 'from collections import OrderedDict\n'), ((14981, 14998), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (14992, 14998), False, 'import torch\n'), ((22813, 22928), 'torch.cat', 'torch.cat', (['(self.pred_small[:, 0:1, z_slice_small, :, :], self.centroid_small[:, 0:1,\n z_slice_small, :, :])', '(0)'], {}), '((self.pred_small[:, 0:1, z_slice_small, :, :], self.\n centroid_small[:, 0:1, z_slice_small, :, :]), 0)\n', (22822, 22928), False, 'import torch\n'), ((22942, 22981), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['grid_small'], {}), '(grid_small)\n', (22969, 22981), False, 'import torchvision\n'), ((23171, 23290), 'torch.cat', 'torch.cat', (['(self.pred_medium[:, 0:1, z_slice_medium, :, :], self.centroid_medium[:, 0:\n 1, z_slice_medium, :, :])', '(0)'], {}), '((self.pred_medium[:, 0:1, z_slice_medium, :, :], self.\n centroid_medium[:, 0:1, z_slice_medium, :, :]), 0)\n', (23180, 23290), False, 'import torch\n'), ((23304, 23343), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['grid_small'], {}), '(grid_small)\n', (23331, 23343), False, 'import torchvision\n'), ((23530, 23645), 'torch.cat', 'torch.cat', (['(self.pred_large[:, 0:1, z_slice_large, :, :], self.centroid_large[:, 0:1,\n z_slice_large, :, :])', '(0)'], {}), '((self.pred_large[:, 0:1, z_slice_large, :, :], self.\n centroid_large[:, 0:1, z_slice_large, :, :]), 0)\n', (23539, 23645), False, 'import torch\n'), ((23659, 23698), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['grid_small'], {}), '(grid_small)\n', (23686, 23698), False, 'import torchvision\n'), ((23878, 23946), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['self.last_imgs[:, 0, z_slice_raw, :, :]'], {}), '(self.last_imgs[:, 0, z_slice_raw, :, :])\n', (23905, 23946), False, 'import torchvision\n'), ((24390, 24429), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': '[parent_parser]'}), '(parents=[parent_parser])\n', (24404, 24429), False, 'from argparse import ArgumentParser, 
Namespace\n'), ((2288, 2351), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channels', 'feat_channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, feat_channels, kernel_size=3, padding=1)\n', (2297, 2351), True, 'import torch.nn as nn\n'), ((2365, 2388), 'torch.nn.PReLU', 'nn.PReLU', (['feat_channels'], {}), '(feat_channels)\n', (2373, 2388), True, 'import torch.nn as nn\n'), ((2402, 2467), 'torch.nn.Conv3d', 'nn.Conv3d', (['feat_channels', 'feat_channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels, feat_channels, kernel_size=3, padding=1)\n', (2411, 2467), True, 'import torch.nn as nn\n'), ((2481, 2504), 'torch.nn.PReLU', 'nn.PReLU', (['feat_channels'], {}), '(feat_channels)\n', (2489, 2504), True, 'import torch.nn as nn\n'), ((2619, 2684), 'torch.nn.Conv3d', 'nn.Conv3d', (['feat_channels', 'feat_channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels, feat_channels, kernel_size=3, padding=1)\n', (2628, 2684), True, 'import torch.nn as nn\n'), ((2698, 2721), 'torch.nn.PReLU', 'nn.PReLU', (['feat_channels'], {}), '(feat_channels)\n', (2706, 2721), True, 'import torch.nn as nn\n'), ((2735, 2800), 'torch.nn.Conv3d', 'nn.Conv3d', (['feat_channels', 'feat_channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels, feat_channels, kernel_size=3, padding=1)\n', (2744, 2800), True, 'import torch.nn as nn\n'), ((2814, 2837), 'torch.nn.PReLU', 'nn.PReLU', (['feat_channels'], {}), '(feat_channels)\n', (2822, 2837), True, 'import torch.nn as nn\n'), ((2956, 3035), 'torch.nn.Conv3d', 'nn.Conv3d', (['feat_channels', '(feat_channels * 4)'], {'kernel_size': '(4)', 'padding': '(1)', 'stride': '(2)'}), '(feat_channels, feat_channels * 4, kernel_size=4, padding=1, stride=2)\n', (2965, 3035), True, 'import torch.nn as nn\n'), ((3047, 3074), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (3055, 3074), True, 'import torch.nn as nn\n'), ((3180, 3253), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (3189, 3253), True, 'import torch.nn as nn\n'), ((3263, 3290), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (3271, 3290), True, 'import torch.nn as nn\n'), ((3302, 3375), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (3311, 3375), True, 'import torch.nn as nn\n'), ((3385, 3412), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (3393, 3412), True, 'import torch.nn as nn\n'), ((3522, 3609), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 8)'], {'kernel_size': '(4)', 'padding': '(1)', 'stride': '(2)'}), '(feat_channels * 4, feat_channels * 8, kernel_size=4, padding=1,\n stride=2)\n', (3531, 3609), True, 'import torch.nn as nn\n'), ((3615, 3642), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (3623, 3642), True, 'import torch.nn as nn\n'), ((3748, 3821), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (3757, 3821), True, 'import torch.nn as nn\n'), ((3831, 3858), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (3839, 
3858), True, 'import torch.nn as nn\n'), ((3870, 3943), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (3879, 3943), True, 'import torch.nn as nn\n'), ((3953, 3980), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (3961, 3980), True, 'import torch.nn as nn\n'), ((4090, 4178), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 16)'], {'kernel_size': '(4)', 'padding': '(1)', 'stride': '(2)'}), '(feat_channels * 8, feat_channels * 16, kernel_size=4, padding=1,\n stride=2)\n', (4099, 4178), True, 'import torch.nn as nn\n'), ((4184, 4212), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 16)'], {}), '(feat_channels * 16)\n', (4192, 4212), True, 'import torch.nn as nn\n'), ((4319, 4394), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 16)', '(feat_channels * 16)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 16, feat_channels * 16, kernel_size=3, padding=1)\n', (4328, 4394), True, 'import torch.nn as nn\n'), ((4404, 4432), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 16)'], {}), '(feat_channels * 16)\n', (4412, 4432), True, 'import torch.nn as nn\n'), ((4444, 4519), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 16)', '(feat_channels * 16)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 16, feat_channels * 16, kernel_size=3, padding=1)\n', (4453, 4519), True, 'import torch.nn as nn\n'), ((4529, 4557), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 16)'], {}), '(feat_channels * 16)\n', (4537, 4557), True, 'import torch.nn as nn\n'), ((4675, 4790), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['(feat_channels * 16)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(feat_channels * 16, feat_channels * 8, kernel_size=3,\n stride=2, padding=1, output_padding=1)\n', (4693, 4790), True, 'import torch.nn as nn\n'), ((4796, 4823), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (4804, 4823), True, 'import torch.nn as nn\n'), ((4927, 4990), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 16)', '(feat_channels * 8)'], {'kernel_size': '(1)'}), '(feat_channels * 16, feat_channels * 8, kernel_size=1)\n', (4936, 4990), True, 'import torch.nn as nn\n'), ((5000, 5027), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (5008, 5027), True, 'import torch.nn as nn\n'), ((5039, 5112), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (5048, 5112), True, 'import torch.nn as nn\n'), ((5122, 5149), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (5130, 5149), True, 'import torch.nn as nn\n'), ((5257, 5371), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['(feat_channels * 8)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(feat_channels * 8, feat_channels * 4, kernel_size=3,\n stride=2, padding=1, output_padding=1)\n', (5275, 5371), True, 'import torch.nn as nn\n'), ((5377, 5404), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (5385, 5404), True, 'import torch.nn as nn\n'), ((5508, 5570), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels 
* 8)', '(feat_channels * 4)'], {'kernel_size': '(1)'}), '(feat_channels * 8, feat_channels * 4, kernel_size=1)\n', (5517, 5570), True, 'import torch.nn as nn\n'), ((5580, 5607), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (5588, 5607), True, 'import torch.nn as nn\n'), ((5619, 5692), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (5628, 5692), True, 'import torch.nn as nn\n'), ((5702, 5729), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (5710, 5729), True, 'import torch.nn as nn\n'), ((5847, 5921), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 16)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 16, feat_channels * 8, kernel_size=3, padding=1)\n', (5856, 5921), True, 'import torch.nn as nn\n'), ((5931, 5958), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (5939, 5958), True, 'import torch.nn as nn\n'), ((5970, 6043), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 4, kernel_size=3, padding=1)\n', (5979, 6043), True, 'import torch.nn as nn\n'), ((6053, 6080), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (6061, 6080), True, 'import torch.nn as nn\n'), ((6092, 6165), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (6101, 6165), True, 'import torch.nn as nn\n'), ((6175, 6202), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (6183, 6202), True, 'import torch.nn as nn\n'), ((6214, 6260), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(1)'], {'kernel_size': '(1)'}), '(feat_channels * 4, 1, kernel_size=1)\n', (6223, 6260), True, 'import torch.nn as nn\n'), ((6272, 6284), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6282, 6284), True, 'import torch.nn as nn\n'), ((6356, 6431), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 16)', '(feat_channels * 16)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 16, feat_channels * 16, kernel_size=3, padding=1)\n', (6365, 6431), True, 'import torch.nn as nn\n'), ((6441, 6469), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 16)'], {}), '(feat_channels * 16)\n', (6449, 6469), True, 'import torch.nn as nn\n'), ((6481, 6555), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 16)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 16, feat_channels * 8, kernel_size=3, padding=1)\n', (6490, 6555), True, 'import torch.nn as nn\n'), ((6565, 6592), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (6573, 6592), True, 'import torch.nn as nn\n'), ((6604, 6677), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (6613, 6677), True, 'import torch.nn as nn\n'), ((6687, 6714), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (6695, 6714), True, 'import torch.nn as nn\n'), ((6726, 6783), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', 
'coefficients'], {'kernel_size': '(1)'}), '(feat_channels * 8, coefficients, kernel_size=1)\n', (6735, 6783), True, 'import torch.nn as nn\n'), ((6852, 6925), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (6861, 6925), True, 'import torch.nn as nn\n'), ((6935, 6962), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (6943, 6962), True, 'import torch.nn as nn\n'), ((6974, 7047), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 4, kernel_size=3, padding=1)\n', (6983, 7047), True, 'import torch.nn as nn\n'), ((7057, 7084), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (7065, 7084), True, 'import torch.nn as nn\n'), ((7096, 7169), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (7105, 7169), True, 'import torch.nn as nn\n'), ((7179, 7206), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (7187, 7206), True, 'import torch.nn as nn\n'), ((7218, 7264), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(1)'], {'kernel_size': '(1)'}), '(feat_channels * 4, 1, kernel_size=1)\n', (7227, 7264), True, 'import torch.nn as nn\n'), ((7276, 7288), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (7286, 7288), True, 'import torch.nn as nn\n'), ((7360, 7433), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (7369, 7433), True, 'import torch.nn as nn\n'), ((7443, 7470), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (7451, 7470), True, 'import torch.nn as nn\n'), ((7482, 7555), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (7491, 7555), True, 'import torch.nn as nn\n'), ((7565, 7592), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (7573, 7592), True, 'import torch.nn as nn\n'), ((7604, 7677), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (7613, 7677), True, 'import torch.nn as nn\n'), ((7687, 7714), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (7695, 7714), True, 'import torch.nn as nn\n'), ((7726, 7783), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', 'coefficients'], {'kernel_size': '(1)'}), '(feat_channels * 8, coefficients, kernel_size=1)\n', (7735, 7783), True, 'import torch.nn as nn\n'), ((7852, 7925), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (7861, 7925), True, 'import torch.nn as nn\n'), ((7935, 7962), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (7943, 7962), True, 'import torch.nn as nn\n'), ((7974, 8047), 'torch.nn.Conv3d', 
'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (7983, 8047), True, 'import torch.nn as nn\n'), ((8057, 8084), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (8065, 8084), True, 'import torch.nn as nn\n'), ((8096, 8169), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (8105, 8169), True, 'import torch.nn as nn\n'), ((8179, 8206), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (8187, 8206), True, 'import torch.nn as nn\n'), ((8218, 8264), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(1)'], {'kernel_size': '(1)'}), '(feat_channels * 4, 1, kernel_size=1)\n', (8227, 8264), True, 'import torch.nn as nn\n'), ((8276, 8288), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (8286, 8288), True, 'import torch.nn as nn\n'), ((8360, 8433), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 4)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 4, kernel_size=3, padding=1)\n', (8369, 8433), True, 'import torch.nn as nn\n'), ((8443, 8470), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 4)'], {}), '(feat_channels * 4)\n', (8451, 8470), True, 'import torch.nn as nn\n'), ((8482, 8555), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 4)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 4, feat_channels * 8, kernel_size=3, padding=1)\n', (8491, 8555), True, 'import torch.nn as nn\n'), ((8565, 8592), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (8573, 8592), True, 'import torch.nn as nn\n'), ((8604, 8677), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', '(feat_channels * 8)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feat_channels * 8, feat_channels * 8, kernel_size=3, padding=1)\n', (8613, 8677), True, 'import torch.nn as nn\n'), ((8687, 8714), 'torch.nn.PReLU', 'nn.PReLU', (['(feat_channels * 8)'], {}), '(feat_channels * 8)\n', (8695, 8714), True, 'import torch.nn as nn\n'), ((8726, 8783), 'torch.nn.Conv3d', 'nn.Conv3d', (['(feat_channels * 8)', 'coefficients'], {'kernel_size': '(1)'}), '(feat_channels * 8, coefficients, kernel_size=1)\n', (8735, 8783), True, 'import torch.nn as nn\n'), ((9461, 9492), 'torch.cat', 'torch.cat', (['(up1, down2_conv)', '(1)'], {}), '((up1, down2_conv), 1)\n', (9470, 9492), False, 'import torch\n'), ((9571, 9602), 'torch.cat', 'torch.cat', (['(up2, down1_conv)', '(1)'], {}), '((up2, down1_conv), 1)\n', (9580, 9602), False, 'import torch\n'), ((10232, 10252), 'argparse.Namespace', 'Namespace', ([], {}), '(**hparams)\n', (10241, 10252), False, 'from argparse import ArgumentParser, Namespace\n'), ((10986, 11013), 'torch.load', 'torch.load', (['pretrained_file'], {}), '(pretrained_file)\n', (10996, 11013), False, 'import torch\n'), ((21155, 21411), 'dataloader.harmonic_dataloader.MeristemH5Dataset', 'MeristemH5Dataset', (['self.hparams.train_list', 'self.hparams.data_root'], {'patch_size': 'self.hparams.patch_size', 'image_group': 'self.hparams.image_groups[0]', 'strides': 'self.hparams.strides', 'norm_method': 'self.hparams.data_norm', 'sh_order': 'self.hparams.sh_order'}), '(self.hparams.train_list, self.hparams.data_root,\n patch_size=self.hparams.patch_size, 
image_group=self.hparams.\n image_groups[0], strides=self.hparams.strides, norm_method=self.hparams\n .data_norm, sh_order=self.hparams.sh_order)\n', (21172, 21411), False, 'from dataloader.harmonic_dataloader import MeristemH5Dataset\n'), ((21499, 21588), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.hparams.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset, batch_size=self.hparams.batch_size, shuffle=True,\n drop_last=True)\n', (21509, 21588), False, 'from torch.utils.data import DataLoader\n'), ((21724, 21979), 'dataloader.harmonic_dataloader.MeristemH5Dataset', 'MeristemH5Dataset', (['self.hparams.test_list', 'self.hparams.data_root'], {'patch_size': 'self.hparams.patch_size', 'image_group': 'self.hparams.image_groups[0]', 'strides': 'self.hparams.strides', 'norm_method': 'self.hparams.data_norm', 'sh_order': 'self.hparams.sh_order'}), '(self.hparams.test_list, self.hparams.data_root,\n patch_size=self.hparams.patch_size, image_group=self.hparams.\n image_groups[0], strides=self.hparams.strides, norm_method=self.hparams\n .data_norm, sh_order=self.hparams.sh_order)\n', (21741, 21979), False, 'from dataloader.harmonic_dataloader import MeristemH5Dataset\n'), ((22068, 22123), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.hparams.batch_size'}), '(dataset, batch_size=self.hparams.batch_size)\n', (22078, 22123), False, 'from torch.utils.data import DataLoader\n'), ((22261, 22514), 'dataloader.harmonic_dataloader.MeristemH5Dataset', 'MeristemH5Dataset', (['self.hparams.val_list', 'self.hparams.data_root'], {'patch_size': 'self.hparams.patch_size', 'image_group': 'self.hparams.image_groups[0]', 'strides': 'self.hparams.strides', 'norm_method': 'self.hparams.data_norm', 'sh_order': 'self.hparams.sh_order'}), '(self.hparams.val_list, self.hparams.data_root, patch_size\n =self.hparams.patch_size, image_group=self.hparams.image_groups[0],\n strides=self.hparams.strides, norm_method=self.hparams.data_norm,\n sh_order=self.hparams.sh_order)\n', (22278, 22514), False, 'from dataloader.harmonic_dataloader import MeristemH5Dataset\n'), ((22603, 22658), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.hparams.batch_size'}), '(dataset, batch_size=self.hparams.batch_size)\n', (22613, 22658), False, 'from torch.utils.data import DataLoader\n'), ((12335, 12352), 'torch.sum', 'torch.sum', (['weight'], {}), '(weight)\n', (12344, 12352), False, 'import torch\n'), ((18706, 18752), 'torch.stack', 'torch.stack', (["[x['test_loss'] for x in outputs]"], {}), "([x['test_loss'] for x in outputs])\n", (18717, 18752), False, 'import torch\n'), ((20708, 20753), 'torch.stack', 'torch.stack', (["[x['val_loss'] for x in outputs]"], {}), "([x['val_loss'] for x in outputs])\n", (20719, 20753), False, 'import torch\n'), ((12096, 12114), 'torch.sum', 'torch.sum', (['(y < 0.5)'], {}), '(y < 0.5)\n', (12105, 12114), False, 'import torch\n'), ((12146, 12164), 'torch.sum', 'torch.sum', (['(y > 0.5)'], {}), '(y > 0.5)\n', (12155, 12164), False, 'import torch\n'), ((12604, 12619), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (12613, 12619), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2019-11-08 10:08
# @Author : binger
from .draw_by_html import EffectFont
from .draw_by_html import FontDrawByHtml
from . import FontDraw, FontAttr
from PIL import Image, ImageDraw, ImageFont
class FontFactory(object):
def __init__(self, effect_font, render_by_mixed=False):
"""
:param render_by_mixed: if False, always render through the browser (HTML) backend,
:param fill_type:
"""
assert isinstance(effect_font, EffectFont)
self.render_by_mixed = render_by_mixed
self._effect_font = effect_font
self.font_draw = None
def is_effected(self):
return any([self._effect_font.gradient_enable, self._effect_font.shadow_enable, self._effect_font.stroke_enable,
self._effect_font.weight != 'normal'])
def get_line_size(self, size, text):
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
return self.font_draw.get_line_size(text, size=size)
def get_max_canvas_size(self, text):
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
return self.font_draw.max_canvas_size(text, is_truncated=True)
def get_size_at_limit_range(self, text, size, char_spacing_par=0.1):
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
return self.font_draw.get_size_at_limit_range(text, size, char_spacing_par=char_spacing_par)
def get_text_size_at_width(self, size, text, width, spacing=4):
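# Greedily accumulate character widths (plus spacing) until the target width is exceeded; returns the fitted line and the remaining text.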
effect_font = ImageFont.truetype(font=self._effect_font.base.path, size=size)
len_text = len(text)
line = text
remain = ''
temp_width = 0
for n in range(len_text):
temp_width += effect_font.getsize(text[n])[0]
if temp_width > width:
line = text[:n - 1]
remain = text[n - 1:]
break
temp_width += spacing
return line, remain
def render_to_rng(self):
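# Use the plain PIL renderer when mixed rendering is allowed and no effects are active; otherwise render through the HTML backend.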
if self.render_by_mixed and not self.is_effected():
limit_text_cb = lambda *args, **kwargs: True
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
resp = self.font_draw.write(self._effect_font.text, limit_text_cb=limit_text_cb)
img = resp.img
else:
img = FontDrawByHtml.render_to_image(self._effect_font)
return img
if __name__ == '__main__':
font = EffectFont()
path = '../simkai.ttf'
import os
print(os.path.abspath(path))
font.set_text_base(size=100, path=path)
font.base.clear_margin = True
# font.shadow((80, 31, 191, 0.3), sigma=8, x=0, y=6)
# font.gradient([("#da7eeb", 0), ("#9070d8", 0.5)], angle="center", type="radial")
# font.fill_color = "#da7eeb"
font.fill_color = (1, 1, 1, 255)
# font.stroke("#4e1269", 1)
font.text = '实例\n中国\n实例'
obj = FontFactory(font, render_by_mixed=True)
img = obj.render_to_rng()
print(img.size)
img.save("11.png")
|
[
"PIL.ImageFont.truetype",
"os.path.abspath"
] |
[((1556, 1619), 'PIL.ImageFont.truetype', 'ImageFont.truetype', ([], {'font': 'self._effect_font.base.path', 'size': 'size'}), '(font=self._effect_font.base.path, size=size)\n', (1574, 1619), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2574, 2595), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (2589, 2595), False, 'import os\n')]
|
# Generated by Django 4.0.3 on 2022-03-12 20:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('university', '0008_rename_course_id_course_courseid_and_more'),
]
operations = [
migrations.RenameField(
model_name='course',
old_name='courseId',
new_name='courseid',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((253, 343), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""course"""', 'old_name': '"""courseId"""', 'new_name': '"""courseid"""'}), "(model_name='course', old_name='courseId', new_name=\n 'courseid')\n", (275, 343), False, 'from django.db import migrations\n')]
|
""" Test for the API that handle models
Copyright (c) 2021 Idiap Research Institute, https://www.idiap.ch/
Written by <NAME> <<EMAIL>>,
"""
import unittest
from fastapi.testclient import TestClient # type: ignore
from personal_context_builder import config
from personal_context_builder.wenet_fastapi_app import app
class APIModelsTestCase(unittest.TestCase):
def setUp(self):
self.client = TestClient(app)
def test_simple_lda_exist(self):
response = self.client.get(config.PCB_VIRTUAL_HOST_LOCATION + "/models/")
self.assertIn("SimpleLDA", response.json())
def test_simple_bow_exist(self):
response = self.client.get(config.PCB_VIRTUAL_HOST_LOCATION + "/models/")
self.assertIn("SimpleBOW", response.json())
|
[
"fastapi.testclient.TestClient"
] |
[((409, 424), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (419, 424), False, 'from fastapi.testclient import TestClient\n')]
|
import pytest
from exception.argument_not_instance_of_exception import ArgumentNotInstanceOfException
from guard import Guard
@pytest.mark.parametrize(
"param, typeof, param_name, message, expected",
[
(2, str, None, "parameter is not from type <class 'str'>.", pytest.raises(ArgumentNotInstanceOfException)),
([], dict, None, "parameter is not from type <class 'dict'>.", pytest.raises(ArgumentNotInstanceOfException)),
("test", bool, None, "parameter is not from type <class 'bool'>.", pytest.raises(ArgumentNotInstanceOfException))
]
)
def test_IsNotInstanceOfType_InvalidType_RaisedArgumentNotInstanceOfException(param, typeof, param_name,
message, expected):
with expected as err:
Guard.is_not_instance_of_type(param=param, typeof=typeof, param_name=param_name)
assert message in str(err.value)
|
[
"pytest.raises",
"guard.Guard.is_not_instance_of_type"
] |
[((815, 900), 'guard.Guard.is_not_instance_of_type', 'Guard.is_not_instance_of_type', ([], {'param': 'param', 'typeof': 'typeof', 'param_name': 'param_name'}), '(param=param, typeof=typeof, param_name=param_name\n )\n', (844, 900), False, 'from guard import Guard\n'), ((281, 326), 'pytest.raises', 'pytest.raises', (['ArgumentNotInstanceOfException'], {}), '(ArgumentNotInstanceOfException)\n', (294, 326), False, 'import pytest\n'), ((400, 445), 'pytest.raises', 'pytest.raises', (['ArgumentNotInstanceOfException'], {}), '(ArgumentNotInstanceOfException)\n', (413, 445), False, 'import pytest\n'), ((523, 568), 'pytest.raises', 'pytest.raises', (['ArgumentNotInstanceOfException'], {}), '(ArgumentNotInstanceOfException)\n', (536, 568), False, 'import pytest\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 13:22:41 2022
@author: sampasmann
"""
import numpy as np
from time import process_time
def reeds_data(Nx=1000, LB=-8.0, RB=8.0):
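# One-group, piecewise-constant cross sections and source for the Reed-type slab benchmark, discretised on Nx cells spanning [LB, RB].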
G = 1 # number of energy groups
sigt = np.empty((Nx,G))
sigs = np.empty((Nx,G,G))
source = np.empty((Nx,G))
dx = (RB-LB)/Nx
xspan = np.linspace(LB+dx/2, RB-dx/2, Nx)
count = 0
for x in xspan:
if (x < -6):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 0.0
elif (-6 < x) and (x < -5):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 1.0
elif (-5 < x < -3): #vacuum region 1
sigt[count,:] = 0.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (-3 < x < -2):
sigt[count,:] = 5.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (-2 < x < 2):
sigt[count,:] = 50.0
sigs[count,:,:] = 0.0
source[count,:] = 50.0
elif (2 < x < 3):
sigt[count,:] = 5.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (3 < x < 5): # vacuum region 2
sigt[count,:] = 0.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (5 < x < 6):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 1.0
elif (6 < x):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 0.0
count += 1
siga = (sigt - sigs)
return sigt, sigs, siga, source, G
if __name__ == "__main__":
"""
start = process_time()
sigt, sigs, siga, source, G = reeds_data(1)
stop = process_time()
time1 = stop-start
print("Elapsed time with Compliation:", time1)
"""
start = process_time()
sigt, sigs, siga, source, G = reeds_data(100)
stop = process_time()
time2 = stop-start
#print("Elapsed time After Compliation:", time2)
# print("A {}x speed up".format(round(time1/time2)))
|
[
"numpy.empty",
"time.process_time",
"numpy.linspace"
] |
[((251, 268), 'numpy.empty', 'np.empty', (['(Nx, G)'], {}), '((Nx, G))\n', (259, 268), True, 'import numpy as np\n'), ((279, 299), 'numpy.empty', 'np.empty', (['(Nx, G, G)'], {}), '((Nx, G, G))\n', (287, 299), True, 'import numpy as np\n'), ((311, 328), 'numpy.empty', 'np.empty', (['(Nx, G)'], {}), '((Nx, G))\n', (319, 328), True, 'import numpy as np\n'), ((360, 401), 'numpy.linspace', 'np.linspace', (['(LB + dx / 2)', '(RB - dx / 2)', 'Nx'], {}), '(LB + dx / 2, RB - dx / 2, Nx)\n', (371, 401), True, 'import numpy as np\n'), ((1922, 1936), 'time.process_time', 'process_time', ([], {}), '()\n', (1934, 1936), False, 'from time import process_time\n'), ((1998, 2012), 'time.process_time', 'process_time', ([], {}), '()\n', (2010, 2012), False, 'from time import process_time\n')]
|
import sqlite3
import time
import hashlib
import random  # needed by process_bill to draw a new registration number
conn = None
c = None
def connect(path):
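# Open the SQLite database at the given path and enable foreign-key enforcement.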
global conn, c
conn = sqlite3.connect(path)
c = conn.cursor()
c.execute(' PRAGMA foreign_keys=ON; ')
conn.commit()
return
def drop_tables():
global conn, c
c.execute("drop table if exists demeritNotices;")
c.execute("drop table if exists tickets;")
c.execute("drop table if exists registrations;")
c.execute("drop table if exists vehicles;")
c.execute("drop table if exists marriages;")
c.execute("drop table if exists births;")
c.execute("drop table if exists persons;")
c.execute("drop table if exists payments;")
c.execute("drop table if exists users;")
conn.commit()
def define_tables():
global conn, c
c.execute('''create table persons (
fname char(12),
lname char(12),
bdate date,
bplace char(20),
address char(30),
phone char(12),
primary key (fname, lname)
); ''')
c.execute('''create table births (
regno int,
fname char(12),
lname char(12),
regdate date,
regplace char(20),
gender char(1),
f_fname char(12),
f_lname char(12),
m_fname char(12),
m_lname char(12),
primary key (regno),
foreign key (fname,lname) references persons,
foreign key (f_fname,f_lname) references persons,
foreign key (m_fname,m_lname) references persons
);''')
c.execute('''create table marriages (
regno int,
regdate date,
regplace char(20),
p1_fname char(12),
p1_lname char(12),
p2_fname char(12),
p2_lname char(12),
primary key (regno),
foreign key (p1_fname,p1_lname) references persons,
foreign key (p2_fname,p2_lname) references persons
);''')
c.execute('''create table vehicles (
vin char(5),
make char(10),
model char(10),
year int,
color char(10),
primary key (vin)
);''')
c.execute('''create table registrations (
regno int,
regdate date,
expiry date,
plate char(7),
vin char(5),
fname char(12),
lname char(12),
primary key (regno),
foreign key (vin) references vehicles,
foreign key (fname,lname) references persons
);''')
c.execute('''create table tickets (
tno int,
regno int,
fine int,
violation text,
vdate date,
primary key (tno),
foreign key (regno) references registrations
);''')
c.execute('''create table demeritNotices (
ddate date,
fname char(12),
lname char(12),
points int,
desc text,
primary key (ddate,fname,lname),
foreign key (fname,lname) references persons
);''')
c.execute('''create table payments (
tno int,
pdate date,
amount int,
primary key (tno, pdate),
foreign key (tno) references tickets
);''')
c.execute('''create table users (
uid char(8),
pwd char(8),
utype char(1), -- 'a' for agents, 'o' for officers
fname char(12),
lname char(12),
city char(15),
primary key(uid),
foreign key (fname,lname) references persons
);''')
conn.commit()
def insert_data():
global conn, c
conn.commit()
def function_name1():
global conn, c
conn.commit()
def function_name2():
global conn, c
conn.commit()
def function_name3():
global conn, c
conn.commit()
def process_bill(provide_vin, provide_fname, provide_lname, provide_platenum):
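# Check the provided owner against the most recent registration for this vin/plate, expire that registration and create a new one.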
global conn, c
conn.row_factory = sqlite3.Row
c.execute('''SELECT r.fname, r.lname
FROM vehicles v, registrations r
WHERE v.vin = r.vin AND r.vin = ? AND r.plate = ?
ORDER BY r.regdate DESC
limit 1;''', (provide_vin, provide_platenum))
name = c.fetchall()
if name == []:
print("Sorry, the name you provided is not in our database.")
elif (provide_fname == name[0][0]) and (provide_lname == name[0][1]):
#end current registration
c.execute('''UPDATE registrations
SET expiry = datetime('now')
WHERE fname = ? and lname = ?;''', (provide_fname, provide_lname))
#make new registration
new_regno = random.randint(0, 1000)
c.execute('''INSERT INTO registrations(regno, regdate, expiry, plate, vin, fname, lname)
VALUES (?, datetime('now'), datetime('now', '1 year'), ?, ?, ?, ?);''', (new_regno, provide_platenum, provide_vin, provide_fname, provide_lname))
conn.commit()
return
def process_payment(ticketnum, amount):
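# Record a payment against a ticket; the payment is only inserted if the running total stays below the fine.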
global conn, c
c.execute('''SELECT tno FROM tickets;''')
tno = c.fetchall()
for t in tno:
if t[0] == ticketnum:
c.execute('''SELECT COALESCE(sum(amount), 0)
FROM payments
WHERE tno = ?;''', (ticketnum,))
total = c.fetchall()
c.execute('''SELECT fine
FROM tickets
WHERE tno = ?;''', (ticketnum,))
fine = c.fetchall()
if fine[0][0] > (total[0][0] + amount): #sum of those payments cannot exceed the fine amount of ticket
c.execute('''INSERT INTO payments(tno, pdate, amount)
VALUES (?, datetime('now'), ?);''', (ticketnum, amount))
conn.commit()
def get_driver_abstract(fname, lname):
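# Print ticket and demerit-notice counts plus demerit points (last two years and lifetime) for a driver, optionally listing tickets newest first.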
global conn, c
c.execute('''SELECT count(t.tno)
FROM registrations r, tickets t
WHERE r.regno = t.regno
and r.fname = ? and r.lname = ?;''', (fname, lname))
num_tickets = c.fetchall()
print(num_tickets)
print("The number of ticket is %d" % num_tickets[0])
c.execute('''SELECT count(*)
FROM demeritNotices d
WHERE d.fname = ? and d.lname = ?;''', (fname, lname))
num_dnotices = c.fetchall()
print(num_dnotices)
print("The number of demerit notices is %d" % num_dnotices[0])
c.execute('''SELECT COALESCE(SUM(d.points),0)
FROM demeritNotices d
WHERE d.fname = ? and d.lname = ?
and d.ddate >= datetime('now', '-2 years');''', (fname, lname))
points_2years = c.fetchall()
print(points_2years)
print("The total number of demerit points received within 2 years is %d" % points_2years[0])
c.execute('''SELECT COALESCE(SUM(d.points),0)
FROM demeritNotices d
WHERE d.fname = ? and d.lname = ?;''', (fname, lname))
points_lifetime = c.fetchall()
print(points_lifetime)
print("The total number of demerit points received within life time is %d" % points_lifetime[0])
check = input("Would you like to see the tickets ordered form te latest to the oldest?(Y/N): ")
if check == 'Y' or check == 'y':
c.execute('''SELECT t.tno, t.vdate, t.violation, t.fine, t.regno, v.make, v.model
FROM tickets t, vehicles v, registrations r
WHERE t.regno = r.regno and r.vin = v.vin
and r.fname = ? and r.lname = ?
ORDER BY t.vdate DESC;''', (fname, lname))
tickets_infor = c.fetchall()
ticketsNum = 0
for t in tickets_infor:
ticketsNum += 1
if ticketsNum <= 5:
print(t)
else:
see_more = input("Would you like to see more tickets information?(Y/N): ")
if see_more == 'y' or see_more == 'Y':
print(t)
print(ticketsNum)
conn.commit()
def function_name7():
global conn, c
conn.commit()
def function_name8():
global conn, c
conn.commit()
def main():
global conn, c
#path = input("Enter the name of database: ")
path="./a3.db"
connect(path)
drop_tables()
define_tables()
insert_data()
#4
user_vin = input("Please provide the vin of a car: ")
currentowner_fname = input("Please provide the first name of the current owner: ")
currentowner_lname = input("Please provide the last name of the current owner: ")
plate_number = input("Please provide a plate number for the new registration: ")
process_bill(user_vin, currentowner_fname, currentowner_lname, plate_number)
#5
ticket_number = int(input("Please input a valid ticket number: "))
amount = int(input("Please input the amount you would like to pay: "))
process_payment(ticket_number, amount)
#6
driver_fname = input("Please enter the first name of the driver: ")
driver_lname = input("Please enter the last name of the driver: ")
get_driver_abstract(driver_fname, driver_lname)
conn.commit()
conn.close()
return
if __name__ == "__main__":
main()
|
[
"sqlite3.connect"
] |
[((115, 136), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (130, 136), False, 'import sqlite3\n')]
|
#! /opt/jython/bin/jython
# -*- coding: utf-8 -*-
#
# delete/text_delete.py
#
# Oct/12/2016
import sys
import string
#
# ---------------------------------------------------------------
sys.path.append ('/var/www/data_base/common/python_common')
sys.path.append ('/var/www/data_base/common/jython_common')
from jython_text_manipulate import text_read_proc
from jython_text_manipulate import text_write_proc
#
from text_manipulate import dict_delete_proc
# ---------------------------------------------------------------
#
sys.stderr.write ("*** 開始 ***\n")
#
file_in = sys.argv[1]
id_in = sys.argv[2]
print ("%s" % id_in)
dict_aa = text_read_proc (file_in)
dict_bb = dict_delete_proc (dict_aa,id_in)
text_write_proc (file_in,dict_bb)
#
sys.stderr.write ("*** 終了 ***\n")
# ---------------------------------------------------------------
|
[
"sys.path.append",
"jython_text_manipulate.text_read_proc",
"text_manipulate.dict_delete_proc",
"sys.stderr.write",
"jython_text_manipulate.text_write_proc"
] |
[((189, 247), 'sys.path.append', 'sys.path.append', (['"""/var/www/data_base/common/python_common"""'], {}), "('/var/www/data_base/common/python_common')\n", (204, 247), False, 'import sys\n'), ((249, 307), 'sys.path.append', 'sys.path.append', (['"""/var/www/data_base/common/jython_common"""'], {}), "('/var/www/data_base/common/jython_common')\n", (264, 307), False, 'import sys\n'), ((525, 557), 'sys.stderr.write', 'sys.stderr.write', (['"""*** 開始 ***\n"""'], {}), "('*** 開始 ***\\n')\n", (541, 557), False, 'import sys\n'), ((635, 658), 'jython_text_manipulate.text_read_proc', 'text_read_proc', (['file_in'], {}), '(file_in)\n', (649, 658), False, 'from jython_text_manipulate import text_read_proc\n'), ((671, 703), 'text_manipulate.dict_delete_proc', 'dict_delete_proc', (['dict_aa', 'id_in'], {}), '(dict_aa, id_in)\n', (687, 703), False, 'from text_manipulate import dict_delete_proc\n'), ((705, 738), 'jython_text_manipulate.text_write_proc', 'text_write_proc', (['file_in', 'dict_bb'], {}), '(file_in, dict_bb)\n', (720, 738), False, 'from jython_text_manipulate import text_write_proc\n'), ((741, 773), 'sys.stderr.write', 'sys.stderr.write', (['"""*** 終了 ***\n"""'], {}), "('*** 終了 ***\\n')\n", (757, 773), False, 'import sys\n')]
|
import functools
def run_once(func):
""" The decorated function will only run once. Other calls to it will
return None. The original implementation can be found on StackOverflow(https://stackoverflow.com/questions/4103773/efficient-way-of-having-a-function-only-execute-once-in-a-loop)
:param func: the function to be limited to run once only
:return: the decorated function
""" # noqa
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
if not func_wrapper.has_run:
func_wrapper.has_run = True
return func(*args, **kwargs)
func_wrapper.has_run = False
return func_wrapper
|
[
"functools.wraps"
] |
[((416, 437), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (431, 437), False, 'import functools\n')]
|
import argparse
import os
from scheduled_bots.ontology.obographs import Graph, Node
from wikidataintegrator import wdi_login, wdi_core, wdi_helpers
from scheduled_bots import PROPS
def ec_formatter(ec_number):
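# Pad an EC number to the full four-field form, e.g. '1.1' -> '1.1.-.-'.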
splits = ec_number.split('.')
if len(splits) < 4:
for x in range(4 - len(splits)):
splits.append('-')
return '.'.join(splits)
class GONode(Node):
def _pre_create(self):
super(GONode, self)._pre_create()
self.xrefs = list(self.xrefs)
for n, xref in enumerate(self.xrefs):
if xref.startswith("EC:"):
prefix, ec_number = xref.split(":")
self.xrefs[n] = ":".join([prefix, ec_formatter(ec_number)])
self.xrefs = set(self.xrefs)
class GOGraph(Graph):
NAME = "Gene Ontology"
GRAPH_URI = 'http://purl.obolibrary.org/obo/go.owl'
QID = "Q135085"
DEFAULT_DESCRIPTION = "gene ontology term"
APPEND_PROPS = {PROPS['subclass of'], PROPS['instance of'],
PROPS['has cause'], PROPS['location'], PROPS['part of'],
PROPS['has part'], PROPS['regulates (molecular biology)'],
PROPS['Gene Ontology ID']}
FAST_RUN = False
PRED_PID_MAP = {
'is_a': PROPS['subclass of'],
'http://purl.obolibrary.org/obo/BFO_0000050': PROPS['part of'],
'http://purl.obolibrary.org/obo/BFO_0000051': PROPS['has part'],
'http://purl.obolibrary.org/obo/RO_0002211': PROPS['regulates (molecular biology)'], # regulates
'http://purl.obolibrary.org/obo/RO_0002212': None, # negatively regulates
'http://purl.obolibrary.org/obo/RO_0002213': None, # positively regulates
}
NODE_CLASS = GONode
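# Negatively/positively-regulates edges are rewritten as plain 'regulates' statements qualified with the GO terms below.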
regulates = {
'http://purl.obolibrary.org/obo/RO_0002212': 'http://purl.obolibrary.org/obo/GO_0048519',
'http://purl.obolibrary.org/obo/RO_0002213': 'http://purl.obolibrary.org/obo/GO_0048518'
}
def make_statement_from_edge(self, edge):
# custom statement creator for regulates
h = self.helper
if edge['pred'] in {'http://purl.obolibrary.org/obo/RO_0002212', 'http://purl.obolibrary.org/obo/RO_0002213'}:
subj_node = self.uri_node_map[edge['sub']]
obj_qid = self.get_object_qid(edge['obj'])
# print(obj_qid, edge['pred'])
qual_qid = self.uri_node_map[self.regulates[edge['pred']]].qid
pred_pid = self.PRED_PID_MAP['http://purl.obolibrary.org/obo/RO_0002211']
if not (obj_qid and qual_qid and pred_pid):
m = wdi_helpers.format_msg(edge['sub'], None, None, "failed on edge: {}".format(edge))
print(m)
wdi_core.WDItemEngine.log("WARNING", m)
return None
qualifier = wdi_core.WDItemID(qual_qid, h.get_pid(PROPS['subject has role']), is_qualifier=True)
return wdi_core.WDItemID(obj_qid, pred_pid, qualifiers=[qualifier],
references=[subj_node.create_ref_statement()])
else:
return super(GOGraph, self).make_statement_from_edge(edge)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='run wikidata disease ontology bot')
parser.add_argument("json_path", help="Path to json file")
parser.add_argument("--local", help="preconfigured local wikibase port 7171 and 7272", action='store_true')
args = parser.parse_args()
if args.local:
mediawiki_api_url = "http://localhost:7171/w/api.php"
sparql_endpoint_url = "http://localhost:7272/proxy/wdqs/bigdata/namespace/wdq/sparql"
login = wdi_login.WDLogin("testbot", "password", mediawiki_api_url=mediawiki_api_url)
else:
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
mediawiki_api_url = 'https://www.wikidata.org/w/api.php'
sparql_endpoint_url = 'https://query.wikidata.org/sparql'
login = wdi_login.WDLogin(WDUSER, WDPASS)
g = GOGraph(args.json_path, mediawiki_api_url=mediawiki_api_url, sparql_endpoint_url=sparql_endpoint_url)
g.run(login)
|
[
"wikidataintegrator.wdi_login.WDLogin",
"wikidataintegrator.wdi_core.WDItemEngine.log",
"argparse.ArgumentParser"
] |
[((3164, 3236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""run wikidata disease ontology bot"""'}), "(description='run wikidata disease ontology bot')\n", (3187, 3236), False, 'import argparse\n'), ((3635, 3712), 'wikidataintegrator.wdi_login.WDLogin', 'wdi_login.WDLogin', (['"""testbot"""', '"""password"""'], {'mediawiki_api_url': 'mediawiki_api_url'}), "('testbot', 'password', mediawiki_api_url=mediawiki_api_url)\n", (3652, 3712), False, 'from wikidataintegrator import wdi_login, wdi_core, wdi_helpers\n'), ((4260, 4293), 'wikidataintegrator.wdi_login.WDLogin', 'wdi_login.WDLogin', (['WDUSER', 'WDPASS'], {}), '(WDUSER, WDPASS)\n', (4277, 4293), False, 'from wikidataintegrator import wdi_login, wdi_core, wdi_helpers\n'), ((2695, 2734), 'wikidataintegrator.wdi_core.WDItemEngine.log', 'wdi_core.WDItemEngine.log', (['"""WARNING"""', 'm'], {}), "('WARNING', m)\n", (2720, 2734), False, 'from wikidataintegrator import wdi_login, wdi_core, wdi_helpers\n')]
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import (
Category,
Post,
Profile,
Comment,
PostCreationForm,
PostEditForm,
UserLoginForm,
UserRegistrationForm,
UserUpdateForm,
ProfileUpdateForm,
CommentForm
)
from django.views.generic.edit import UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.utils.text import slugify
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.urls import reverse
from django.contrib import messages
from django.db.models import Q
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template.loader import render_to_string
from django.views.generic.list import ListView
def home(requests):
post_list = Post.published.all().order_by('-id')
cat_section = Category.objects.all()
# regional = Post.published.filter(category="regional").order_by('-id')
# scintific = Post.published.filter(category="scintific").order_by('-id')
# physics = Post.published.filter(category="physics").order_by('-id')
# chemistry = Post.published.filter(category="chemistry").order_by('-id')
# mathematics = Post.published.filter(category="mathematics").order_by('-id')
# biology = Post.published.filter(category="biology").order_by('-id')
# sports = Post.published.filter(category="sports").order_by('-id')
# ai = Post.published.filter(category="ai").order_by('-id')
# offtopic = Post.published.filter(category="offtopic").order_by('-id')
# programming = Post.published.filter(category="programming").order_by('-id')
# datascience = Post.published.filter(category="datascience").order_by('-id')
# entrance_exam = Post.published.filter(category="entrance_exam").order_by('-id')
# travel = Post.published.filter(category="travel").order_by('-id')
# celebrity_talk = Post.published.filter(category="celebrity_talk").order_by('-id')
# world = Post.published.filter(category="world").order_by('-id')
# astronomy = Post.published.filter(category="astronomy").order_by('-id')
# engineering = Post.published.filter(category="engineering").order_by('-id')
# technology = Post.published.filter(category="technology").order_by('-id')
query = requests.GET.get("q")
if query:
post_list = Post.published.filter(
Q(title__icontains=query)
| Q(author__username=query)
| Q(body__icontains=query)
)
paginator = Paginator(post_list, 4)
page = requests.GET.get("page")
try:
page_obj = paginator.get_page(page)
except PageNotAnInteger:
page_obj = paginator.get_page(1)
except EmptyPage:
        page_obj = paginator.get_page(paginator.num_pages)
# function called for pagination
# regional = paginator(regional),
# scintific = paginator(scintific),
# physics = paginator(physics),
# chemistry = paginator(chemistry),
# mathematics = paginator(mathematics),
# biology = paginator(post_list),
# sports = paginator(sports),
# ai = paginator(ai),
# offtopic = paginator(offtopic),
# programming = paginator(programming),
# datascience = paginator(datascience),
# entrance_exam = paginator(entrance_exam),
# travel = paginator(travel),
# celebrity_talk = paginator(celebrity_talk),
# world = paginator(world),
# astronomy = paginator(astronomy),
# engineering = paginator(engineering),
# technology = paginator(technology),
context = {
"page_obj": page_obj,
"cat_section": cat_section
# "regional": regional,
# "scintific": scintific,
# "physics" : physics,
# "chemistry" : chemistry,
# "mathematics" : mathematics,
# "biology" : biology,
# "sports" : sports,
# "ai" : ai,
# "offtopic" : offtopic,
# "programming" : programming,
# "datascience" : datascience,
# "entrance_exam" : entrance_exam,
# "travel" : travel,
# "celebrity_talk" : celebrity_talk,
# "world" : world,
# "astronomy" : astronomy,
# "engineering" : engineering,
# "technology" : technology,
}
return render(requests, "blog/home.html", context)
def about(requests):
return render(requests, "blog/about.html")
def post_detail(requests, pk, slug):
post = get_object_or_404(Post, pk=pk, slug=slug)
comments = Comment.objects.filter(post=post, reply=None).order_by('-id')
is_liked = False
if post.likes.filter(id=requests.user.id).exists():
is_liked = True
if requests.method == "POST":
comment_form = CommentForm(requests.POST or None)
if comment_form.is_valid():
content = requests.POST.get('content')
reply_id = requests.POST.get('comment_id')
comment_qs = None
if reply_id:
comment_qs = Comment.objects.get(id=reply_id)
comment = Comment.objects.create(post=post, user=requests.user, content=content, reply=comment_qs)
comment.save()
messages.success(requests, f"Your comment has been posted.")
# return HttpResponseRedirect(post.get_absolute_url())
else:
comment_form = CommentForm()
context = {
"post": post,
"is_liked": is_liked,
"total_likes": post.total_likes(),
"comments": comments,
"comment_form": comment_form
}
if requests.is_ajax():
html = render_to_string("blog/comments.html", context, requests)
return JsonResponse({"form": html})
return render(requests, "blog/post_detail.html", context)
def like_post(requests):
post = get_object_or_404(Post, id=requests.POST.get("id"))
is_liked = False
if post.likes.filter(id=requests.user.id).exists():
post.likes.remove(requests.user)
is_liked = False
else:
post.likes.add(requests.user)
is_liked = True
context = {
"post": post,
"is_liked": is_liked,
"total_likes": post.total_likes(),
}
if requests.is_ajax():
html = render_to_string("blog/like_section.html", context, requests)
return JsonResponse({"form": html})
@login_required
def create_post(requests):
if requests.method == "POST":
form = PostCreationForm(requests.POST or None)
if form.is_valid():
post = form.save(commit=False)
post.author = requests.user
post.slug = slugify(post.title)
post.save()
messages.success(requests, f"Your post has been created!")
return HttpResponseRedirect(
reverse("post-detail", args=[post.pk, post.slug])
)
else:
form = PostCreationForm()
context = {
"form": form,
}
return render(requests, "blog/create_post.html", context)
@login_required
def edit_post(requests, pk):
post = get_object_or_404(Post, id=pk)
if post.author != requests.user:
raise Http404()
if requests.method == "POST":
form = PostEditForm(requests.POST or None, instance=post)
if form.is_valid():
form.save()
messages.success(requests, f"Your post has been updated!")
return HttpResponseRedirect(post.get_absolute_url())
else:
form = PostEditForm(instance=post)
context = {"form": form, "post": post}
return render(requests, "blog/edit_post.html", context)
@login_required
def delete_post(requests, pk):
post = get_object_or_404(Post, id=pk)
# post = Post.objects.get(id=id)
if post.author != requests.user:
raise Http404()
post.delete()
print(post)
return redirect('blog-home')
def user_registration(requests):
if requests.method == "POST":
form = UserRegistrationForm(requests.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
new_user = form.save()
Profile.objects.create(user=new_user)
messages.success(
requests,
f"Your account has been created for {username}! Please login now.",
)
return redirect("user-login")
else:
form = UserRegistrationForm()
context = {"form": form}
return render(requests, "blog/user_register.html", context)
def user_login(requests):
if requests.method == "POST":
form = UserLoginForm(requests.POST)
if form.is_valid():
username = requests.POST["username"]
password = requests.POST["password"]
user = authenticate(requests, username=username, password=password)
if user is not None:
login(requests, user)
messages.success(requests, f"You are now loggged in")
return HttpResponseRedirect(reverse("blog-home"))
else:
form = UserLoginForm()
context = {"form": form}
return render(requests, "blog/user_login.html", context)
def user_logout(requests):
logout(requests)
messages.success(requests, f"You have been loggged out")
return HttpResponseRedirect(reverse("blog-home"))
@login_required
def profile(requests):
if requests.method == "POST":
u_form = UserUpdateForm(requests.POST, instance=requests.user)
p_form = ProfileUpdateForm(
requests.POST, instance=requests.user.profile, files=requests.FILES
)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(requests, f"Your profile has been updated")
return redirect("profile")
else:
u_form = UserUpdateForm(instance=requests.user)
p_form = ProfileUpdateForm(instance=requests.user.profile)
context = {"u_form": u_form, "p_form": p_form}
return render(requests, "user/profile.html", context)
class CatListView(ListView):
template_name = 'blog/category.html'
context_object_name = 'catlist'
# paginate_by = 4
def get_queryset(self):
content = {
'cat': self.kwargs['category'],
'posts': Post.published.filter(category__name=self.kwargs['category']).filter(status='published'),
'all': Category.objects.all()
}
return content
|
[
"django.shortcuts.redirect",
"django.template.loader.render_to_string",
"django.db.models.Q",
"django.http.JsonResponse",
"django.utils.text.slugify",
"django.urls.reverse",
"django.shortcuts.get_object_or_404",
"django.contrib.auth.logout",
"django.core.paginator.Paginator",
"django.http.Http404",
"django.contrib.auth.authenticate",
"django.shortcuts.render",
"django.contrib.messages.success",
"django.contrib.auth.login"
] |
[((2637, 2660), 'django.core.paginator.Paginator', 'Paginator', (['post_list', '(4)'], {}), '(post_list, 4)\n', (2646, 2660), False, 'from django.core.paginator import Paginator\n'), ((4369, 4412), 'django.shortcuts.render', 'render', (['requests', '"""blog/home.html"""', 'context'], {}), "(requests, 'blog/home.html', context)\n", (4375, 4412), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((4447, 4482), 'django.shortcuts.render', 'render', (['requests', '"""blog/about.html"""'], {}), "(requests, 'blog/about.html')\n", (4453, 4482), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((4533, 4574), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Post'], {'pk': 'pk', 'slug': 'slug'}), '(Post, pk=pk, slug=slug)\n', (4550, 4574), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((5772, 5822), 'django.shortcuts.render', 'render', (['requests', '"""blog/post_detail.html"""', 'context'], {}), "(requests, 'blog/post_detail.html', context)\n", (5778, 5822), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((6999, 7049), 'django.shortcuts.render', 'render', (['requests', '"""blog/create_post.html"""', 'context'], {}), "(requests, 'blog/create_post.html', context)\n", (7005, 7049), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((7108, 7138), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Post'], {'id': 'pk'}), '(Post, id=pk)\n', (7125, 7138), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((7595, 7643), 'django.shortcuts.render', 'render', (['requests', '"""blog/edit_post.html"""', 'context'], {}), "(requests, 'blog/edit_post.html', context)\n", (7601, 7643), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((7703, 7733), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Post'], {'id': 'pk'}), '(Post, id=pk)\n', (7720, 7733), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((7877, 7898), 'django.shortcuts.redirect', 'redirect', (['"""blog-home"""'], {}), "('blog-home')\n", (7885, 7898), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((8472, 8524), 'django.shortcuts.render', 'render', (['requests', '"""blog/user_register.html"""', 'context'], {}), "(requests, 'blog/user_register.html', context)\n", (8478, 8524), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((9126, 9175), 'django.shortcuts.render', 'render', (['requests', '"""blog/user_login.html"""', 'context'], {}), "(requests, 'blog/user_login.html', context)\n", (9132, 9175), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((9209, 9225), 'django.contrib.auth.logout', 'logout', (['requests'], {}), '(requests)\n', (9215, 9225), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((9230, 9286), 'django.contrib.messages.success', 'messages.success', (['requests', 'f"""You have been loggged out"""'], {}), "(requests, f'You have been loggged out')\n", (9246, 9286), False, 'from django.contrib import messages\n'), ((10026, 10072), 'django.shortcuts.render', 'render', (['requests', '"""user/profile.html"""', 'context'], {}), "(requests, 'user/profile.html', context)\n", (10032, 10072), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((5658, 5715), 'django.template.loader.render_to_string', 'render_to_string', (['"""blog/comments.html"""', 
'context', 'requests'], {}), "('blog/comments.html', context, requests)\n", (5674, 5715), False, 'from django.template.loader import render_to_string\n'), ((5731, 5759), 'django.http.JsonResponse', 'JsonResponse', (["{'form': html}"], {}), "({'form': html})\n", (5743, 5759), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404\n'), ((6288, 6349), 'django.template.loader.render_to_string', 'render_to_string', (['"""blog/like_section.html"""', 'context', 'requests'], {}), "('blog/like_section.html', context, requests)\n", (6304, 6349), False, 'from django.template.loader import render_to_string\n'), ((6365, 6393), 'django.http.JsonResponse', 'JsonResponse', (["{'form': html}"], {}), "({'form': html})\n", (6377, 6393), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404\n'), ((7190, 7199), 'django.http.Http404', 'Http404', ([], {}), '()\n', (7197, 7199), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404\n'), ((7822, 7831), 'django.http.Http404', 'Http404', ([], {}), '()\n', (7829, 7831), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404\n'), ((9319, 9339), 'django.urls.reverse', 'reverse', (['"""blog-home"""'], {}), "('blog-home')\n", (9326, 9339), False, 'from django.urls import reverse\n'), ((5255, 5315), 'django.contrib.messages.success', 'messages.success', (['requests', 'f"""Your comment has been posted."""'], {}), "(requests, f'Your comment has been posted.')\n", (5271, 5315), False, 'from django.contrib import messages\n'), ((6663, 6682), 'django.utils.text.slugify', 'slugify', (['post.title'], {}), '(post.title)\n', (6670, 6682), False, 'from django.utils.text import slugify\n'), ((6719, 6777), 'django.contrib.messages.success', 'messages.success', (['requests', 'f"""Your post has been created!"""'], {}), "(requests, f'Your post has been created!')\n", (6735, 6777), False, 'from django.contrib import messages\n'), ((7364, 7422), 'django.contrib.messages.success', 'messages.success', (['requests', 'f"""Your post has been updated!"""'], {}), "(requests, f'Your post has been updated!')\n", (7380, 7422), False, 'from django.contrib import messages\n'), ((8200, 8298), 'django.contrib.messages.success', 'messages.success', (['requests', 'f"""Your account has been created for {username}! Please login now."""'], {}), "(requests,\n f'Your account has been created for {username}! 
Please login now.')\n", (8216, 8298), False, 'from django.contrib import messages\n'), ((8361, 8383), 'django.shortcuts.redirect', 'redirect', (['"""user-login"""'], {}), "('user-login')\n", (8369, 8383), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((8776, 8836), 'django.contrib.auth.authenticate', 'authenticate', (['requests'], {'username': 'username', 'password': 'password'}), '(requests, username=username, password=password)\n', (8788, 8836), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((9729, 9789), 'django.contrib.messages.success', 'messages.success', (['requests', 'f"""Your profile has been updated"""'], {}), "(requests, f'Your profile has been updated')\n", (9745, 9789), False, 'from django.contrib import messages\n'), ((9809, 9828), 'django.shortcuts.redirect', 'redirect', (['"""profile"""'], {}), "('profile')\n", (9817, 9828), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((2585, 2609), 'django.db.models.Q', 'Q', ([], {'body__icontains': 'query'}), '(body__icontains=query)\n', (2586, 2609), False, 'from django.db.models import Q\n'), ((6835, 6884), 'django.urls.reverse', 'reverse', (['"""post-detail"""'], {'args': '[post.pk, post.slug]'}), "('post-detail', args=[post.pk, post.slug])\n", (6842, 6884), False, 'from django.urls import reverse\n'), ((8886, 8907), 'django.contrib.auth.login', 'login', (['requests', 'user'], {}), '(requests, user)\n', (8891, 8907), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((8924, 8977), 'django.contrib.messages.success', 'messages.success', (['requests', 'f"""You are now loggged in"""'], {}), "(requests, f'You are now loggged in')\n", (8940, 8977), False, 'from django.contrib import messages\n'), ((2505, 2530), 'django.db.models.Q', 'Q', ([], {'title__icontains': 'query'}), '(title__icontains=query)\n', (2506, 2530), False, 'from django.db.models import Q\n'), ((2545, 2570), 'django.db.models.Q', 'Q', ([], {'author__username': 'query'}), '(author__username=query)\n', (2546, 2570), False, 'from django.db.models import Q\n'), ((9022, 9042), 'django.urls.reverse', 'reverse', (['"""blog-home"""'], {}), "('blog-home')\n", (9029, 9042), False, 'from django.urls import reverse\n')]
|
from tensorflow.keras.callbacks import ModelCheckpoint
import os
def produce_callback():
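    # the checkpoint filename template embeds the epoch number and the validation PSNR/SSIM metrics at save time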
checkpoint_filepath = "./run/weights-improvement-{epoch:02d}-{val_psnr_metric:.2f}-{val_ssim_metric:.2f}.hdf5"
os.makedirs(os.path.dirname(checkpoint_filepath), exist_ok=True)
model_checkpoint = ModelCheckpoint(checkpoint_filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
return model_checkpoint
|
[
"os.path.dirname",
"tensorflow.keras.callbacks.ModelCheckpoint"
] |
[((298, 402), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['checkpoint_filepath'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(checkpoint_filepath, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min')\n", (313, 402), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((222, 258), 'os.path.dirname', 'os.path.dirname', (['checkpoint_filepath'], {}), '(checkpoint_filepath)\n', (237, 258), False, 'import os\n')]
|
from brownie import *
from brownie.network.contract import InterfaceContainer
import json
def loadConfig():
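    # pick the signing account and the contract-address JSON file matching the active brownie network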
global contracts, acct
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet-ws":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "rsk-testnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
else:
raise Exception("Network not supported.")
contracts = json.load(configFile)
|
[
"json.load"
] |
[((1058, 1079), 'json.load', 'json.load', (['configFile'], {}), '(configFile)\n', (1067, 1079), False, 'import json\n')]
|
from Joueur import Joueur
from Plateau import Plateau
from BonusType import BonusType
from random import randrange
nbBonus = int(input("Nombre de bonus : "))
nbJoueur = int(input("Saisissez le nombre de joueur : "))
listJoueur = []
for i in range(0,nbJoueur):
listJoueur.append(Joueur(input("Nom du joueur " + str(i+1) + " : ")))
print("\n")
partieGagne = False
gagnant = None
Plateau = Plateau(nbBonus)
replay = False
tour = 0
while not partieGagne:
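    # one loop iteration is one player's turn; the game ends when a player lands exactly on square 100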
j = listJoueur[tour % nbJoueur]
print("*** Tour de " + j.name + " ***")
print("Lancer les dés ... : ", end="")
input()
lancerDe = randrange(1,6)
print(str(lancerDe))
tmpPos = j.pos + lancerDe
if tmpPos > 100:
print("Il faut faire " + str(100 - j.pos) + " pour gagner !\n")
tour += 1
continue
case = Plateau.content[tmpPos]
if case.hasBonus() and tmpPos != 100:
print("!!! BONUS : " + case.bonus.type.name + " !!!\n")
if case.bonus.type == BonusType.REJOUER:
replay = True
elif case.bonus.type == BonusType.DOUBLE:
tmpPos += lancerDe
elif case.bonus.type == BonusType.AVANCER_5_CASES:
tmpPos += 5
if tmpPos == 100:
partieGagne = True
gagnant = j
continue
elif tmpPos <= 100:
j.pos = tmpPos
if replay:
replay = False
continue
case = Plateau.content[tmpPos] # update Case for pouvoir
if case.hasPouvoir():
print("Attention ! C'est une case " + case.pouvoir.name + " direction la case : " + str(case.targetCase))
j.pos = case.targetCase
print ("Case : " + str(j.pos) + "\n")
tour += 1
print("Bravo ! Le gagnant est : " + gagnant.name)
|
[
"Plateau.Plateau",
"random.randrange"
] |
[((395, 411), 'Plateau.Plateau', 'Plateau', (['nbBonus'], {}), '(nbBonus)\n', (402, 411), False, 'from Plateau import Plateau\n'), ((612, 627), 'random.randrange', 'randrange', (['(1)', '(6)'], {}), '(1, 6)\n', (621, 627), False, 'from random import randrange\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 21:52:54 2019
@author: USER
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn as sk
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, roc_auc_score,f1_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from utilities import load_census_data
from fairness_metrics import computeEDFforData
from DNN_model import NeuralNet,training_fair_model
#The function below ensures that we seed all random generators with the same value to get reproducible results
def set_random_seed(state=1):
gens = (np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
for set_state in gens:
set_state(state)
RANDOM_STATE = 1
set_random_seed(RANDOM_STATE)
#%% data loading and pre-processing
# load the train dataset
X, y, S = load_census_data('data/adult.data',1)
# Define all the "intersectional groups" to maintain stochastic update of p(y|S) correctly among different batches
intersectionalGroups = np.unique(S,axis=0) # all intersecting groups, i.e. black-women, white-man etc
# load the test dataset
test_X, test_y, test_S = load_census_data('data/adult.test',0)
# scale/normalize train & test data and shuffle train data
scaler = StandardScaler().fit(X)
scale_df = lambda df, scaler: pd.DataFrame(scaler.transform(df), columns=df.columns, index=df.index)
X = X.pipe(scale_df, scaler)
test_X = test_X.pipe(scale_df, scaler)
X, y, S = sk.utils.shuffle(X, y, S, random_state=0)
X = X.values
y = y.values
S = S.values
test_X = test_X.values
test_y = test_y.values
test_S = test_S.values
X, dev_X, y, dev_y, S, dev_S = train_test_split(X, y, S, test_size=0.30,stratify=y, random_state=7)
#%%
# deep neural network using pytorch
trainData = torch.from_numpy(X)
trainLabel = torch.from_numpy(y.reshape((-1,1)))
#devData = torch.from_numpy(devData)
testData = torch.from_numpy(test_X)
devData = torch.from_numpy(dev_X)
# hyperparameters
input_size = trainData.size()[1]
hidden1 = 16
hidden2 = 16
hidden3 = 16
output_size = 1
num_epochs = 500
#burnIn = 50
stepSize = 0.1
learning_rate = 0.001
burnIn = 50
epsilonBase = torch.tensor(0.0) # To protect the 80%-rule in intersectional setting, set this variable to: - log(0.8) = 0.2231
#%%
import sys
sys.stdout=open("batch_DF_EPS0.txt","w")
#%% training DNN model with fairness constraint
# Train a fair classifier
lamda = torch.tensor(0.01) # λ is a hyper-parameter that balances between the prediction loss and fairness.
# Select λ for fair learning algorithms via rigorous grid search on the development set. See paper for details.
DF_Model = training_fair_model(input_size,hidden1,hidden2,hidden3,output_size,learning_rate,num_epochs,trainData,
trainLabel,S,intersectionalGroups,burnIn,stepSize,epsilonBase,lamda)
#%%
# Validate the model
with torch.no_grad():
devData = Variable(devData.float())
predictProb = DF_Model(devData)
predicted = ((predictProb>0.5).numpy()).reshape((-1,))
Accuracy = sum(predicted == dev_y)/len(dev_y)
# Save results
predictProb = (predictProb.numpy()).reshape((-1,))
print(f"DF classifier dev accuracy: {Accuracy: .3f}")
aucScore = roc_auc_score(dev_y,predictProb)
print(f"DF classifier dev ROC AUC: {aucScore: .3f}")
nn_f1 = f1_score(dev_y,predicted)
print(f"DF classifier dev F1 score: {nn_f1: .2f}")
epsilon_hard,epsilon_soft,gamma_hard,gamma_soft,p_rule_hard,p_rule_soft = computeEDFforData(dev_S,predicted,predictProb,intersectionalGroups)
print(f"DF classifier dev epsilon_hard: {epsilon_hard: .3f}")
print(f"DF classifier dev epsilon_soft: {epsilon_soft: .3f}")
print(f"DF classifier dev gamma_hard: {gamma_hard: .3f}")
print(f"DF classifier dev gamma_soft: {gamma_soft: .3f}")
print(f"DF classifier dev p_rule_hard: {p_rule_hard: .3f}")
print(f"DF classifier dev p_rule_soft: {p_rule_soft: .3f}")
#%%
# Test the model
with torch.no_grad():
testData = Variable(testData.float())
predictProb = DF_Model(testData)
predicted = ((predictProb>0.5).numpy()).reshape((-1,))
Accuracy = sum(predicted == test_y)/len(test_y)
# Save results
predictProb = (predictProb.numpy()).reshape((-1,))
print(f"DF_Classifier accuracy: {Accuracy: .3f}")
aucScore = roc_auc_score(test_y,predictProb)
print(f"DF_Classifier ROC AUC: {aucScore: .3f}")
nn_f1 = f1_score(test_y,predicted)
print(f"DF_Classifier F1 score: {nn_f1: .2f}")
epsilon_hard,epsilon_soft,gamma_hard,gamma_soft,p_rule_hard,p_rule_soft = computeEDFforData(test_S,predicted,predictProb,intersectionalGroups)
print(f"DF_Classifier epsilon_hard: {epsilon_hard: .3f}")
print(f"DF_Classifier epsilon_soft: {epsilon_soft: .3f}")
print(f"DF_Classifier gamma_hard: {gamma_hard: .3f}")
print(f"DF_Classifier gamma_soft: {gamma_soft: .3f}")
print(f"DF_Classifier p_rule_hard: {p_rule_hard: .3f}")
print(f"DF_Classifier p_rule_soft: {p_rule_soft: .3f}")
|
[
"torch.tensor",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"utilities.load_census_data",
"sklearn.metrics.roc_auc_score",
"DNN_model.training_fair_model",
"sklearn.metrics.f1_score",
"fairness_metrics.computeEDFforData",
"sklearn.utils.shuffle",
"torch.no_grad",
"numpy.unique",
"torch.from_numpy"
] |
[((971, 1009), 'utilities.load_census_data', 'load_census_data', (['"""data/adult.data"""', '(1)'], {}), "('data/adult.data', 1)\n", (987, 1009), False, 'from utilities import load_census_data\n'), ((1148, 1168), 'numpy.unique', 'np.unique', (['S'], {'axis': '(0)'}), '(S, axis=0)\n', (1157, 1168), True, 'import numpy as np\n'), ((1278, 1316), 'utilities.load_census_data', 'load_census_data', (['"""data/adult.test"""', '(0)'], {}), "('data/adult.test', 0)\n", (1294, 1316), False, 'from utilities import load_census_data\n'), ((1591, 1632), 'sklearn.utils.shuffle', 'sk.utils.shuffle', (['X', 'y', 'S'], {'random_state': '(0)'}), '(X, y, S, random_state=0)\n', (1607, 1632), True, 'import sklearn as sk\n'), ((1778, 1846), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y', 'S'], {'test_size': '(0.3)', 'stratify': 'y', 'random_state': '(7)'}), '(X, y, S, test_size=0.3, stratify=y, random_state=7)\n', (1794, 1846), False, 'from sklearn.model_selection import train_test_split\n'), ((1900, 1919), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (1916, 1919), False, 'import torch\n'), ((2041, 2065), 'torch.from_numpy', 'torch.from_numpy', (['test_X'], {}), '(test_X)\n', (2057, 2065), False, 'import torch\n'), ((2076, 2099), 'torch.from_numpy', 'torch.from_numpy', (['dev_X'], {}), '(dev_X)\n', (2092, 2099), False, 'import torch\n'), ((2300, 2317), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (2312, 2317), False, 'import torch\n'), ((2555, 2573), 'torch.tensor', 'torch.tensor', (['(0.01)'], {}), '(0.01)\n', (2567, 2573), False, 'import torch\n'), ((2806, 2998), 'DNN_model.training_fair_model', 'training_fair_model', (['input_size', 'hidden1', 'hidden2', 'hidden3', 'output_size', 'learning_rate', 'num_epochs', 'trainData', 'trainLabel', 'S', 'intersectionalGroups', 'burnIn', 'stepSize', 'epsilonBase', 'lamda'], {}), '(input_size, hidden1, hidden2, hidden3, output_size,\n learning_rate, num_epochs, trainData, trainLabel, S,\n intersectionalGroups, burnIn, stepSize, epsilonBase, lamda)\n', (2825, 2998), False, 'from DNN_model import NeuralNet, training_fair_model\n'), ((3376, 3409), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['dev_y', 'predictProb'], {}), '(dev_y, predictProb)\n', (3389, 3409), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((3470, 3496), 'sklearn.metrics.f1_score', 'f1_score', (['dev_y', 'predicted'], {}), '(dev_y, predicted)\n', (3478, 3496), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((3622, 3692), 'fairness_metrics.computeEDFforData', 'computeEDFforData', (['dev_S', 'predicted', 'predictProb', 'intersectionalGroups'], {}), '(dev_S, predicted, predictProb, intersectionalGroups)\n', (3639, 3692), False, 'from fairness_metrics import computeEDFforData\n'), ((4414, 4448), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['test_y', 'predictProb'], {}), '(test_y, predictProb)\n', (4427, 4448), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((4505, 4532), 'sklearn.metrics.f1_score', 'f1_score', (['test_y', 'predicted'], {}), '(test_y, predicted)\n', (4513, 4532), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((4654, 4725), 'fairness_metrics.computeEDFforData', 'computeEDFforData', (['test_S', 'predicted', 'predictProb', 'intersectionalGroups'], {}), '(test_S, predicted, predictProb, intersectionalGroups)\n', (4671, 4725), False, 'from fairness_metrics import computeEDFforData\n'), ((3040, 
3055), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3053, 3055), False, 'import torch\n'), ((4077, 4092), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4090, 4092), False, 'import torch\n'), ((1385, 1401), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1399, 1401), False, 'from sklearn.preprocessing import StandardScaler\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 2 13:42:37 2019
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
import cycler
def spectral_decay(case = 4,
vname = 'example_0',
plot_type = 'val',
save = False):
## Parameters & Settings
# 0: Barbell
# 1: Tree
# 2: Gaussian Mix
# 3: Hyperuniform Circle
# 4: Hyperuniform Ellipse
## Parameters & Settings
# case = 4 # 0: Barbell; 1: Tree; 2: Gaussian Mix; 3/4: Hyperuniform Circle/Ellipse
# vname = 'example_0' # spectial tag for video file
colormap = cm.viridis # cmap for viz
# plot_type = 'val' # 'vec': right singular vec embed; 'val': singula val; '3d': 3D
# save = False # Save figure?
save_type = '.pdf'
psi_min = 2 # min right singular vector to plot; 2, 3, ..., 11
psi_max = psi_min + 1 # max right singular vector to plot
psi3d_min = 2 # 2, 3, ..., 11
psi3d_mid = psi3d_min + 1
psi3d_max = psi3d_min + 2
for sig in np.arange(2,12):
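        # sig selects which singular value/vector (2nd through 11th) is read from the saved spectra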
if case == 0:
fname = 'barbell'
cdir = 'barbell/'
plot_title = 'Barbell'
elif case == 1:
fname = 'tree'
cdir = 'tree/'
plot_title = 'Tree'
elif case == 2:
fname = 'gauss'
cdir = 'gauss/'
plot_title = 'Gauss'
elif case == 3:
fname = 'hyperuni_circle'
cdir = 'h_circle/'
plot_title = 'Hyperuniform Circle'
elif case == 4:
fname = 'hyperuni_ellipse'
cdir = 'ellipse/'
plot_title = 'Hyperuniform Ellipse'
sname = fname + '_' + vname # save name tag
fname += vname # load name tag
# Get # of Iterations
iter_name = 'dm/'+cdir+'iterations_'+fname+'.npy'
iterations = np.load(iter_name)
# Get Diffusion Maps Spectra
eDM_name = 'dm/'+cdir+'E_'+fname+'.npy'; eDM_sig = np.load(eDM_name)[sig - 2]
# Initialize Specra Lists
ei = []; et = []; eDM = []
# Get Epsilon (shape = (2, #iterations), 0th axis #eps doublings, 1st axis eps)
eps_name = 'dm/'+cdir+'epsilon_list_'+fname+'.npy'; eps_adjust = np.load(eps_name)
# Get Number of Points in Dataset & Color
datasize_name = 'dm/'+cdir+'V_'+fname+'.npy'; N = np.load(datasize_name).shape[0]
C_name = 'dm/'+cdir+'C_'+fname+'.npy'; C = np.load(C_name)
#%%
for i in np.arange(1, 1+iterations):
'''Singular Values (DM for Changing Data, TCDM) & Eigenvalues (DM)'''
pi_name = 'p_i/'+cdir+'Ei_'+str(i)+'_'+fname+'.npy'
pt_name = 'p_t/'+cdir+'Et_'+str(i)+'_'+fname+'.npy'
ei.append([i, np.load(pi_name)[sig - 2]]) # Operator P_i
et.append([i, np.load(pt_name)[sig - 2]]) # Composed Operator P^((t))
eDM.append([i, eDM_sig**i]) # Diffusion Maps Operator P^{t}
if plot_type == 'val':
plt.subplot(311)
plt.plot(np.asarray(ei).T[0], np.asarray(ei).T[1], marker='o', label=r'$P_{\epsilon,i}$', color = 'c')
plt.subplot(312)
plt.plot(np.asarray(et).T[0], np.asarray(et).T[1], marker='o', label=r'$P_{\epsilon}^{(t)}$', color = 'purple')
plt.ylabel(r"$\sigma_{k}$")
plt.subplot(313)
plt.plot(np.asarray(eDM).T[0], np.asarray(eDM).T[1], marker='o', label=r'$P_{\epsilon}^{t}$', color = 'g')
plt.xlabel("Iteration")
# plt.show()
save_dir = 'figs/spectral/'+cdir
save_name = sname+'_sigma'+str(sig)+'_N-'+str(N)
elif plot_type == 'vec':
# Set Singular Vectors
psiDM_name = 'p_i/'+cdir+'Vi_1_'+fname+'.npy'
psiDM = np.load(psiDM_name)
# Generate Figure
plt.title(r'$\psi_{'+str(psi_min)+'}$ & $\psi_{'+str(psi_max)+'}$ for '+plot_title+' (N = '+str(N)+')')
plt.scatter(psiDM[:,psi_min-2], psiDM[:,psi_max-2], c=C, cmap = colormap,
vmin=np.amin(C), vmax=np.amax(C), label=r'$P_{\epsilon}^{t}$')
plt.xlabel(r'$\psi_{'+str(psi_min)+'}$')
plt.ylabel(r'$\psi_{'+str(psi_max)+'}$')
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
plt.show()
save_dir = 'figs/embed/'+cdir
save_name = sname+'_psi'+str(psi_min)+'_psi'+str(psi_max)+'_N-'+str(N)
elif plot_type == '3d':
# Set Singular Vectors and Plot
from mpl_toolkits.mplot3d import Axes3D
psiDM_name = 'p_i/'+cdir+'Vi_1_'+fname+'.npy'
psiDM = np.load(psiDM_name)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.title(r'$\psi_{'+str(psi3d_min)+'}$, $\psi_{'+str(psi3d_mid)+
'}$, & $\psi_{'+str(psi3d_max)+'}$ Embedding of '
+plot_title+' (N = '+str(N)+')')
ax.scatter(psiDM[:, psi3d_min - 2], psiDM[:, psi3d_mid - 2],
psiDM[:, psi3d_max - 2], c=C, cmap=colormap)
ax.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
plt.show()
save_dir = 'figs/embed/'+cdir
save_name = sname+'_psi'+str(psi3d_min)+'_psi'+str(
psi3d_mid)+'_psi'+str(psi3d_max)+'_N-'+str(N)
# Save Figure
if save == True:
save_name = save_dir+save_name+save_type
plt.savefig(save_name, bbox_inches='tight', transparent=True, dpi=300)
return plt.show()
if "__name__" == "__main__":
spectral_decay()
# Save Figure
if save == True:
save_name = save_dir+save_name+save_type
plt.savefig(save_name, bbox_inches='tight', transparent=True, dpi=300)
|
[
"matplotlib.pyplot.subplot",
"numpy.load",
"matplotlib.pyplot.show",
"numpy.amin",
"numpy.asarray",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1133, 1149), 'numpy.arange', 'np.arange', (['(2)', '(12)'], {}), '(2, 12)\n', (1142, 1149), True, 'import numpy as np\n'), ((6195, 6205), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6203, 6205), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2028), 'numpy.load', 'np.load', (['iter_name'], {}), '(iter_name)\n', (2017, 2028), True, 'import numpy as np\n'), ((2409, 2426), 'numpy.load', 'np.load', (['eps_name'], {}), '(eps_name)\n', (2416, 2426), True, 'import numpy as np\n'), ((2627, 2642), 'numpy.load', 'np.load', (['C_name'], {}), '(C_name)\n', (2634, 2642), True, 'import numpy as np\n'), ((2681, 2709), 'numpy.arange', 'np.arange', (['(1)', '(1 + iterations)'], {}), '(1, 1 + iterations)\n', (2690, 2709), True, 'import numpy as np\n'), ((6104, 6174), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_name'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'dpi': '(300)'}), "(save_name, bbox_inches='tight', transparent=True, dpi=300)\n", (6115, 6174), True, 'import matplotlib.pyplot as plt\n'), ((6372, 6442), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_name'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'dpi': '(300)'}), "(save_name, bbox_inches='tight', transparent=True, dpi=300)\n", (6383, 6442), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2151), 'numpy.load', 'np.load', (['eDM_name'], {}), '(eDM_name)\n', (2141, 2151), True, 'import numpy as np\n'), ((3216, 3232), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (3227, 3232), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3376), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (3371, 3376), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3540), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma_{k}$"""'], {}), "('$\\\\sigma_{k}$')\n", (3523, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3553, 3569), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (3564, 3569), True, 'import matplotlib.pyplot as plt\n'), ((3701, 3724), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (3711, 3724), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2566), 'numpy.load', 'np.load', (['datasize_name'], {}), '(datasize_name)\n', (2551, 2566), True, 'import numpy as np\n'), ((4045, 4064), 'numpy.load', 'np.load', (['psiDM_name'], {}), '(psiDM_name)\n', (4052, 4064), True, 'import numpy as np\n'), ((4547, 4679), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)', 'right': '(False)', 'left': '(False)', 'labelleft': '(False)'}), "(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, right=False, left=False, labelleft=False)\n", (4562, 4679), True, 'import matplotlib.pyplot as plt\n'), ((4725, 4735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4733, 4735), True, 'import matplotlib.pyplot as plt\n'), ((5118, 5137), 'numpy.load', 'np.load', (['psiDM_name'], {}), '(psiDM_name)\n', (5125, 5137), True, 'import numpy as np\n'), ((5173, 5185), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5183, 5185), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5797, 5799), True, 'import matplotlib.pyplot as plt\n'), ((2958, 2974), 'numpy.load', 'np.load', (['pi_name'], {}), '(pi_name)\n', (2965, 2974), True, 'import numpy as np\n'), ((3027, 3043), 'numpy.load', 
'np.load', (['pt_name'], {}), '(pt_name)\n', (3034, 3043), True, 'import numpy as np\n'), ((3254, 3268), 'numpy.asarray', 'np.asarray', (['ei'], {}), '(ei)\n', (3264, 3268), True, 'import numpy as np\n'), ((3275, 3289), 'numpy.asarray', 'np.asarray', (['ei'], {}), '(ei)\n', (3285, 3289), True, 'import numpy as np\n'), ((3398, 3412), 'numpy.asarray', 'np.asarray', (['et'], {}), '(et)\n', (3408, 3412), True, 'import numpy as np\n'), ((3419, 3433), 'numpy.asarray', 'np.asarray', (['et'], {}), '(et)\n', (3429, 3433), True, 'import numpy as np\n'), ((3591, 3606), 'numpy.asarray', 'np.asarray', (['eDM'], {}), '(eDM)\n', (3601, 3606), True, 'import numpy as np\n'), ((3613, 3628), 'numpy.asarray', 'np.asarray', (['eDM'], {}), '(eDM)\n', (3623, 3628), True, 'import numpy as np\n'), ((4359, 4369), 'numpy.amin', 'np.amin', (['C'], {}), '(C)\n', (4366, 4369), True, 'import numpy as np\n'), ((4376, 4386), 'numpy.amax', 'np.amax', (['C'], {}), '(C)\n', (4383, 4386), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\conta\Documents\script\Wizard\App\work\ui_files\server_widget.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(384, 564)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(2, 2, 2, 2)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_2 = QtWidgets.QFrame(Form)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_2)
self.horizontalLayout_2.setContentsMargins(1, 1, 1, 1)
self.horizontalLayout_2.setSpacing(1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.server_quit_pushButton = QtWidgets.QPushButton(self.frame_2)
self.server_quit_pushButton.setMinimumSize(QtCore.QSize(0, 40))
self.server_quit_pushButton.setObjectName("server_quit_pushButton")
self.horizontalLayout_2.addWidget(self.server_quit_pushButton)
self.server_reduce_pushButton = QtWidgets.QPushButton(self.frame_2)
self.server_reduce_pushButton.setMinimumSize(QtCore.QSize(0, 40))
self.server_reduce_pushButton.setObjectName("server_reduce_pushButton")
self.horizontalLayout_2.addWidget(self.server_reduce_pushButton)
self.verticalLayout.addWidget(self.frame_2)
self.frame_3 = QtWidgets.QFrame(Form)
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_3)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.server_project_label = QtWidgets.QLabel(self.frame_3)
self.server_project_label.setObjectName("server_project_label")
self.horizontalLayout_3.addWidget(self.server_project_label)
self.verticalLayout.addWidget(self.frame_3)
self.frame = QtWidgets.QFrame(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout.setObjectName("horizontalLayout")
self.server_ip_label = QtWidgets.QLabel(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.server_ip_label.sizePolicy().hasHeightForWidth())
self.server_ip_label.setSizePolicy(sizePolicy)
self.server_ip_label.setObjectName("server_ip_label")
self.horizontalLayout.addWidget(self.server_ip_label)
self.server_connexions_label = QtWidgets.QLabel(self.frame)
self.server_connexions_label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.server_connexions_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.server_connexions_label.setObjectName("server_connexions_label")
self.horizontalLayout.addWidget(self.server_connexions_label)
self.verticalLayout.addWidget(self.frame)
self.server__users_verticalFrame = QtWidgets.QFrame(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.server__users_verticalFrame.sizePolicy().hasHeightForWidth())
self.server__users_verticalFrame.setSizePolicy(sizePolicy)
self.server__users_verticalFrame.setObjectName("server__users_verticalFrame")
self.users_layout_0 = QtWidgets.QVBoxLayout(self.server__users_verticalFrame)
self.users_layout_0.setObjectName("users_layout_0")
self.users_layout = QtWidgets.QVBoxLayout()
self.users_layout.setObjectName("users_layout")
self.users_layout_0.addLayout(self.users_layout)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.users_layout_0.addItem(spacerItem)
self.verticalLayout.addWidget(self.server__users_verticalFrame)
self.log_textEdit = QtWidgets.QTextEdit(Form)
self.log_textEdit.setReadOnly(True)
self.log_textEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.log_textEdit.setObjectName("log_textEdit")
self.verticalLayout.addWidget(self.log_textEdit)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.server_quit_pushButton.setText(_translate("Form", "Quit"))
self.server_reduce_pushButton.setText(_translate("Form", "Reduce"))
self.server_project_label.setText(_translate("Form", "Project"))
self.server_ip_label.setText(_translate("Form", "IP Label"))
self.server_connexions_label.setText(_translate("Form", "Connexions"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QSizePolicy",
"PyQt5.QtWidgets.QFrame",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QSize",
"PyQt5.QtWidgets.QSpacerItem",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QApplication"
] |
[((6016, 6048), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6038, 6048), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6060, 6079), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (6077, 6079), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((452, 479), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['Form'], {}), '(Form)\n', (473, 479), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((664, 686), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['Form'], {}), '(Form)\n', (680, 686), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((893, 928), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.frame_2'], {}), '(self.frame_2)\n', (914, 928), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1144, 1179), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.frame_2'], {}), '(self.frame_2)\n', (1165, 1179), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1439, 1474), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.frame_2'], {}), '(self.frame_2)\n', (1460, 1474), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1777, 1799), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['Form'], {}), '(Form)\n', (1793, 1799), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2006, 2041), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.frame_3'], {}), '(self.frame_3)\n', (2027, 2041), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2146, 2176), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.frame_3'], {}), '(self.frame_3)\n', (2162, 2176), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2391, 2413), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['Form'], {}), '(Form)\n', (2407, 2413), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2435, 2523), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Preferred', 'QtWidgets.QSizePolicy.Fixed'], {}), '(QtWidgets.QSizePolicy.Preferred, QtWidgets.\n QSizePolicy.Fixed)\n', (2456, 2523), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2926, 2959), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.frame'], {}), '(self.frame)\n', (2947, 2959), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3055, 3083), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.frame'], {}), '(self.frame)\n', (3071, 3083), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3105, 3193), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Preferred', 'QtWidgets.QSizePolicy.Fixed'], {}), '(QtWidgets.QSizePolicy.Preferred, QtWidgets.\n QSizePolicy.Fixed)\n', (3126, 3193), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3583, 3611), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.frame'], {}), '(self.frame)\n', (3599, 3611), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4051, 4073), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['Form'], {}), '(Form)\n', (4067, 4073), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4095, 4187), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Preferred', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Preferred, QtWidgets.\n QSizePolicy.Expanding)\n', (4116, 4187), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4554, 4609), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.server__users_verticalFrame'], {}), 
'(self.server__users_verticalFrame)\n', (4575, 4609), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4698, 4721), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (4719, 4721), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4856, 4954), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(20)', '(40)', 'QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Expanding'], {}), '(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.\n QSizePolicy.Expanding)\n', (4877, 4954), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5098, 5123), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['Form'], {}), '(Form)\n', (5117, 5123), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5402, 5445), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (5439, 5445), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1231, 1250), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(40)'], {}), '(0, 40)\n', (1243, 1250), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1528, 1547), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(40)'], {}), '(0, 40)\n', (1540, 1547), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
import cv2
import numpy as np
import mediapipe as mp
import glob
import os
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
#import os
os.environ['JOBLIB_TEMP_FOLDER'] = '/tmp'
# wait for process: "W016","W017","W018","W019","W023","W024","W025","W026","W028","W029","W033","W035","W036","W037","W038","W040"
# M028 M029 M030 M031 M032 M033 M034 M035 M037 M039 M040 M041 W009 W011 W014 M042 W015
# M012 M013 M022 M026 M027 M031 M037 M041 W014
# W016 W018 W019 W023 W024 W025 W026 W028 W029 W033 W035 W036
# W040 W038 W037
in_path = glob.glob('/data3/MEAD/W036/video/front/*/level_*/0*.mp4')
#in_path = glob.glob('/data3/MEAD/M012/video/front/disgusted/level_2/027.mp4')
#print(in_path)
out_path = []
out_path_initlmk = []
out_path_motion = []
for pid,path in enumerate(in_path):
#print(pid,path)
p,f = os.path.split(path)
na,ext = os.path.splitext(f)
#print(p+"/"+na+"_multiland.npy")
out_path.append(p+"/"+na+"_multiland.npy")
out_path_initlmk.append(p+"/"+na+"_initlmk_multiland.npy")
out_path_motion.append(p+"/"+na+"_motion_multiland.npy")
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
def vis_landmark_on_img(img, shape, linewidth=2):
'''
Visualize landmark on images.
'''
def draw_curve(idx_list, color=(0, 255, 0), loop=False, lineWidth=linewidth):
for i in idx_list:
cv2.line(img, (shape[i, 0], shape[i, 1]), (shape[i + 1, 0], shape[i + 1, 1]), color, lineWidth)
if (loop):
cv2.line(img, (shape[idx_list[0], 0], shape[idx_list[0], 1]),
(shape[idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1]), color, lineWidth)
draw_curve(list(range(0, 16)), color=(255, 144, 25)) # jaw
draw_curve(list(range(17, 21)), color=(50, 205, 50)) # eye brow
draw_curve(list(range(22, 26)), color=(50, 205, 50))
draw_curve(list(range(27, 35)), color=(208, 224, 63)) # nose
draw_curve(list(range(36, 41)), loop=True, color=(71, 99, 255)) # eyes
draw_curve(list(range(42, 47)), loop=True, color=(71, 99, 255))
draw_curve(list(range(48, 59)), loop=True, color=(238, 130, 238)) # mouth
draw_curve(list(range(60, 67)), loop=True, color=(238, 130, 238))
return img
with mp_face_mesh.FaceMesh(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as face_mesh:
for vid,vpath in enumerate(in_path):
videoReader = cv2.VideoCapture(in_path[vid])
fs = videoReader.get(cv2.CAP_PROP_FPS)
sz = (int(videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)), int(videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)))
#vw = cv2.VideoWriter('./output/video.mp4',cv2.VideoWriter_fourcc('M','P','E','G'), fs, sz)
        land_res = [] # num_frames * 3 * num_landmarks
motion_res = []
initlmk_res = []
success, frame = videoReader.read()
idx = 0
k = 0
while success:
#print(success)
#print(k)
k += 1
image = frame.copy()
#cv2.imwrite("./imgs/"+str(k)+"_im.png",image)
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if not results.multi_face_landmarks:
                success, frame = videoReader.read() # read the next frame
continue
face_landmarks = results.multi_face_landmarks[0]
land_loc = []
xlis = []
ylis = []
zlis = []
for lm in face_landmarks.landmark:
x = lm.x * sz[0]
y = lm.y * sz[1]
xlis.append(x)
ylis.append(y)
zlis.append(lm.z)
#print(x,y,lm.z)
land_loc.append(xlis)
land_loc.append(ylis)
land_loc.append(zlis)
land_res.append(land_loc)
if idx == 0 : initlmk_res.append(land_loc)
motion_res.append( list( np.array(land_loc) - np.array(land_res[ len(land_res) - 1 ]) ) )
idx += 1
# for face_landmarks in results.multi_face_landmarks:
# mp_drawing.draw_landmarks(
# image=image,
# landmark_list=face_landmarks,
# connections=mp_face_mesh.FACEMESH_CONTOURS,
# landmark_drawing_spec=drawing_spec,
# connection_drawing_spec=drawing_spec)
#cv2.imwrite('./output/video' + str(idx) + '.png', image)
            #vw.write(image) # write the video frame
            success, frame = videoReader.read() # read the next frame
videoReader.release()
#vw.release()
res = np.array(land_res)
np.save(out_path[vid],res)
#np.save(out_path_initlmk[vid],initlmk_res)
#np.save(out_path_motion[vid],motion_res)
print("out:"+out_path[vid])
|
[
"cv2.line",
"numpy.save",
"cv2.cvtColor",
"cv2.VideoCapture",
"numpy.array",
"os.path.splitext",
"glob.glob",
"os.path.split"
] |
[((612, 670), 'glob.glob', 'glob.glob', (['"""/data3/MEAD/W036/video/front/*/level_*/0*.mp4"""'], {}), "('/data3/MEAD/W036/video/front/*/level_*/0*.mp4')\n", (621, 670), False, 'import glob\n'), ((891, 910), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (904, 910), False, 'import os\n'), ((924, 943), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (940, 943), False, 'import os\n'), ((2471, 2501), 'cv2.VideoCapture', 'cv2.VideoCapture', (['in_path[vid]'], {}), '(in_path[vid])\n', (2487, 2501), False, 'import cv2\n'), ((4695, 4713), 'numpy.array', 'np.array', (['land_res'], {}), '(land_res)\n', (4703, 4713), True, 'import numpy as np\n'), ((4722, 4749), 'numpy.save', 'np.save', (['out_path[vid]', 'res'], {}), '(out_path[vid], res)\n', (4729, 4749), True, 'import numpy as np\n'), ((1445, 1545), 'cv2.line', 'cv2.line', (['img', '(shape[i, 0], shape[i, 1])', '(shape[i + 1, 0], shape[i + 1, 1])', 'color', 'lineWidth'], {}), '(img, (shape[i, 0], shape[i, 1]), (shape[i + 1, 0], shape[i + 1, 1]\n ), color, lineWidth)\n', (1453, 1545), False, 'import cv2\n'), ((1572, 1714), 'cv2.line', 'cv2.line', (['img', '(shape[idx_list[0], 0], shape[idx_list[0], 1])', '(shape[idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1])', 'color', 'lineWidth'], {}), '(img, (shape[idx_list[0], 0], shape[idx_list[0], 1]), (shape[\n idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1]), color, lineWidth)\n', (1580, 1714), False, 'import cv2\n'), ((3157, 3195), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3169, 3195), False, 'import cv2\n'), ((3971, 3989), 'numpy.array', 'np.array', (['land_loc'], {}), '(land_loc)\n', (3979, 3989), True, 'import numpy as np\n')]
|
# coding=utf-8
from tornado.web import RequestHandler, HTTPError
from tornado.gen import coroutine
from bson import ObjectId
from bson.json_util import dumps, loads
__author__ = '<EMAIL>'
__date__ = "2018/12/14 下午9:58"
UID_KEY = 'uid'
class LoginHandler(RequestHandler):
def check_xsrf_cookie(self):
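        # XSRF checking is intentionally disabled for this handler so clients can call the login API directly.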
pass
def get(self):
if self.application.settings['debug']:
self.set_secure_cookie(UID_KEY, 'TEST_USER')
            self.finish({'success': True, 'msg': 'Login successful'})
else:
raise HTTPError(404)
def post(self):
user = loads(self.request.body)
self.set_secure_cookie(UID_KEY, user['name'])
        self.finish({'success': True, 'msg': 'Login successful'})
def put(self):
self.clear_cookie(UID_KEY)
        self.finish({'success': True, 'msg': 'Logout successful'})
|
[
"tornado.web.HTTPError",
"bson.json_util.loads"
] |
[((587, 611), 'bson.json_util.loads', 'loads', (['self.request.body'], {}), '(self.request.body)\n', (592, 611), False, 'from bson.json_util import dumps, loads\n'), ((536, 550), 'tornado.web.HTTPError', 'HTTPError', (['(404)'], {}), '(404)\n', (545, 550), False, 'from tornado.web import RequestHandler, HTTPError\n')]
|
from insights import combiner
from insights.combiners.hostname import hostname
from insights.core.context import create_product
from insights.parsers.metadata import MetadataJson
from insights.specs import Specs
@combiner(MetadataJson, [hostname, Specs.machine_id])
def multinode_product(md, hn, machine_id):
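    # Prefer the parsed hostname's FQDN; otherwise fall back to the machine-id file contents.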
hn = hn.fqdn if hn else machine_id.content[0].rstrip()
return create_product(md.data, hn)
@combiner(multinode_product)
def docker(product):
if product and product.name == "docker":
return product
@combiner(multinode_product)
def OSP(product):
if product and product.name == "osp":
return product
@combiner(multinode_product)
def RHEV(product):
if product and product.name == "rhev":
return product
@combiner(multinode_product)
def RHEL(product):
if product and product.name == "rhel":
return product
|
[
"insights.core.context.create_product",
"insights.combiner"
] |
[((215, 267), 'insights.combiner', 'combiner', (['MetadataJson', '[hostname, Specs.machine_id]'], {}), '(MetadataJson, [hostname, Specs.machine_id])\n', (223, 267), False, 'from insights import combiner\n'), ((412, 439), 'insights.combiner', 'combiner', (['multinode_product'], {}), '(multinode_product)\n', (420, 439), False, 'from insights import combiner\n'), ((532, 559), 'insights.combiner', 'combiner', (['multinode_product'], {}), '(multinode_product)\n', (540, 559), False, 'from insights import combiner\n'), ((646, 673), 'insights.combiner', 'combiner', (['multinode_product'], {}), '(multinode_product)\n', (654, 673), False, 'from insights import combiner\n'), ((762, 789), 'insights.combiner', 'combiner', (['multinode_product'], {}), '(multinode_product)\n', (770, 789), False, 'from insights import combiner\n'), ((381, 408), 'insights.core.context.create_product', 'create_product', (['md.data', 'hn'], {}), '(md.data, hn)\n', (395, 408), False, 'from insights.core.context import create_product\n')]
|
"""
Performance Comparison with Commercial APIs like Face++, Google, MS and Amazon
"""
import sys
import os
import requests
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
sys.path.append('../')
from config.cfg import cfg
def prepare_test_imgs(type='basic'):
face_with_emt = {}
    manual_annotation_dir = r'E:\DataSet\CV\TreeCNN\RAF-Face/basic\Annotation\manual'
emotion_label_txt_path = os.path.join(cfg['root'], 'RAF-Face', "%s/EmoLabel/list_patition_label.txt" % type)
emotion_dict = dict(np.loadtxt(emotion_label_txt_path, dtype=np.str))
for _ in os.listdir(manual_annotation_dir):
if _.startswith('test_'):
face_fname = _.replace('_manu_attri', '_aligned').replace('.txt', '.jpg')
face_with_emt[os.path.join(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname)] = int(
emotion_dict[face_fname.replace('_aligned', '')].strip()) - 1
return face_with_emt
def facepp(img_path):
"""
Recognition with Face++ Emotion Recognition API
:param img_path:
:return:
"""
apikey = ''
apisecret = ''
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
files = {'image_file': open(img_path, 'rb')}
payload = {'api_key': apikey, 'api_secret': apisecret,
# 'return_landmark': 0, 'face_tokens': 'none',
'return_attributes': 'emotion'}
response = requests.post(url, data=payload, files=files)
if response.status_code == 200:
res_json = response.json()
max_k = ''
max_v = 0
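        # Keep the emotion label with the highest confidence score returned by the API.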
for k, v in res_json['faces'][0]['attributes']['emotion'].items():
if v > max_v:
max_v = v
max_k = k
return max_k
else:
print(response)
return None
if __name__ == '__main__':
img_files = prepare_test_imgs()
print(img_files)
basic_emt_map = {
'surprise': 0,
'fear': 1,
'disgust': 2,
'happiness': 3,
'sadness': 4,
'anger': 5,
'neutral': 6
}
gt = []
pred = []
for imgf, e in img_files.items():
try:
emt = facepp(imgf)
print(emt)
gt.append(e)
pred.append(basic_emt_map[emt])
except:
pass
print('Accuracy of Emotion Recognition: %s' % str(accuracy_score(gt, pred)))
print('Confusion Matrix on FER: ')
print(confusion_matrix(np.array(gt).ravel().tolist(), np.array(pred).ravel().tolist()))
|
[
"sys.path.append",
"sklearn.metrics.accuracy_score",
"numpy.array",
"numpy.loadtxt",
"requests.post",
"os.path.join",
"os.listdir"
] |
[((254, 276), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (269, 276), False, 'import sys\n'), ((481, 568), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/EmoLabel/list_patition_label.txt' % type)"], {}), "(cfg['root'], 'RAF-Face', '%s/EmoLabel/list_patition_label.txt' %\n type)\n", (493, 568), False, 'import os\n'), ((653, 686), 'os.listdir', 'os.listdir', (['manual_annotation_dir'], {}), '(manual_annotation_dir)\n', (663, 686), False, 'import os\n'), ((1480, 1525), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'files': 'files'}), '(url, data=payload, files=files)\n', (1493, 1525), False, 'import requests\n'), ((589, 637), 'numpy.loadtxt', 'np.loadtxt', (['emotion_label_txt_path'], {'dtype': 'np.str'}), '(emotion_label_txt_path, dtype=np.str)\n', (599, 637), True, 'import numpy as np\n'), ((834, 910), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/Image/aligned' % type)", 'face_fname'], {}), "(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname)\n", (846, 910), False, 'import os\n'), ((2421, 2445), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['gt', 'pred'], {}), '(gt, pred)\n', (2435, 2445), False, 'from sklearn.metrics import accuracy_score\n'), ((2515, 2527), 'numpy.array', 'np.array', (['gt'], {}), '(gt)\n', (2523, 2527), True, 'import numpy as np\n'), ((2546, 2560), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (2554, 2560), True, 'import numpy as np\n')]
|
import pycurl
from io import BytesIO
import json
import datetime
import pandas as pd
myaddress = input('Enter Bitcoin Address: ')
btcval = 100000000.0 # in santoshis
block_time_in_min = 10
block_time_in_sec = block_time_in_min*60
def getBalance(address: str):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/unspent?active=%s" % (address))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
balance = 0.0
# print("getreq = %s" % (getreq.getvalue()))
allunspenttx = json.loads(strbuf.getvalue())['unspent_outputs']
for eachtx in allunspenttx:
balance += eachtx['value']
return balance
def getTxnHistoryOfAddress(address: str):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/address/%s?format=json" % (address))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
new_txn_list = []
alltxns = json.loads(strbuf.getvalue())['txs']
for eachtxn in alltxns:
new_txn = {}
input_list = eachtxn['inputs']
input_value = 0
address_input_value = 0
for each_input in input_list:
input_value += each_input['prev_out']['value']
if each_input['prev_out']['addr'] == address:
address_input_value += each_input['prev_out']['value']
output_list = eachtxn['out']
output_value = 0
address_output_value = 0
for each_output in output_list:
output_value += each_output['value']
if each_output['addr'] == address:
address_output_value += each_output['value']
if address_input_value > address_output_value:
new_txn['credit_in_btc'] = (address_input_value - address_output_value) / btcval
else:
new_txn['debit_in_btc'] = (address_output_value - address_input_value) / btcval
network_fees = input_value - output_value
new_txn['network_fees'] = network_fees / btcval
new_txn['network_fees_in_inr'] = new_txn['network_fees'] * getCurrentSellPriceInInr()
dt = datetime.datetime.fromtimestamp(eachtxn['time'])
new_txn['date_time'] = dt.strftime("%d-%B-%Y %H:%M:%S")
new_txn_list.append(new_txn)
return new_txn_list
def getCurrentBlockHeight():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/blocks?format=json")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_block_height = json.loads(strbuf.getvalue())['blocks'][0]['height']
return current_block_height
def getTxCountInBlock(block_height: int):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/block-height/%d?format=json" % (block_height))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
txlist = json.loads(strbuf.getvalue())['blocks'][0]['tx']
return len(txlist)
#def getListOfTxnsOnAddress(address: str):
#
#def getInputBitcoinInTx(txn: str):
#
#def getOutputBitcoinInTx(txn: str):
#
#def getChangeInTx(txn: str):
#
#def getNetworkFeesInTxn(txn: str):
def getTxRate(tx_count_in_block: int):
return tx_count_in_block/block_time_in_sec
# return block_time_in_sec/tx_count_in_block
def getAverageTxRateInLast24Hrs():
current_block_height = getCurrentBlockHeight()
min_in_a_day = 60*24
blocks_in_a_day = int(min_in_a_day/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_day, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_day
return average_tx_rate
def getAverageTxRateInLastWeek():
current_block_height = getCurrentBlockHeight()
min_in_a_week = 60*24*7
blocks_in_a_week = int(min_in_a_week/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_week, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_week
return average_tx_rate
def getAverageTxRateInLastMonth():
current_block_height = getCurrentBlockHeight()
    min_in_a_month = 60*24*30
blocks_in_a_month = int(min_in_a_month/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_month, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_month
return average_tx_rate
def getCurrentNetworkHashRate():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/hashrate")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: text/plain'])
getreq.perform()
getreq.close()
current_network_hash_rate = int(strbuf.getvalue()) * 10**9
return current_network_hash_rate
def getCurrentBlockReward():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/bcperblock")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: text/plain'])
getreq.perform()
getreq.close()
block_reward_abs = int(strbuf.getvalue())
block_reward = block_reward_abs / btcval
return block_reward
def getCurrentBuyPriceInInr():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://www.zebapi.com/api/v1/market/ticker-new/btc/inr")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_buy_rate_in_inr = int(json.loads(strbuf.getvalue())['buy'])
return current_buy_rate_in_inr
def getCurrentSellPriceInInr():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://www.zebapi.com/api/v1/market/ticker-new/btc/inr")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_buy_rate_in_inr = int(json.loads(strbuf.getvalue())['sell'])
return current_buy_rate_in_inr
def getCurrentValueOfBitcoinInAddressInInr(address: str):
btc = getBalance(address) / btcval
price_in_inr = getCurrentSellPriceInInr()
value = btc * price_in_inr
return value
def getUnconfirmedTransactionCount():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/unconfirmedcount")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
unconfirmed_transaction_count = int(strbuf.getvalue())
return unconfirmed_transaction_count
def convertToRupeeFormat(num: float):
numstr = "%.2f" % (num)
# print("numstr = %s" % (numstr))
# print("numstr len = %s" % (len(numstr)))
commaloc = 6
while commaloc < len(numstr):
numstr = numstr[:-commaloc] + ',' + numstr[-commaloc:]
commaloc += 3
rupees = "\u20B9%s" % (numstr)
return rupees
electricity_rates = {"rate_slabs": [{"min": 1, "max": 30, "unit_price": 3.25}, {"min": 31, "max": 100, "unit_price": 4.7}, {"min": 101, "max": 200, "unit_price": 6.25}, {"min": 201, "unit_price": 7.3}]}
def getPriceFromUnit(unit: float):
rate_slabs = electricity_rates['rate_slabs']
price = 0
for slab in rate_slabs:
        if slab['min'] > unit:
            continue
        elif 'max' not in slab or slab['max'] > unit:
# if 'max' in slab:
# print("min = %.2f, max = %.2f, unit = %.2f" % (slab['min'], slab['max'], unit))
# else:
# print("min = %.2f, unit = %.2f" % (slab['min'], unit))
price += (unit - slab['min']) * slab['unit_price']
else:
price += (slab['max'] - slab['min']) * slab['unit_price']
return price
def getUnitFromPower(power: float):
unit = power * 24 * 30 / 1000
return unit
def getBlockMiningRatePer10Min(hashrate: int):
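    # The miner's fraction of the total network hash rate equals its expected fraction of blocks found.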
network_hashrate = getCurrentNetworkHashRate()
block_mining_rate = hashrate/network_hashrate
return block_mining_rate
def getBitcoinMiningRate(hashrate: int):
block_mining_rate = getBlockMiningRatePer10Min(hashrate)
mining_reward = getCurrentBlockReward()
bitcoin_mining_rate = block_mining_rate * mining_reward
return bitcoin_mining_rate
def getMiningPowerExpense(power: float):
unit = getUnitFromPower(power)
expense = getPriceFromUnit(unit)
return expense
def getBitcoinMinedPerMonth(hashrate: int):
bitcoin_mined_per_month = getBitcoinMiningRate(hashrate) * 6 * 24 * 30
return bitcoin_mined_per_month
def miningReturn(power: float, hashrate: int):
expense = getMiningPowerExpense(power)
bitcoin_mined_per_month = getBitcoinMinedPerMonth(hashrate)
revenue = bitcoin_mined_per_month * getCurrentSellPriceInInr()
profit = revenue - expense
return profit
def costOfMiningBitcoin(power: float, hashrate: int):
unit = getUnitFromPower(power)
price_per_month = getPriceFromUnit(unit)
bitcoin_mined_per_month = getBitcoinMiningRate(hashrate) * 6 * 24 * 30
cost_of_mining_bitcoin = price_per_month/bitcoin_mined_per_month
return cost_of_mining_bitcoin
if __name__ == "__main__":
balance = getBalance(myaddress) / btcval
print("Current Bitcoin balance = %.8f at Address = %s" % (balance, myaddress))
value = getCurrentValueOfBitcoinInAddressInInr(myaddress)
print("Current Value of Bitcoin = %.2f for Address = %s" % (value, myaddress))
current_block_height = getCurrentBlockHeight()
print("current block height = %d" % (current_block_height))
tx_count_in_last_block = getTxCountInBlock(current_block_height)
print("Number of transactions in last block = %d" % (tx_count_in_last_block))
tx_rate = getTxRate(tx_count_in_last_block)
print("Current transaction rate = %.6f" % (tx_rate))
# average_tx_rate = getAverageTxRateInLast24Hrs()
# print("Average Transaction Rate in last 24 Hrs = %.6f" % (average_tx_rate))
current_network_hash_rate = getCurrentNetworkHashRate()
print("Current Network Hash Rate = %d" % (current_network_hash_rate))
block_reward = getCurrentBlockReward()
print("Current Block Reward = %.8f" % (block_reward))
current_buy_rate_in_inr = getCurrentBuyPriceInInr()
current_buy_rate_in_rupees = convertToRupeeFormat(current_buy_rate_in_inr)
print("Current Buy Rate in Indian Rupees = %s" % (current_buy_rate_in_rupees))
miner_hashrate = 13.5 * 10**12
print("Miner hashrate = %d" % (miner_hashrate))
miner_power = 1323
print ("Miner Power in Watt = %f" % (miner_power))
expense = getMiningPowerExpense(miner_power)
print ("Miner Power Expense Per Month = %.2f" % (expense))
bitcoin_mined_per_month = getBitcoinMinedPerMonth(miner_hashrate)
print("Bitcoin Mined Per Month = %.8f from Miner with hashrate = %d" % (bitcoin_mined_per_month, miner_hashrate))
mining_return = miningReturn(miner_power, miner_hashrate)
print("Mining Return Per Month = %s" % (mining_return))
cost_of_mining_bitcoin = costOfMiningBitcoin(miner_power, miner_hashrate)
print("Cost of Mining Bitcoin = %.2f" % (cost_of_mining_bitcoin))
unconfirmed_transaction_count = getUnconfirmedTransactionCount()
print("Total Unconfirmed Transaction Count = %d" % (unconfirmed_transaction_count))
txn_history = getTxnHistoryOfAddress(myaddress)
txn_history_table = pd.DataFrame(txn_history)
print("Transaction History::\n%s" % (txn_history_table))
|
[
"pandas.DataFrame",
"io.BytesIO",
"pycurl.Curl",
"datetime.datetime.fromtimestamp"
] |
[((281, 290), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (288, 290), False, 'from io import BytesIO\n'), ((308, 321), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (319, 321), False, 'import pycurl\n'), ((890, 899), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (897, 899), False, 'from io import BytesIO\n'), ((917, 930), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (928, 930), False, 'import pycurl\n'), ((2901, 2910), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2908, 2910), False, 'from io import BytesIO\n'), ((2928, 2941), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (2939, 2941), False, 'import pycurl\n'), ((3371, 3380), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3378, 3380), False, 'from io import BytesIO\n'), ((3398, 3411), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (3409, 3411), False, 'import pycurl\n'), ((6240, 6249), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6247, 6249), False, 'from io import BytesIO\n'), ((6267, 6280), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (6278, 6280), False, 'import pycurl\n'), ((6671, 6680), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6678, 6680), False, 'from io import BytesIO\n'), ((6698, 6711), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (6709, 6711), False, 'import pycurl\n'), ((7126, 7135), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7133, 7135), False, 'from io import BytesIO\n'), ((7153, 7166), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (7164, 7166), False, 'import pycurl\n'), ((7594, 7603), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7601, 7603), False, 'from io import BytesIO\n'), ((7621, 7634), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (7632, 7634), False, 'import pycurl\n'), ((8277, 8286), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (8284, 8286), False, 'from io import BytesIO\n'), ((8304, 8317), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (8315, 8317), False, 'import pycurl\n'), ((13890, 13915), 'pandas.DataFrame', 'pd.DataFrame', (['txn_history'], {}), '(txn_history)\n', (13902, 13915), True, 'import pandas as pd\n'), ((2660, 2708), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["eachtxn['time']"], {}), "(eachtxn['time'])\n", (2691, 2708), False, 'import datetime\n')]
|
from scipy.signal import find_peaks
import numpy as np
import math
def search_peaks(x_data, y_data, height=0.1, distance=10):
prominence = np.mean(y_data)
peak_list = find_peaks(y_data, height=height, prominence=prominence, distance=distance)
peaks = []
for i in peak_list[0]:
peak = (x_data[i], y_data[i])
peaks.append(peak)
return peaks
def search_database_peaks(all_spectrum, height=0.1, distance=10):
peaks_database = {}
for key in list(all_spectrum.keys()):
x_data = all_spectrum[key][0]
y_data = all_spectrum[key][1]
peaks = search_peaks(x_data, y_data, height=height, distance=distance)
peaks_database.update({key: peaks})
return peaks_database
def compare_peaks(peaks_database, peaks, abs_tol=5):
coincide_information = {}
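    # For each reference spectrum, record which of its peaks have a measured peak within abs_tol on the x axis.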
for key in list(peaks_database.keys()):
coincide_list = []
for peak_d in peaks_database[key]:
for peak in peaks:
if math.isclose(peak[0], peak_d[0], abs_tol=abs_tol):
coincide_list.append([peak_d[0], peak[0]])
coincide_information.update(
{key: {'coincide_list': coincide_list, 'coincide_number': [len(peaks_database[key]), len(coincide_list)]}})
return coincide_information
def judge_matter(coincide_information, criterion=0.99):
contain_dict = {}
for key in list(coincide_information.keys()):
coincide_number = coincide_information[key]['coincide_number']
key_criterion = coincide_number[1] / coincide_number[0]
if key_criterion >= criterion:
contain_dict.update({key: key_criterion})
return contain_dict
def classify(x_data, y_data, all_spectrum):
peaks = search_peaks(x_data,y_data)
database_peaks = search_database_peaks(all_spectrum)
print(database_peaks)
compare_result = compare_peaks(database_peaks,peaks)
# pass
# print(compare_result)
    compare_result = judge_matter(compare_result)
    return compare_result
|
[
"numpy.mean",
"scipy.signal.find_peaks",
"math.isclose"
] |
[((144, 159), 'numpy.mean', 'np.mean', (['y_data'], {}), '(y_data)\n', (151, 159), True, 'import numpy as np\n'), ((176, 251), 'scipy.signal.find_peaks', 'find_peaks', (['y_data'], {'height': 'height', 'prominence': 'prominence', 'distance': 'distance'}), '(y_data, height=height, prominence=prominence, distance=distance)\n', (186, 251), False, 'from scipy.signal import find_peaks\n'), ((990, 1039), 'math.isclose', 'math.isclose', (['peak[0]', 'peak_d[0]'], {'abs_tol': 'abs_tol'}), '(peak[0], peak_d[0], abs_tol=abs_tol)\n', (1002, 1039), False, 'import math\n')]
|
import pandas as pd
from transformers import BertTokenizer, RobertaTokenizer, AutoTokenizer
import os
import numpy as np
import re
import glob
from nltk import sent_tokenize
from utils import num_tokens
import math
def read_generic_file(filepath):
""" reads any generic text file into
list containing one line as element
"""
text = []
with open(filepath, 'r') as f:
for line in f.read().splitlines():
text.append(line.strip())
return text
def offset_str2list(offset):
return [[int(start_end) for start_end in offset.split(',')] for offset in offset.split(';')]
def offset_decreaseSentOffset(sentOffset, scu_offsets):
return [[start_end[0] - sentOffset, start_end[1] - sentOffset] for start_end in scu_offsets]
def insert_string(string, index, value):
return string[:index] + value + string[index:]
# the next *four* functions are taken from PreSumm implementation
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
# words = _split_into_words(sentences)
words = sum(sentences, [])
# words = [w for w in words if w not in stopwords]
return _get_ngrams(n, words)
def cal_rouge(evaluated_ngrams, reference_ngrams):
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
if evaluated_count == 0:
precision = 0.0
else:
precision = overlapping_count / evaluated_count
if reference_count == 0:
recall = 0.0
else:
recall = overlapping_count / reference_count
f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
return {"f": f1_score, "p": precision, "r": recall}
def greedy_selection(doc_sent_list, abstract_sent_list, summary_size=1000):
def _rouge_clean(s):
return re.sub(r'[^a-zA-Z0-9 ]', '', s)
max_rouge = 0.0
abstract = sum(abstract_sent_list, [])
abstract = _rouge_clean(' '.join(abstract)).split()
sents = [_rouge_clean(' '.join(s)).split() for s in doc_sent_list]
evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
reference_1grams = _get_word_ngrams(1, [abstract])
evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
reference_2grams = _get_word_ngrams(2, [abstract])
selected = []
for s in range(summary_size):
cur_max_rouge = max_rouge
cur_id = -1
for i in range(len(sents)):
if (i in selected):
continue
c = selected + [i]
candidates_1 = [evaluated_1grams[idx] for idx in c]
candidates_1 = set.union(*map(set, candidates_1))
candidates_2 = [evaluated_2grams[idx] for idx in c]
candidates_2 = set.union(*map(set, candidates_2))
rouge_1 = cal_rouge(candidates_1, reference_1grams)['f']
rouge_2 = cal_rouge(candidates_2, reference_2grams)['f']
rouge_score = rouge_1 + rouge_2
if rouge_score > cur_max_rouge:
cur_max_rouge = rouge_score
cur_id = i
if (cur_id == -1):
return selected
selected.append(cur_id)
max_rouge = cur_max_rouge
return sorted(selected)
def greedy_selection_MDS(doc_sent_list, abstracts, summary_size=1000):
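    # Multi-reference variant of the greedy oracle: ROUGE-1 + ROUGE-2 is averaged over
    # all reference abstracts before each greedy span selection.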
def _rouge_clean(s):
return re.sub(r'[^a-zA-Z0-9 ]', '', s)
max_rouge = 0.0
# abstract = sum(abstract_sent_list, [])
abstracts = [_rouge_clean(abstract.lower().replace('...',' ... ')).split() for abstract in abstracts]
# abstract = _rouge_clean(' '.join(abstract)).split()
sents = [_rouge_clean(s.lower().replace('...',' ... ')).split() for s in doc_sent_list]
evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
references_1grams = []
references_2grams = []
for abstract in abstracts:
references_1grams.append(_get_word_ngrams(1, [abstract]))
references_2grams.append(_get_word_ngrams(2, [abstract]))
selected = []
for s in range(summary_size):
cur_max_rouge = max_rouge
cur_id = -1
for i in range(len(sents)):
if (i in selected):
continue
c = selected + [i]
candidates_1 = [evaluated_1grams[idx] for idx in c]
candidates_1 = set.union(*map(set, candidates_1))
candidates_2 = [evaluated_2grams[idx] for idx in c]
candidates_2 = set.union(*map(set, candidates_2))
rouge_1 = np.mean([cal_rouge(candidates_1, reference_1grams)['f'] for reference_1grams in references_1grams])
rouge_2 = np.mean([cal_rouge(candidates_2, reference_2grams)['f'] for reference_2grams in references_2grams])
rouge_score = rouge_1 + rouge_2
if rouge_score > cur_max_rouge:
cur_max_rouge = rouge_score
cur_id = i
if (cur_id == -1):
return selected
selected.append(cur_id)
max_rouge = cur_max_rouge
return sorted(selected)
def add_sent_special_tok(document, OIE_row = None):
doc_sents = sent_tokenize(document)#[:20]
if OIE_row is not None: #if main document
doc_sents = doc_sents[:MAX_SENT_MAIN_DOC]
sent_found_flag = False
for sent_idx, sent in enumerate(doc_sents):
if sent == OIE_row['docSentText']:
sent_found_flag = True
doc_sents[sent_idx] = add_OIE_special_tok(OIE_row['docSpanOffsets'], OIE_row['docSentCharIdx'], sent)
if num_tokens('<doc-s> ' + '<s> ' + ' </s> <s> '.join(doc_sents[:sent_idx+1]) + ' </s>' + ' </doc-s>', tokenizer,
add_special_tokens=True)> MAX_TOKENS:
return None
break
if not sent_found_flag:
return None
else: #if context document
doc_sents = doc_sents[:MAX_SENT_CONTEXT_DOC]
document = '<s> ' + ' </s> <s> '.join(doc_sents) + ' </s>'
return document
def adding_files_context(file_context_combination, data_path, topic_dir):
documents = []
for file_context in file_context_combination:
text = read_generic_file(os.path.join(data_path, topic_dir, file_context))
document = " ".join(text)
document = add_sent_special_tok(document)
document = add_doc_special_tok(document)
documents.append(document)
context = ' '.join(documents)
return context
def add_special_tok(row, document):
document_tmp = document[:]#add_OIE_special_tok(docSpanOffsets, document)
document_tmp = add_sent_special_tok(document_tmp, row)
if document_tmp is not None:
document_tmp = add_doc_special_tok(document_tmp)
return document_tmp
def add_doc_special_tok(document_tmp):
return '<doc-s> ' + document_tmp + ' </doc-s>'
def add_OIE_special_tok(docSpanOffsets, docSentCharIdx, sent, special_tokens_for_global_attn = True):
# document_tmp = document[:]
span_offsets = offset_str2list(docSpanOffsets)
offsets = offset_decreaseSentOffset(docSentCharIdx, span_offsets)
# assume we have max 2 parts
if special_tokens_for_global_attn:
for offset in offsets[::-1]: #[::-1] start from the end so the remain offsets won't be shifted
sent = insert_string(sent, offset[1], ' <OIE1_END> ')
sent = insert_string(sent, offset[0], ' <OIE1_START> ')
else:
for offset in offsets[::-1]: #[::-1] start from the end so the remain offsets won't be shifted
sent = insert_string(sent, offset[1], ' > ')
sent = insert_string(sent, offset[0], ' < ')
return sent
def read_abstracts(DATASET, data_path, topic_dir):
abstracts = []
if DATASET.startswith('TAC'):
# for summary_path in glob.iglob(
# data_path + '/summaries/' + topic_dir[:-3].upper() + topic_dir[-2:].upper() + '.*'):
for summary_path in glob.iglob(
data_path + '/summaries/' + topic_dir[:-3].upper() + '*'):
summary = ' '.join(read_generic_file(summary_path))
abstracts.append(summary)
else:
for summary_path in glob.iglob(data_path + '/summaries/' + topic_dir[:-1].upper() + '.*'):
summary = ' '.join(read_generic_file(summary_path))
abstracts.append(summary)
return abstracts
def add_instance(full_instance, tokenizer, row, highlights_list, highlights_metadata_list, file_context_combination, alignment_label='alignment_label'):
full_instance, global_attention_idx = extract_global_attention_idx(full_instance, tokenizer)
print('num tokens:', num_tokens(full_instance, tokenizer, add_special_tokens=False))
highlights_list.append([full_instance, row[alignment_label], global_attention_idx, row['greedyMaxRouge']])
highlights_metadata_list.append(row.tolist()+ [file_context_combination])
def replace_special_token(text, special_token_char_idxes, old_special_token, new_special_token):
text = text[:special_token_char_idxes[-1]] + new_special_token + text[special_token_char_idxes[-1] + len(
old_special_token):] # replace '<OIE1_START>' with '<'
special_token_char_idxes[-1] += 1 # include new special token '<'
return text, special_token_char_idxes
def extract_global_attention_idx(text, tokenizer, model_max_tokens = None):
if model_max_tokens is None:
model_max_tokens = MAX_TOKENS
    # and replace the new special tokens with '<' '>' so the model won't have to learn new tokens.
special_tokens_idx_list = []
special_token_char_idxes = []
mark_start_idx = text.find('<OIE1_START>')
while mark_start_idx > -1:
# find special_token_char_idxes
special_token_char_idxes.append(mark_start_idx)
text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_START>', '<')
special_token_char_idxes.append(text.find('<OIE1_END>'))
text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_END>', '>')
mark_start_idx = text.find('<OIE1_START>')
# #find special_token_char_idxes
# special_token_char_idxes = []
# special_token_char_idxes.append(text.find('<OIE1_START>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_START>', '<')
# special_token_char_idxes.append(text.find('<OIE1_END>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_END>', '>')
# start_idx2 = text.find('<OIE2_START>')
# if start_idx2 > -1: #if exists
# special_token_char_idxes.append(start_idx2)
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE2_START>', '<')
# special_token_char_idxes.append(text.find('<OIE2_END>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE2_END>', '>')
# find special token idxes
for special_token_char_idx in special_token_char_idxes:
special_token_prev_text = text[:special_token_char_idx]
        special_token_idx = num_tokens(special_token_prev_text, tokenizer) # the special start-of-sentence token is counted, since len(tokens) == idx + 1
        assert(('<' in tokenizer.tokenize(text)[special_token_idx-1]) or ('>' in tokenizer.tokenize(text)[special_token_idx-1])) # check it finds the special token; special_token_idx-1 because tokenize() omits the special start-of-sentence token.
        assert(special_token_idx < model_max_tokens) # it must stay below model_max_tokens, and the last token is the special end-of-sentence token.
special_tokens_idx_list.append(special_token_idx)
return text, special_tokens_idx_list
def createGT_labels(OIEs_topic, data_path, topic_dir, DATASET):
labels_column_name = 'greedyMaxRouge'
OIEs_topic['original_idx'] = range(len(OIEs_topic))
abstracts = read_abstracts(DATASET, data_path, topic_dir)
docFile_summSpan_cands = list(OIEs_topic['docSpanText'].values)
positive_summSpan_idx = greedy_selection_MDS(docFile_summSpan_cands, abstracts)
positive_summSpan_original_idx = [OIEs_topic['original_idx'].values[cand_idx] for cand_idx in positive_summSpan_idx]
scnd_filter_label = np.zeros(len(OIEs_topic), dtype=int)
scnd_filter_label[positive_summSpan_original_idx] = 1
if labels_column_name in OIEs_topic.columns:
scnd_filter_label = np.array(OIEs_topic[labels_column_name].to_list()) + scnd_filter_label
OIEs_topic[labels_column_name] = scnd_filter_label
##validation for correct indexes
positive_labeled_spans = OIEs_topic[OIEs_topic[labels_column_name] == 1]['docSpanText'].to_list()
positive_labeled_spans_validation = [docFile_summSpan_cands[cand_idx] in positive_labeled_spans for cand_idx in positive_summSpan_idx]
assert(all(positive_labeled_spans_validation))
return OIEs_topic
def add_sent_in_file_idx(OIEs_topic, data_path, topic_dir):
doc_sent_idx = np.zeros(len(OIEs_topic), dtype=int)
OIEs_topic['original_idx'] = range(len(OIEs_topic))
topic_files = os.listdir(os.path.join(data_path, topic_dir))
for file_idx, file in enumerate(topic_files):
OIEs_topic_file = OIEs_topic[OIEs_topic['documentFile']==file]
text = read_generic_file(os.path.join(data_path, topic_dir, file))
document = " ".join(text)
doc_sents = sent_tokenize(document)
for sent_idx, doc_sent in enumerate(doc_sents):
OIEs_topic_file_sent_original_idx = (OIEs_topic_file['original_idx'][OIEs_topic_file['docSentText'] == doc_sent]).values
doc_sent_idx[OIEs_topic_file_sent_original_idx] = sent_idx
OIEs_topic['inFile_sentIdx'] = doc_sent_idx
return OIEs_topic
def positive_augmentation(num_negative, num_positive, highlights_df, highlights_metadata_df, label_tag = 'label', SAFE_BUFFER = 100):
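    # Oversample positive rows (whole copies first, then a random remainder) until the
    # positive count roughly matches the negative count minus SAFE_BUFFER.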
original_len_highlights_df = len(highlights_df)
augmentation_factor = (num_negative- num_positive - SAFE_BUFFER)/num_positive
if label_tag != 'label':
augmentation_factor = (num_negative - num_positive - SAFE_BUFFER) / len(highlights_df[highlights_df[label_tag]==1])
#threshold = 0.75
augmentation_factor = math.floor(augmentation_factor) #if augmentation_factor < (math.floor(augmentation_factor) + threshold) else math.ceil(augmentation_factor)
positive_highlights_df = highlights_df[highlights_df[label_tag] == 1]
positive_highlights_metadata_df = highlights_metadata_df.loc[positive_highlights_df.index, :]
if augmentation_factor >= 1:
for i in range(augmentation_factor):
highlights_df = highlights_df.append(positive_highlights_df)
highlights_metadata_df = highlights_metadata_df.append(positive_highlights_metadata_df)
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
# augmentation_factor = (num_negative - num_positive) / num_positive # if still not equal- add part of positive samples.
# if augmentation_factor > 0.5:
if num_negative - num_positive > SAFE_BUFFER:
selected_index = np.random.choice(positive_highlights_df.index.to_list(),num_negative - num_positive -SAFE_BUFFER,replace=False)
selected_positive_highlights_df = highlights_df[:original_len_highlights_df].loc[selected_index, :] #copy from original highlights_df (before augmentation) so rows won't be double augmented by their index
selected_positive_highlights_metadata_df = highlights_metadata_df[:original_len_highlights_df].loc[selected_index, :]
highlights_df = highlights_df.append(selected_positive_highlights_df)
highlights_metadata_df = highlights_metadata_df.append(selected_positive_highlights_metadata_df)
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
return highlights_df, highlights_metadata_df
##################################
###### main ##############
##################################
if __name__ == "__main__":
np.random.seed(42)
SET = 'train'
DATASETS = ['TAC2008','TAC2009','TAC2010']
NUM_CONTEXT_FILES = 9
MAX_TOKENS = 4096
filter_negative = False
FILTER_RATE = 0.4
over_sample_positive = False
MAX_SENT_MAIN_DOC = 20
MAX_SENT_CONTEXT_DOC = 9
sentences_level = False
if SET == 'train':
filter_negative = True
over_sample_positive = True
positive_label = 'greedyMaxRouge'
if filter_negative:
filter_negative_label = '_filter_negative'
else:
filter_negative_label = ''
if over_sample_positive:
over_sample_positive_label = '_over_sample_positive'
else:
over_sample_positive_label = ''
if sentences_level:
sentences_level_label = '_sentence_based'
else:
sentences_level_label = ''
OUTPUT_PATH = 'OIE_highlights/{}_{}_CDLM{}{}{}_fixed_truncated.csv'.format("_".join(DATASETS), SET,
filter_negative_label,
over_sample_positive_label,
sentences_level_label)
highlights_list = []
highlights_metadata_list = []
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
tokenizer = AutoTokenizer.from_pretrained('./CDLM/')
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<OIE1_START>', '<OIE1_END>', '<OIE2_START>', '<OIE2_END>']})
for DATASET in DATASETS:
data_path = 'data/{}/'.format(DATASET)
OIEs = pd.read_csv('OIE_cands/OIE_cands_{}.csv'.format(DATASET))
if sentences_level:
OIEs['docSpanText'] = OIEs['docSentText']
OIEs['docSpanOffsets'] = OIEs['docSentCharIdx'].apply(str) + ', ' + (
OIEs['docSentCharIdx'] + OIEs['docSentText'].apply(len)).apply(str)
used_positive_spans = 0
for topic_dir in os.listdir(data_path):
print(topic_dir)
if topic_dir == 'summaries':
continue
OIEs_topic = OIEs[OIEs['topic'] == topic_dir]
if DATASET.startswith('TAC'):
topic_dir_tac2011 = topic_dir[:-3].upper() + topic_dir[-2:].upper()
OIEs_topic = OIEs[OIEs['topic'] == topic_dir_tac2011]
OIEs_topic = add_sent_in_file_idx(OIEs_topic, data_path, topic_dir)
OIEs_topic = OIEs_topic[OIEs_topic['inFile_sentIdx'] < MAX_SENT_MAIN_DOC]
OIEs_topic = createGT_labels(OIEs_topic, data_path, topic_dir, DATASET)
topic_files = os.listdir(os.path.join(data_path, topic_dir))
topic_dates = [topic[re.search(r"\d", topic).start():] for topic in topic_files]#[topic_file[3:] for topic_file in topic_files]
topic_files = [x for _, x in sorted(zip(topic_dates, topic_files))]
for file_idx, file in enumerate(topic_files):
text = read_generic_file(os.path.join(data_path, topic_dir, file))
document = " ".join(text)
post_context_files = topic_files[file_idx + 1:file_idx + 1 + NUM_CONTEXT_FILES]
pre_context_files = []
if len(post_context_files) < NUM_CONTEXT_FILES:
diff_len = NUM_CONTEXT_FILES - len(post_context_files)
pre_context_files = topic_files[max(0, file_idx - diff_len):file_idx] # + context_files
assert (len(post_context_files + pre_context_files) == min(NUM_CONTEXT_FILES, len(topic_files) - 1))
# trunced_document = truncated_text_for_openie(document, tokenizer)
OIEs_topic_docFile = OIEs_topic[
OIEs_topic['documentFile'] == file]
for index, row in OIEs_topic_docFile.iterrows():
main_document = add_special_tok(row, document)
if main_document is None:
continue
if row[positive_label]:
used_positive_spans += 1
else:
if filter_negative:
if np.random.choice([0, 1], p=[FILTER_RATE,
1 - FILTER_RATE]): # 'continue' in random (1 - FILTER_RATE) of negative cases.
continue
# for file_context_combination in [context_files]:# combinations(topic_files_tmp,NUM_CONTEXT_FILES): # all context combinations of 2 files
pre_documents_context = adding_files_context(pre_context_files, data_path, topic_dir)
post_documents_context = adding_files_context(post_context_files, data_path, topic_dir)
file_context_combination = pre_context_files + post_context_files
full_instance = pre_documents_context + ' ' + main_document + ' ' + post_documents_context
add_instance(full_instance, tokenizer, row, highlights_list,
highlights_metadata_list, file_context_combination, alignment_label=positive_label)
print(len(highlights_list))
highlights_df = pd.DataFrame(highlights_list, columns=['', 'label', 'global_attention_idx', 'greedyMaxRouge'])
highlights_metadata_df = pd.DataFrame(highlights_metadata_list,
columns=OIEs_topic.columns.tolist() + ['doc_context'])
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
if over_sample_positive:
highlights_df, highlights_metadata_df = positive_augmentation(num_negative, num_positive, highlights_df,
highlights_metadata_df)
highlights_df = highlights_df[['', 'label', 'global_attention_idx']]
highlights_df.to_csv(OUTPUT_PATH, index=False)
highlights_metadata_df.to_csv(OUTPUT_PATH[:-4] + '_metadata.csv', index=False)
|
[
"pandas.DataFrame",
"utils.num_tokens",
"numpy.random.seed",
"os.path.join",
"nltk.sent_tokenize",
"math.floor",
"transformers.AutoTokenizer.from_pretrained",
"re.search",
"numpy.random.choice",
"re.sub",
"os.listdir"
] |
[((5885, 5908), 'nltk.sent_tokenize', 'sent_tokenize', (['document'], {}), '(document)\n', (5898, 5908), False, 'from nltk import sent_tokenize\n'), ((15283, 15314), 'math.floor', 'math.floor', (['augmentation_factor'], {}), '(augmentation_factor)\n', (15293, 15314), False, 'import math\n'), ((17416, 17434), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (17430, 17434), True, 'import numpy as np\n'), ((18964, 19004), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""./CDLM/"""'], {}), "('./CDLM/')\n", (18993, 19004), False, 'from transformers import BertTokenizer, RobertaTokenizer, AutoTokenizer\n'), ((22954, 23052), 'pandas.DataFrame', 'pd.DataFrame', (['highlights_list'], {'columns': "['', 'label', 'global_attention_idx', 'greedyMaxRouge']"}), "(highlights_list, columns=['', 'label', 'global_attention_idx',\n 'greedyMaxRouge'])\n", (22966, 23052), True, 'import pandas as pd\n'), ((2459, 2489), 're.sub', 're.sub', (['"""[^a-zA-Z0-9 ]"""', '""""""', 's'], {}), "('[^a-zA-Z0-9 ]', '', s)\n", (2465, 2489), False, 'import re\n'), ((4016, 4046), 're.sub', 're.sub', (['"""[^a-zA-Z0-9 ]"""', '""""""', 's'], {}), "('[^a-zA-Z0-9 ]', '', s)\n", (4022, 4046), False, 'import re\n'), ((9490, 9552), 'utils.num_tokens', 'num_tokens', (['full_instance', 'tokenizer'], {'add_special_tokens': '(False)'}), '(full_instance, tokenizer, add_special_tokens=False)\n', (9500, 9552), False, 'from utils import num_tokens\n'), ((12071, 12117), 'utils.num_tokens', 'num_tokens', (['special_token_prev_text', 'tokenizer'], {}), '(special_token_prev_text, tokenizer)\n', (12081, 12117), False, 'from utils import num_tokens\n'), ((14149, 14183), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir'], {}), '(data_path, topic_dir)\n', (14161, 14183), False, 'import os\n'), ((14440, 14463), 'nltk.sent_tokenize', 'sent_tokenize', (['document'], {}), '(document)\n', (14453, 14463), False, 'from nltk import sent_tokenize\n'), ((19627, 19648), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (19637, 19648), False, 'import os\n'), ((6985, 7033), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir', 'file_context'], {}), '(data_path, topic_dir, file_context)\n', (6997, 7033), False, 'import os\n'), ((14342, 14382), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir', 'file'], {}), '(data_path, topic_dir, file)\n', (14354, 14382), False, 'import os\n'), ((20311, 20345), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir'], {}), '(data_path, topic_dir)\n', (20323, 20345), False, 'import os\n'), ((20672, 20712), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir', 'file'], {}), '(data_path, topic_dir, file)\n', (20684, 20712), False, 'import os\n'), ((21883, 21941), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': '[FILTER_RATE, 1 - FILTER_RATE]'}), '([0, 1], p=[FILTER_RATE, 1 - FILTER_RATE])\n', (21899, 21941), True, 'import numpy as np\n'), ((20381, 20404), 're.search', 're.search', (['"""\\\\d"""', 'topic'], {}), "('\\\\d', topic)\n", (20390, 20404), False, 'import re\n')]
|
'''
Created on 2015/12/14
:author: hubo
'''
from __future__ import print_function
import unittest
from vlcp.server.server import Server
from vlcp.event.runnable import RoutineContainer
from vlcp.event.lock import Lock, Semaphore
from vlcp.config.config import manager
class Test(unittest.TestCase):
def setUp(self):
self.server = Server()
def tearDown(self):
pass
def testLock(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
def testWith(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
with l:
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
def testLock2(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj2'))
self.server.serve()
self.assertEqual(obj[0], 1)
def testTrylock(self):
rc = RoutineContainer(self.server.scheduler)
result = []
def routineTrylock(key):
l = Lock(key, rc.scheduler)
locked = l.trylock()
result.append(locked)
for m in rc.waitWithTimeout(0.5):
yield m
l.unlock()
rc.subroutine(routineTrylock('testobj'))
rc.subroutine(routineTrylock('testobj'))
self.server.serve()
self.assertEqual(result, [True, False])
def testBeginlock(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
locked = l.beginlock(rc)
if not locked:
for m in rc.waitWithTimeout(1.0):
yield m
locked = l.trylock()
if not locked:
raise ValueError('Not locked')
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
for m in rc.doEvents():
yield m
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(1.0):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 4)
def testBeginlock2(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
locked = l.beginlock(rc)
if not locked:
for m in rc.waitWithTimeout(0.5):
yield m
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(1.0):
yield m
obj[0] = t + 1
l.unlock()
for m in rc.doEvents():
yield m
for m in l.lock(rc):
yield m
t = obj[0]
if t != 2:
obj[0] = t - 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
def testSemaphore(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
smp = Semaphore('testobj', 2, rc.scheduler)
smp.create()
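        # With 2 permits, two routines hold the lock concurrently; their overlapping
        # read-modify-write means four routines only increment obj[0] twice.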
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"unittest.main",
"vlcp.event.lock.Semaphore",
"vlcp.event.runnable.RoutineContainer",
"vlcp.event.lock.Lock",
"vlcp.server.server.Server"
] |
[((5137, 5152), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5150, 5152), False, 'import unittest\n'), ((345, 353), 'vlcp.server.server.Server', 'Server', ([], {}), '()\n', (351, 353), False, 'from vlcp.server.server import Server\n'), ((433, 472), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.server.scheduler'], {}), '(self.server.scheduler)\n', (449, 472), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((954, 993), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.server.scheduler'], {}), '(self.server.scheduler)\n', (970, 993), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((1489, 1528), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.server.scheduler'], {}), '(self.server.scheduler)\n', (1505, 1528), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((2014, 2053), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.server.scheduler'], {}), '(self.server.scheduler)\n', (2030, 2053), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((2523, 2562), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.server.scheduler'], {}), '(self.server.scheduler)\n', (2539, 2562), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((3514, 3553), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.server.scheduler'], {}), '(self.server.scheduler)\n', (3530, 3553), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((4407, 4446), 'vlcp.event.runnable.RoutineContainer', 'RoutineContainer', (['self.server.scheduler'], {}), '(self.server.scheduler)\n', (4423, 4446), False, 'from vlcp.event.runnable import RoutineContainer\n'), ((4479, 4516), 'vlcp.event.lock.Semaphore', 'Semaphore', (['"""testobj"""', '(2)', 'rc.scheduler'], {}), "('testobj', 2, rc.scheduler)\n", (4488, 4516), False, 'from vlcp.event.lock import Lock, Semaphore\n'), ((537, 560), 'vlcp.event.lock.Lock', 'Lock', (['key', 'rc.scheduler'], {}), '(key, rc.scheduler)\n', (541, 560), False, 'from vlcp.event.lock import Lock, Semaphore\n'), ((1058, 1081), 'vlcp.event.lock.Lock', 'Lock', (['key', 'rc.scheduler'], {}), '(key, rc.scheduler)\n', (1062, 1081), False, 'from vlcp.event.lock import Lock, Semaphore\n'), ((1593, 1616), 'vlcp.event.lock.Lock', 'Lock', (['key', 'rc.scheduler'], {}), '(key, rc.scheduler)\n', (1597, 1616), False, 'from vlcp.event.lock import Lock, Semaphore\n'), ((2123, 2146), 'vlcp.event.lock.Lock', 'Lock', (['key', 'rc.scheduler'], {}), '(key, rc.scheduler)\n', (2127, 2146), False, 'from vlcp.event.lock import Lock, Semaphore\n'), ((2627, 2650), 'vlcp.event.lock.Lock', 'Lock', (['key', 'rc.scheduler'], {}), '(key, rc.scheduler)\n', (2631, 2650), False, 'from vlcp.event.lock import Lock, Semaphore\n'), ((3618, 3641), 'vlcp.event.lock.Lock', 'Lock', (['key', 'rc.scheduler'], {}), '(key, rc.scheduler)\n', (3622, 3641), False, 'from vlcp.event.lock import Lock, Semaphore\n'), ((4584, 4607), 'vlcp.event.lock.Lock', 'Lock', (['key', 'rc.scheduler'], {}), '(key, rc.scheduler)\n', (4588, 4607), False, 'from vlcp.event.lock import Lock, Semaphore\n')]
|
#!/usr/bin/env python
"""Convert Directory.
Usage: convert_directory.py <src_dir> <dest_dir>
-h --help show this
"""
import errno
import os
import subprocess
from docopt import docopt
def convert_directory(src, dest):
# Convert the files in place
for root, dirs, files in os.walk(src):
for filename in files:
name, ext = os.path.splitext(filename)
if ext in ['.md', '.markdown']:
html_filename = '.'.join([name, 'html'])
md_path = os.path.join(root, filename)
html_path = os.path.join(root, html_filename)
subprocess.call(['pandoc', md_path, '-s', '-o', html_path])
# Incredibly hacky way to move all files, except markdown files
# (Making sure image files get transferred to dest directory.)
subprocess.call(['rsync', '-a', src + '/', dest])
subprocess.call(['find', dest, '-name', '*.md', '-exec', 'rm', '{}', ';'])
subprocess.call(['find', dest, '-name', '*.markdown', '-exec', 'rm', '{}', ';'])
# Clean out generated html files in src directory.
subprocess.call(['find', src, '-name', '*.html', '-exec', 'rm', '{}', ';'])
if __name__ == '__main__':
args = docopt(__doc__, version='Convert Directory 0.1')
src = args['<src_dir>']
dest = args['<dest_dir>']
convert_directory(src, dest)
|
[
"docopt.docopt",
"os.walk",
"subprocess.call",
"os.path.splitext",
"os.path.join"
] |
[((291, 303), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (298, 303), False, 'import os\n'), ((821, 870), 'subprocess.call', 'subprocess.call', (["['rsync', '-a', src + '/', dest]"], {}), "(['rsync', '-a', src + '/', dest])\n", (836, 870), False, 'import subprocess\n'), ((875, 949), 'subprocess.call', 'subprocess.call', (["['find', dest, '-name', '*.md', '-exec', 'rm', '{}', ';']"], {}), "(['find', dest, '-name', '*.md', '-exec', 'rm', '{}', ';'])\n", (890, 949), False, 'import subprocess\n'), ((954, 1039), 'subprocess.call', 'subprocess.call', (["['find', dest, '-name', '*.markdown', '-exec', 'rm', '{}', ';']"], {}), "(['find', dest, '-name', '*.markdown', '-exec', 'rm', '{}', ';']\n )\n", (969, 1039), False, 'import subprocess\n'), ((1095, 1170), 'subprocess.call', 'subprocess.call', (["['find', src, '-name', '*.html', '-exec', 'rm', '{}', ';']"], {}), "(['find', src, '-name', '*.html', '-exec', 'rm', '{}', ';'])\n", (1110, 1170), False, 'import subprocess\n'), ((1210, 1258), 'docopt.docopt', 'docopt', (['__doc__'], {'version': '"""Convert Directory 0.1"""'}), "(__doc__, version='Convert Directory 0.1')\n", (1216, 1258), False, 'from docopt import docopt\n'), ((360, 386), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (376, 386), False, 'import os\n'), ((514, 542), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (526, 542), False, 'import os\n'), ((571, 604), 'os.path.join', 'os.path.join', (['root', 'html_filename'], {}), '(root, html_filename)\n', (583, 604), False, 'import os\n'), ((621, 680), 'subprocess.call', 'subprocess.call', (["['pandoc', md_path, '-s', '-o', html_path]"], {}), "(['pandoc', md_path, '-s', '-o', html_path])\n", (636, 680), False, 'import subprocess\n')]
|
from django.test import TestCase
from hknweb.candidate.tests.models.utils import ModelFactory
class CommitteeProjectRequirementModelTests(TestCase):
def setUp(self):
semester = ModelFactory.create_semester(
semester="Spring",
year=0,
)
committeeproject = ModelFactory.create_committeeproject_requirement(
candidateSemesterActive=semester,
)
self.semester = semester
self.committeeproject = committeeproject
def test_str(self):
expected = "{} - {}".format(self.committeeproject.name, self.semester)
actual = str(self.committeeproject)
self.assertEqual(expected, actual)
|
[
"hknweb.candidate.tests.models.utils.ModelFactory.create_committeeproject_requirement",
"hknweb.candidate.tests.models.utils.ModelFactory.create_semester"
] |
[((192, 247), 'hknweb.candidate.tests.models.utils.ModelFactory.create_semester', 'ModelFactory.create_semester', ([], {'semester': '"""Spring"""', 'year': '(0)'}), "(semester='Spring', year=0)\n", (220, 247), False, 'from hknweb.candidate.tests.models.utils import ModelFactory\n'), ((310, 397), 'hknweb.candidate.tests.models.utils.ModelFactory.create_committeeproject_requirement', 'ModelFactory.create_committeeproject_requirement', ([], {'candidateSemesterActive': 'semester'}), '(candidateSemesterActive=\n semester)\n', (358, 397), False, 'from hknweb.candidate.tests.models.utils import ModelFactory\n')]
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conformer utilities."""
import copy
from typing import List, Optional
from absl import logging
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import tensorflow.compat.v2 as tf
def generate_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
*,
random_seed: int = -1,
prune_rms_thresh: float = -1.0,
max_iter: int = -1,
fallback_to_random: bool = False,
) -> Chem.rdchem.Mol:
"""Generates conformers for a given molecule.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
prune_rms_thresh: RMSD threshold which allows to prune conformers that are
too similar.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
Returns:
Copy of a `molecule` with added hydrogens. The returned molecule contains
force field-optimised conformers. The number of conformers is guaranteed to
be <= max_num_conformers.
"""
mol = copy.deepcopy(molecule)
mol = Chem.AddHs(mol)
mol = _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=False)
if max_iter > 0:
mol_with_conformers = _minimize_by_mmff(mol, max_iter)
if mol_with_conformers is None:
mol_with_conformers = _minimize_by_uff(mol, max_iter)
else:
mol_with_conformers = mol
# Aligns conformations in a molecule to each other using the first
# conformation as the reference.
AllChem.AlignMolConformers(mol_with_conformers)
# We remove hydrogens to keep the number of atoms consistent with the graph
# nodes.
mol_with_conformers = Chem.RemoveHs(mol_with_conformers)
return mol_with_conformers
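# Illustrative usage sketch (added; the SMILES string and keyword values below
# are arbitrary examples, not part of the original module):
#   mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')
#   mol = generate_conformers(mol, max_num_conformers=5, random_seed=1,
#                             prune_rms_thresh=0.5, max_iter=200)
#   num_conformers = mol.GetNumConformers()  # at most 5, force-field optimised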
def atom_to_feature_vector(
atom: rdkit.Chem.rdchem.Atom,
conformer: Optional[np.ndarray] = None,
) -> List[float]:
"""Converts rdkit atom object to feature list of indices.
Args:
atom: rdkit atom object.
conformer: Generated conformers. Returns -1 values if set to None.
Returns:
List containing positions (x, y, z) of each atom from the conformer.
"""
if conformer:
pos = conformer.GetAtomPosition(atom.GetIdx())
return [pos.x, pos.y, pos.z]
return [np.nan, np.nan, np.nan]
def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
"""Computes conformer.
Args:
smile: Smile string.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
Returns:
    Conformer features: float32 array of per-atom (x, y, z) positions.
Raises:
RuntimeError: If unable to convert smile string to RDKit mol.
"""
mol = rdkit.Chem.MolFromSmiles(smile)
if not mol:
raise RuntimeError('Unable to convert smile to molecule: %s' % smile)
conformer_failed = False
try:
mol = generate_conformers(
mol,
max_num_conformers=1,
random_seed=45,
prune_rms_thresh=0.01,
max_iter=max_iter)
except IOError as e:
logging.exception('Failed to generate conformers for %s . IOError %s.',
smile, e)
conformer_failed = True
except ValueError:
logging.error('Failed to generate conformers for %s . ValueError', smile)
conformer_failed = True
except: # pylint: disable=bare-except
logging.error('Failed to generate conformers for %s.', smile)
conformer_failed = True
atom_features_list = []
conformer = None if conformer_failed else list(mol.GetConformers())[0]
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature_vector(atom, conformer))
conformer_features = np.array(atom_features_list, dtype=np.float32)
return conformer_features
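# Illustrative usage sketch (added; the SMILES string is a made-up example):
#   xyz = compute_conformer('CCO', max_iter=200)
#   # -> array of shape (num_atoms, 3); rows are NaN if generation failed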
def get_random_rotation_matrix(include_mirror_symmetry: bool) -> tf.Tensor:
"""Returns a single random rotation matrix."""
rotation_matrix = _get_random_rotation_3d()
if include_mirror_symmetry:
random_mirror_symmetry = _get_random_mirror_symmetry()
rotation_matrix = tf.matmul(rotation_matrix, random_mirror_symmetry)
return rotation_matrix
def rotate(vectors: tf.Tensor, rotation_matrix: tf.Tensor) -> tf.Tensor:
"""Batch of vectors on a single rotation matrix."""
return tf.matmul(vectors, rotation_matrix)
def _embed_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
random_seed: int,
prune_rms_thresh: float,
fallback_to_random: bool,
*,
use_random: bool = False,
) -> Chem.rdchem.Mol:
"""Embeds conformers into a copy of a molecule.
If random coordinates allowed, tries not to use random coordinates at first,
and uses random only if fails.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
prune_rms_thresh: RMSD threshold which allows to prune conformers that are
too similar.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
*:
use_random: Use random coordinates. Shouldn't be set by any caller except
this function itself.
Returns:
A copy of a molecule with embedded conformers.
Raises:
ValueError: if conformers cannot be obtained for a given molecule.
"""
mol = copy.deepcopy(molecule)
# Obtains parameters for conformer generation.
# In particular, ETKDG is experimental-torsion basic knowledge distance
# geometry, which allows to randomly generate an initial conformation that
# satisfies various geometric constraints such as lower and upper bounds on
# the distances between atoms.
params = AllChem.ETKDGv3()
params.randomSeed = random_seed
params.pruneRmsThresh = prune_rms_thresh
params.numThreads = -1
params.useRandomCoords = use_random
conf_ids = AllChem.EmbedMultipleConfs(mol, max_num_conformers, params)
if not conf_ids:
if not fallback_to_random or use_random:
      raise ValueError("Can't get conformers")
return _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=True)
return mol
def _minimize_by_mmff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Optional[Chem.rdchem.Mol]:
"""Minimizes forcefield for conformers using MMFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers; or None if MMFF
cannot be performed.
"""
molecule_props = AllChem.MMFFGetMoleculeProperties(molecule)
if molecule_props is None:
return None
mol = copy.deepcopy(molecule)
for conf_id in range(mol.GetNumConformers()):
ff = AllChem.MMFFGetMoleculeForceField(
mol, molecule_props, confId=conf_id, ignoreInterfragInteractions=False)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _minimize_by_uff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Chem.rdchem.Mol:
"""Minimizes forcefield for conformers using UFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers.
"""
mol = copy.deepcopy(molecule)
conf_ids = range(mol.GetNumConformers())
for conf_id in conf_ids:
ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _get_symmetry_rotation_matrix(sign: tf.Tensor) -> tf.Tensor:
"""Returns the 2d/3d matrix for mirror symmetry."""
zero = tf.zeros_like(sign)
one = tf.ones_like(sign)
# pylint: disable=bad-whitespace,bad-continuation
rot = [sign, zero, zero,
zero, one, zero,
zero, zero, one]
# pylint: enable=bad-whitespace,bad-continuation
shape = (3, 3)
rot = tf.stack(rot, axis=-1)
rot = tf.reshape(rot, shape)
return rot
def _quaternion_to_rotation_matrix(quaternion: tf.Tensor) -> tf.Tensor:
"""Converts a batch of quaternions to a batch of rotation matrices."""
q0 = quaternion[0]
q1 = quaternion[1]
q2 = quaternion[2]
q3 = quaternion[3]
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
matrix = tf.stack([r00, r01, r02,
r10, r11, r12,
r20, r21, r22], axis=-1)
return tf.reshape(matrix, [3, 3])
def _get_random_rotation_3d() -> tf.Tensor:
random_quaternions = tf.random.normal(
shape=[4], dtype=tf.float32)
random_quaternions /= tf.linalg.norm(
random_quaternions, axis=-1, keepdims=True)
return _quaternion_to_rotation_matrix(random_quaternions)
def _get_random_mirror_symmetry() -> tf.Tensor:
random_0_1 = tf.random.uniform(
shape=(), minval=0, maxval=2, dtype=tf.int32)
random_signs = tf.cast((2 * random_0_1) - 1, tf.float32)
return _get_symmetry_rotation_matrix(random_signs)
|
[
"tensorflow.compat.v2.reshape",
"rdkit.Chem.RemoveHs",
"rdkit.Chem.AllChem.UFFGetMoleculeForceField",
"absl.logging.exception",
"tensorflow.compat.v2.matmul",
"copy.deepcopy",
"rdkit.Chem.AllChem.ETKDGv3",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.linalg.norm",
"tensorflow.compat.v2.cast",
"rdkit.Chem.AllChem.MMFFGetMoleculeForceField",
"tensorflow.compat.v2.random.normal",
"rdkit.Chem.AllChem.EmbedMultipleConfs",
"absl.logging.error",
"rdkit.Chem.AllChem.AlignMolConformers",
"tensorflow.compat.v2.random.uniform",
"numpy.array",
"tensorflow.compat.v2.ones_like",
"rdkit.Chem.AllChem.MMFFGetMoleculeProperties",
"rdkit.Chem.AddHs",
"rdkit.Chem.MolFromSmiles"
] |
[((1968, 1991), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (1981, 1991), False, 'import copy\n'), ((2000, 2015), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (2010, 2015), False, 'from rdkit import Chem\n'), ((2492, 2539), 'rdkit.Chem.AllChem.AlignMolConformers', 'AllChem.AlignMolConformers', (['mol_with_conformers'], {}), '(mol_with_conformers)\n', (2518, 2539), False, 'from rdkit.Chem import AllChem\n'), ((2654, 2688), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol_with_conformers'], {}), '(mol_with_conformers)\n', (2667, 2688), False, 'from rdkit import Chem\n'), ((3676, 3707), 'rdkit.Chem.MolFromSmiles', 'rdkit.Chem.MolFromSmiles', (['smile'], {}), '(smile)\n', (3700, 3707), False, 'import rdkit\n'), ((4631, 4677), 'numpy.array', 'np.array', (['atom_features_list'], {'dtype': 'np.float32'}), '(atom_features_list, dtype=np.float32)\n', (4639, 4677), True, 'import numpy as np\n'), ((5205, 5240), 'tensorflow.compat.v2.matmul', 'tf.matmul', (['vectors', 'rotation_matrix'], {}), '(vectors, rotation_matrix)\n', (5214, 5240), True, 'import tensorflow.compat.v2 as tf\n'), ((6405, 6428), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (6418, 6428), False, 'import copy\n'), ((6752, 6769), 'rdkit.Chem.AllChem.ETKDGv3', 'AllChem.ETKDGv3', ([], {}), '()\n', (6767, 6769), False, 'from rdkit.Chem import AllChem\n'), ((6925, 6984), 'rdkit.Chem.AllChem.EmbedMultipleConfs', 'AllChem.EmbedMultipleConfs', (['mol', 'max_num_conformers', 'params'], {}), '(mol, max_num_conformers, params)\n', (6951, 6984), False, 'from rdkit.Chem import AllChem\n'), ((7729, 7772), 'rdkit.Chem.AllChem.MMFFGetMoleculeProperties', 'AllChem.MMFFGetMoleculeProperties', (['molecule'], {}), '(molecule)\n', (7762, 7772), False, 'from rdkit.Chem import AllChem\n'), ((7827, 7850), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (7840, 7850), False, 'import copy\n'), ((8518, 8541), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (8531, 8541), False, 'import copy\n'), ((8915, 8934), 'tensorflow.compat.v2.zeros_like', 'tf.zeros_like', (['sign'], {}), '(sign)\n', (8928, 8934), True, 'import tensorflow.compat.v2 as tf\n'), ((8943, 8961), 'tensorflow.compat.v2.ones_like', 'tf.ones_like', (['sign'], {}), '(sign)\n', (8955, 8961), True, 'import tensorflow.compat.v2 as tf\n'), ((9177, 9199), 'tensorflow.compat.v2.stack', 'tf.stack', (['rot'], {'axis': '(-1)'}), '(rot, axis=-1)\n', (9185, 9199), True, 'import tensorflow.compat.v2 as tf\n'), ((9208, 9230), 'tensorflow.compat.v2.reshape', 'tf.reshape', (['rot', 'shape'], {}), '(rot, shape)\n', (9218, 9230), True, 'import tensorflow.compat.v2 as tf\n'), ((9788, 9852), 'tensorflow.compat.v2.stack', 'tf.stack', (['[r00, r01, r02, r10, r11, r12, r20, r21, r22]'], {'axis': '(-1)'}), '([r00, r01, r02, r10, r11, r12, r20, r21, r22], axis=-1)\n', (9796, 9852), True, 'import tensorflow.compat.v2 as tf\n'), ((9904, 9930), 'tensorflow.compat.v2.reshape', 'tf.reshape', (['matrix', '[3, 3]'], {}), '(matrix, [3, 3])\n', (9914, 9930), True, 'import tensorflow.compat.v2 as tf\n'), ((10000, 10045), 'tensorflow.compat.v2.random.normal', 'tf.random.normal', ([], {'shape': '[4]', 'dtype': 'tf.float32'}), '(shape=[4], dtype=tf.float32)\n', (10016, 10045), True, 'import tensorflow.compat.v2 as tf\n'), ((10077, 10135), 'tensorflow.compat.v2.linalg.norm', 'tf.linalg.norm', (['random_quaternions'], {'axis': '(-1)', 'keepdims': '(True)'}), '(random_quaternions, axis=-1, keepdims=True)\n', (10091, 10135), True, 
'import tensorflow.compat.v2 as tf\n'), ((10268, 10331), 'tensorflow.compat.v2.random.uniform', 'tf.random.uniform', ([], {'shape': '()', 'minval': '(0)', 'maxval': '(2)', 'dtype': 'tf.int32'}), '(shape=(), minval=0, maxval=2, dtype=tf.int32)\n', (10285, 10331), True, 'import tensorflow.compat.v2 as tf\n'), ((10356, 10395), 'tensorflow.compat.v2.cast', 'tf.cast', (['(2 * random_0_1 - 1)', 'tf.float32'], {}), '(2 * random_0_1 - 1, tf.float32)\n', (10363, 10395), True, 'import tensorflow.compat.v2 as tf\n'), ((4990, 5040), 'tensorflow.compat.v2.matmul', 'tf.matmul', (['rotation_matrix', 'random_mirror_symmetry'], {}), '(rotation_matrix, random_mirror_symmetry)\n', (4999, 5040), True, 'import tensorflow.compat.v2 as tf\n'), ((7908, 8017), 'rdkit.Chem.AllChem.MMFFGetMoleculeForceField', 'AllChem.MMFFGetMoleculeForceField', (['mol', 'molecule_props'], {'confId': 'conf_id', 'ignoreInterfragInteractions': '(False)'}), '(mol, molecule_props, confId=conf_id,\n ignoreInterfragInteractions=False)\n', (7941, 8017), False, 'from rdkit.Chem import AllChem\n'), ((8621, 8674), 'rdkit.Chem.AllChem.UFFGetMoleculeForceField', 'AllChem.UFFGetMoleculeForceField', (['mol'], {'confId': 'conf_id'}), '(mol, confId=conf_id)\n', (8653, 8674), False, 'from rdkit.Chem import AllChem\n'), ((4013, 4098), 'absl.logging.exception', 'logging.exception', (['"""Failed to generate conformers for %s . IOError %s."""', 'smile', 'e'], {}), "('Failed to generate conformers for %s . IOError %s.',\n smile, e)\n", (4030, 4098), False, 'from absl import logging\n'), ((4170, 4243), 'absl.logging.error', 'logging.error', (['"""Failed to generate conformers for %s . ValueError"""', 'smile'], {}), "('Failed to generate conformers for %s . ValueError', smile)\n", (4183, 4243), False, 'from absl import logging\n'), ((4317, 4378), 'absl.logging.error', 'logging.error', (['"""Failed to generate conformers for %s."""', 'smile'], {}), "('Failed to generate conformers for %s.', smile)\n", (4330, 4378), False, 'from absl import logging\n')]
|
"""Module for handling plotting functions
This module contains plotting classes to plot :class:`.Binning` objects.
Examples
--------
::
plt = plotting.get_plotter(binning)
plt.plot_values()
plt.savefig('output.png')
"""
from itertools import cycle
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from . import binning
def get_plotter(obj, *args, **kwargs):
"""Return a suitable plotting class instance for the object.
Parameters
----------
obj : object
The object for which a plotter should be returned.
*args : optional
**kwargs : optional
Additional arguments are passed to the init method of the plotter.
"""
if isinstance(obj, binning.RectilinearBinning):
return RectilinearBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.LinearBinning):
return LinearBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.CartesianProductBinning):
return CartesianProductBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.Binning):
return BinningPlotter(obj, *args, **kwargs)
if isinstance(obj, np.ndarray):
return ArrayPlotter(obj, *args, **kwargs)
raise TypeError(f"No known Plotter class for type {type(obj)}")
class Plotter:
"""Plotting base class.
Parameters
----------
figax : (Figure, Axes), optional
The figure and axis to plot in.
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
"""
def __init__(self, figax=None):
self.figax = figax
self.color = cycle("C%d" % (i,) for i in range(0, 10))
self.hatch = cycle([r"//", r"\\", r"O", "*"])
def __del__(self):
"""Clean up figures."""
if self.figax is not None:
plt.close(self.figax[0])
def subplots(self, *args, **kwargs):
"""Return the ``(Figure, Axes)`` tuple of the binning.
Creates one using Matplotlib's ``subplots``, if necessary.
"""
if self.figax is None:
self.figax = plt.subplots(*args, **kwargs)
return self.figax
def savefig(self, *args, **kwargs):
"""Save the figure."""
kwargs2 = {"bbox_inches": "tight"}
kwargs2.update(kwargs)
self.figax[0].savefig(*args, **kwargs2)
class ArrayPlotter(Plotter):
"""Plotting class for numpy arrays.
Parameters
----------
array : ndarray
The ndarray to be plotted.
bins_per_row : int, optional
How many bins are going to be plotted per row.
**kwargs : optional
        Additional keyword arguments are passed to :class:`Plotter`.
See also
--------
Plotter
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
array : ndarray
The ndarray to be plotted.
bins_per_row : int, optional
How many bins are going to be plotted per row.
"""
def __init__(self, array, bins_per_row=25, **kwargs):
self.array = array
self.bins_per_row = bins_per_row
Plotter.__init__(self, **kwargs)
def _get_array(self, array):
if array is None:
array = self.array
else:
array = np.asarray(array)
if array.shape != self.array.shape:
raise TypeError("Array must be of equal shape as the initial one.")
return array
def _get_arrays(self, arrays):
try:
ret = [self._get_array(a) for a in arrays]
except (TypeError, IndexError):
ret = [self._get_array(arrays)]
return np.array(ret)
def get_bin_edges(self, i_min, i_max):
"""Get the bin edges corresponding to bins i_min to i_max."""
x = np.arange(i_min, i_max)
return np.append(x - 0.5, x[-1] + 0.5) # Bins centred on integers
def get_axis_label(self):
"""Return the default label for the axis."""
return "Bin #"
@staticmethod
def _get_stack_functions(stack_function):
try:
# A number?
np.isfinite(stack_function)
except TypeError:
# Nope
pass
else:
# A number.
lobound = (1.0 - stack_function) / 2.0
hibound = 1.0 - lobound
def lower(x, axis=0, bound=lobound):
return np.quantile(x, bound, axis=axis)
def upper(x, axis=0, bound=hibound):
return np.quantile(x, bound, axis=axis)
return lower, upper
# No number
try:
# Tuple of functions?
lower, upper = stack_function
except TypeError:
# Nope
def lower(x, axis=0):
return np.sum(np.zeros_like(x), axis=axis)
upper = stack_function
return lower, upper
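    # Illustrative note (added): with e.g. stack_function=0.68 the plotted band
    # spans the equal-tailed 68% interval of the stack, i.e.
    #   lower = np.quantile(stack, 0.16, axis=0)
    #   upper = np.quantile(stack, 0.84, axis=0)
    # With a single function such as the default np.mean, the lower edge falls
    # back to zero and the upper edge is that function applied to the stack.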
def plot_array(
self,
array=None,
density=False,
stack_function=np.mean,
margin_function=None,
**kwargs,
):
"""Plot an array.
Parameters
----------
array : ndarray
The thing to plot.
density : bool, optional
Divide the data by the relative bin width: ``width / total_plot_range``.
stack_function : float or function or (lower_function, function)
How to deal with multiple arrays.
When `float`, plot the respective quantile as equal-tailed interval.
When `function`, apply this function to the stack after marginalisation.
When `(function, function)`, use these functions to calculate lower and
upper bounds of the area to be plotted respectively.
Functions must accept ``axis`` keyword argument.
"""
# The `margin_function` parameter is only here so it can be
# safely used with all plotting methods
arrays = self._get_arrays(array)
lower, upper = self._get_stack_functions(stack_function)
bins_per_row = self.bins_per_row
if bins_per_row >= 1:
n_rows = int(np.ceil(arrays.shape[-1] / bins_per_row))
else:
n_rows = 1
bins_per_row = arrays.shape[-1]
figax = self.subplots(
nrows=n_rows,
sharey=True,
figsize=(6.4, max(2.4 * n_rows, 4.8)),
squeeze=False,
)
color = kwargs.get("color", next(self.color))
hatch = kwargs.get("hatch", next(self.hatch))
for i, ax in enumerate(figax[1][:, 0]):
i_min = i * bins_per_row
i_max = min((i + 1) * bins_per_row, arrays.shape[-1])
y_hi = np.asfarray(upper(arrays[:, i_min:i_max], axis=0))
y_lo = np.asfarray(lower(arrays[:, i_min:i_max], axis=0))
bins = np.asfarray(self.get_bin_edges(i_min, i_max))
# Divide by relative bin widths
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
y_hi /= np.asfarray(rel_widths)
y_lo /= np.asfarray(rel_widths)
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
y_lo = np.append(y_lo, y_lo[-1])
y_hi = np.append(y_hi, y_hi[-1])
poly = ax.fill_between(bins, y_hi, y_lo, **args)
# Add sticky y edge so histograms get plotted more beautifully
poly.sticky_edges.y.append(np.min(y_lo))
ax.autoscale_view()
ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
ax.set_xlabel(self.get_axis_label())
def legend(self, **kwargs):
"""Draw a legend in the first axis."""
args = {
"loc": "best",
}
args.update(kwargs)
self.figax[1][0, 0].legend(**args)
class BinningPlotter(ArrayPlotter):
"""Plotting class for the simplest :class:`.Binning` class.
Parameters
----------
binning : Binning
The binning to be plotted.
marginalize_subbinnings : bool, optional
Plot the contents of subbinnings as a single bin.
**kwargs : optional
        Additional keyword arguments are passed to :class:`ArrayPlotter`.
See also
--------
ArrayPlotter
.Binning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : Binning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
"""
def __init__(self, binning, marginalize_subbinnings=False, **kwargs):
self.binning = binning
self.marginalize_subbinnings = marginalize_subbinnings
array = self.binning.value_array
if marginalize_subbinnings:
array = self.binning.marginalize_subbinnings_on_ndarray(array)
ArrayPlotter.__init__(self, array, **kwargs)
def _get_array(self, array):
if array is None:
array = self.array
else:
array = np.asarray(array)
# Marginalize subbinnings if necessary
if self.marginalize_subbinnings and array.shape != self.array.shape:
array = self.binning.marginalize_subbinnings_on_ndarray(array)
if array.shape != self.array.shape:
raise TypeError("Array must be of equal shape as the initial one.")
return array
def _get_binning(self, binning):
if binning is None:
binning = self.binning
return binning
def plot_values(self, binning=None, **kwargs):
"""Plot the values of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.value_array, **kwargs)
def plot_entries(self, binning=None, **kwargs):
"""Plot the entries of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.entries_array, **kwargs)
def plot_sumw2(self, binning=None, **kwargs):
"""Plot the sumw2 of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.sumw2_array, **kwargs)
class CartesianProductBinningPlotter(BinningPlotter):
"""Plotting class for :class:`.CartesianProductBinning`
Parameters
----------
binning : CartesianProductBinning
        The binning to be plotted.
x_axis_binnings : list of int, optional
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int, optional
The indices of binnings to be plotted on the y-axis.
**kwargs : optional
Additional keyword arguments are passed to :class:`BinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
BinningPlotter
.CartesianProductBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : CartesianProductBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
x_axis_binnings : list of int
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int
The indices of binnings to be plotted on the y-axis.
"""
def __init__(self, binning, x_axis_binnings=None, y_axis_binnings=None, **kwargs):
if x_axis_binnings is None:
x_axis_binnings = list(range(int(np.ceil(len(binning.binnings) / 2.0))))
self.x_axis_binnings = x_axis_binnings
if y_axis_binnings is None:
y_axis_binnings = list(
range(int(np.ceil(len(binning.binnings) / 2.0)), len(binning.binnings))
)
self.y_axis_binnings = y_axis_binnings
kwargs["marginalize_subbinnings"] = True
kwargs["bins_per_row"] = -1
BinningPlotter.__init__(self, binning, **kwargs)
def get_bin_edges(self, i_min, i_max, j_binning):
"""Get the bin edges corresponding to bins i_min to i_max."""
x = np.arange(i_min, i_max)
return np.append(x - 0.5, x[-1] + 0.5) # Bins centred on integers
def get_axis_label(self, j_binning):
"""Return the default label for the axis."""
return "Binning %d Bin #" % (j_binning,)
def plot_array(
self,
array=None,
density=True,
stack_function=np.mean,
margin_function=np.sum,
scatter=-1,
**kwargs,
):
"""Plot an array.
Parameters
----------
array : ndarray, optional
The data to be plotted.
density : bool, optional
Divide the data by the relative bin width: ``width / total_plot_range``.
Dividing by the relative bin width, rather than the bin width directly,
ensures that the maximum values in all 1D projections are comparable.
stack_function : float or function or (lower_function, function)
How to deal with multiple arrays.
When `float`, plot the respective quantile as equal-tailed interval.
When `function`, apply this function to the stack after marginalisation.
When `(function, function)`, use these functions to calculate lower and
upper bounds of the area to be plotted respectively.
Functions must accept ``axis`` keyword argument.
margin_function : function, optional
The function used to marginalize the data.
scatter : int, optional
Use a pseudo scatter plot with `scatter` number of points instead
of a 2D histogram. Allows to draw multiple sets of 2D data in the
same plot. The number of points in each cell is proportional to
the value being plotted. Using the `scatter` option is thus
implicitly replicating the behaviour of the `density` option for
the 2D plots. The `density` argument has no effect on the scatter
plots.
"""
arrays = self._get_arrays(array)
lower, upper = self._get_stack_functions(stack_function)
shape = self.binning.bins_shape
arrays = arrays.reshape(arrays.shape[:1] + shape)
n_col = len(self.x_axis_binnings) + 1 # "+1" for the 1D projections
n_row = len(self.y_axis_binnings) + 1
# Widths and heights according to number of bins,
# 10 px (= 0.1") per bin
widths = [
0.1 * self.binning.binnings[i].data_size for i in self.x_axis_binnings
]
heights = [
0.1 * self.binning.binnings[i].data_size for i in self.y_axis_binnings
]
# Axes are counted top to bottom, but we want binnings bottom to top
heights.reverse()
# Total figure size
total_width = np.sum(widths)
total_height = np.sum(heights)
scale = 4.0 / min(max(total_width, total_height), 4.0)
# Room for the 1D histograms
if total_width == 0.0:
widths.append(6 / scale)
else:
widths.append(1.5 / scale)
if total_height == 0.0:
heights.insert(0, 4 / scale)
else:
heights.insert(0, 1.5 / scale)
# Update total sizes
total_width = np.sum(widths)
total_height = np.sum(heights)
fig_x = total_width * scale
fig_y = total_height * scale
# Subplot spacing is specified as multiple of average axis size
# We want it to be relative to the 1D projections
wspace = 0.1 * widths[-1] / (total_width / len(widths))
hspace = 0.1 * heights[0] / (total_height / len(heights))
figax = self.subplots(
nrows=n_row,
ncols=n_col,
sharex="col",
sharey="row",
figsize=(fig_x, fig_y),
gridspec_kw={
"width_ratios": widths,
"height_ratios": heights,
"wspace": wspace,
"hspace": hspace,
},
squeeze=False,
)
color = kwargs.get("color", next(self.color))
hatch = kwargs.get("hatch", next(self.hatch))
# 2D histograms
for x, i in enumerate(self.x_axis_binnings):
for y, j in enumerate(self.y_axis_binnings):
# Get axis to plot in
ax = figax[1][-y - 1, x] # rows are counted top to bottom
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
for k in sorted((i, j), reverse=True):
del axis[k]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# 2D plots only show upper limit of stack
data = upper(data, axis=0)
# Flip axes if necessary
if i < j:
data = data.T
# Bin edges
x_edg = self.get_bin_edges(0, data.shape[1], i)
y_edg = self.get_bin_edges(0, data.shape[0], j)
# Plot the data
if scatter >= 0:
# Draw a set of random points and plot these
# Get bin numbers
csum = np.asfarray(data.cumsum())
csum /= np.max(csum)
indices = np.digitize(np.random.uniform(size=scatter), csum)
# Get x and y bin numbers
x_indices = indices % data.shape[1]
y_indices = indices // data.shape[1]
# Throw X and Y for each event
x = []
y = []
for ix, iy in zip(x_indices, y_indices):
x_min = x_edg[ix]
x_max = x_edg[ix + 1]
y_min = y_edg[iy]
y_max = y_edg[iy + 1]
x.append(np.random.uniform(x_min, x_max))
y.append(np.random.uniform(y_min, y_max))
# Plot the points
if data.sum() > 0:
# Only actually draw something if we have some events
ax.scatter(x, y, 1, color=color, marker=",")
else:
# Plot a regular 2D histogram
# Bin centres
x = np.convolve(x_edg, np.ones(2) / 2, mode="valid")
y = np.convolve(y_edg, np.ones(2) / 2, mode="valid")
xx = np.broadcast_to(x, (len(y), len(x))).flatten()
yy = np.repeat(y, len(x))
# Plot it
if data.sum() == 0:
# Empty data messes with the normalisation
data.fill(0.001)
ax.hist2d(
xx, yy, weights=data.flat, bins=(x_edg, y_edg), density=density
)
# 1D vertical histograms
for x, i in enumerate(self.x_axis_binnings):
# Get axis to plot in
ax = figax[1][0, x]
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
del axis[i]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# Upper and lower limit of area
data_hi = upper(data, axis=0)
data_lo = lower(data, axis=0)
# Divide by relative bin widths
bins = np.asfarray(self.get_bin_edges(0, data.shape[1], i))
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
data_hi /= rel_widths
data_lo /= rel_widths
# Plot the data
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
data_lo = np.append(data_lo, data_lo[-1])
data_hi = np.append(data_hi, data_hi[-1])
poly = ax.fill_between(bins, data_hi, data_lo, **args)
# Add sticky y edge so histograms get plotted more beautifully
poly.sticky_edges.y.append(np.min(data_lo))
ax.autoscale_view()
# Only int tick label
ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
# Add labels at the appropriate axes
ax = figax[1][-1, x]
ax.set_xlabel(self.get_axis_label(i))
# 1D horizontal histograms
for y, i in enumerate(self.y_axis_binnings):
# Get axis to plot in
ax = figax[1][-y - 1, -1] # Rows are counted top to bottom
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
del axis[i]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# Upper and lower limit of area
data_hi = upper(data, axis=0)
data_lo = lower(data, axis=0)
# Divide by relative bin widths
bins = np.asfarray(self.get_bin_edges(0, data.shape[1], i))
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
data_hi /= rel_widths
data_lo /= rel_widths
# Plot the data
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
data_lo = np.append(data_lo, data_lo[-1])
data_hi = np.append(data_hi, data_hi[-1])
poly = ax.fill_betweenx(bins, data_hi, data_lo, **args)
# Add sticky x edge so histograms get plotted more beautifully
poly.sticky_edges.x.append(np.min(data_lo))
ax.autoscale_view()
# Only int tick label
ax.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
# Add labels at the appropriate axes
ax = figax[1][-y - 1, 0] # Rows are counted top to bottom
ax.set_ylabel(self.get_axis_label(i))
# Hide empty axes
figax[1][0, -1].set_axis_off()
def legend(self, **kwargs):
"""Draw a legend in the upper right corner of the plot."""
handles, labels = self.figax[1][0, 0].get_legend_handles_labels()
args = {
"loc": "center",
"borderaxespad": 0.0,
"frameon": False,
}
args.update(kwargs)
self.figax[1][0, -1].legend(handles, labels, **args)
class LinearBinningPlotter(BinningPlotter):
"""Plotting class for :class:`.LinearBinning`
Parameters
----------
binning : LinearBinning
        The binning to be plotted.
**kwargs : optional
Additional keyword arguments are passed to :class:`BinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
BinningPlotter
.LinearBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : LinearBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
"""
def __init__(self, binning, **kwargs):
kwargs["marginalize_subbinnings"] = True
args = {
"bins_per_row": -1,
}
args.update(kwargs)
BinningPlotter.__init__(self, binning, **args)
def plot_array(self, *args, **kwargs):
"""Plot an array.
See :meth:`ArrayPlotter.plot_array`.
"""
# Change default behaviour of `density`
kwargs["density"] = kwargs.get("density", True)
return ArrayPlotter.plot_array(self, *args, **kwargs)
def get_bin_edges(self, i_min, i_max):
"""Get the finite bin edges."""
bins = self.binning.bin_edges[i_min : i_max + 1]
ret = list(bins)
if not np.isfinite(ret[0]):
if len(ret) >= 3 and np.isfinite(ret[2]):
ret[0] = ret[1] - (ret[2] - ret[1])
elif np.isfinite(ret[1]):
ret[0] = ret[1] - 1
else:
ret[0] = -0.5
if not np.isfinite(ret[-1]):
if len(ret) >= 3 and np.isfinite(ret[-3]):
ret[-1] = ret[-2] + (ret[-2] - ret[-3])
else:
ret[-1] = ret[-2] + 1
return np.array(ret)
def get_axis_label(self):
"""Return variable name."""
return self.binning.variable
class RectilinearBinningPlotter(CartesianProductBinningPlotter):
"""Plotting class for :class:`.RectilinearBinning`
Parameters
----------
binning : RectilinearBinning
        The binning to be plotted.
x_axis_binnings : list of int/str, optional
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int/str, optional
The indices of binnings to be plotted on the y-axis.
**kwargs : optional
Additional keyword arguments are passed to :class:`CartesianProductBinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
CartesianProductBinningPlotter
.RectilinearBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : RectilinearBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
x_axis_binnings : list of int or str
        The indices or variable names to be plotted on the x-axis.
y_axis_binnings : list of int or str
The indices or variable names to be plotted on the y-axis.
"""
def __init__(self, binning, x_axis_binnings=None, y_axis_binnings=None, **kwargs):
if x_axis_binnings is None:
x_axis_binnings = list(range(int(np.ceil(len(binning.binnings) / 2.0))))
else:
            x_axis_binnings = list(map(binning.get_variable_index, x_axis_binnings))
if y_axis_binnings is None:
y_axis_binnings = list(
range(int(np.ceil(len(binning.binnings) / 2.0)), len(binning.binnings))
)
else:
            y_axis_binnings = list(map(binning.get_variable_index, y_axis_binnings))
kwargs["x_axis_binnings"] = x_axis_binnings
kwargs["y_axis_binnings"] = y_axis_binnings
kwargs["marginalize_subbinnings"] = True
kwargs["bins_per_row"] = -1
CartesianProductBinningPlotter.__init__(self, binning, **kwargs)
def get_bin_edges(self, i_min, i_max, j_binning):
"""Get the finite bin edges."""
bins = self.binning.binnings[j_binning].bin_edges[i_min : i_max + 1]
ret = list(bins)
if not np.isfinite(ret[0]):
if len(ret) >= 3 and np.isfinite(ret[2]):
ret[0] = ret[1] - (ret[2] - ret[1])
elif np.isfinite(ret[1]):
ret[0] = ret[1] - 1
else:
ret[0] = -0.5
if not np.isfinite(ret[-1]):
if len(ret) >= 3 and np.isfinite(ret[-3]):
ret[-1] = ret[-2] + (ret[-2] - ret[-3])
else:
ret[-1] = ret[-2] + 1
return np.array(ret)
def get_axis_label(self, j_binning):
"""Return variable name."""
return self.binning.binnings[j_binning].variable
|
[
"numpy.random.uniform",
"numpy.quantile",
"numpy.sum",
"numpy.ceil",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"numpy.asarray",
"numpy.asfarray",
"matplotlib.ticker.MaxNLocator",
"numpy.isfinite",
"numpy.ones",
"numpy.append",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.arange",
"itertools.cycle",
"matplotlib.pyplot.subplots"
] |
[((1942, 1973), 'itertools.cycle', 'cycle', (["['//', '\\\\\\\\', 'O', '*']"], {}), "(['//', '\\\\\\\\', 'O', '*'])\n", (1947, 1973), False, 'from itertools import cycle\n'), ((4113, 4126), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (4121, 4126), True, 'import numpy as np\n'), ((4253, 4276), 'numpy.arange', 'np.arange', (['i_min', 'i_max'], {}), '(i_min, i_max)\n', (4262, 4276), True, 'import numpy as np\n'), ((4292, 4323), 'numpy.append', 'np.append', (['(x - 0.5)', '(x[-1] + 0.5)'], {}), '(x - 0.5, x[-1] + 0.5)\n', (4301, 4323), True, 'import numpy as np\n'), ((13183, 13206), 'numpy.arange', 'np.arange', (['i_min', 'i_max'], {}), '(i_min, i_max)\n', (13192, 13206), True, 'import numpy as np\n'), ((13222, 13253), 'numpy.append', 'np.append', (['(x - 0.5)', '(x[-1] + 0.5)'], {}), '(x - 0.5, x[-1] + 0.5)\n', (13231, 13253), True, 'import numpy as np\n'), ((15951, 15965), 'numpy.sum', 'np.sum', (['widths'], {}), '(widths)\n', (15957, 15965), True, 'import numpy as np\n'), ((15989, 16004), 'numpy.sum', 'np.sum', (['heights'], {}), '(heights)\n', (15995, 16004), True, 'import numpy as np\n'), ((16409, 16423), 'numpy.sum', 'np.sum', (['widths'], {}), '(widths)\n', (16415, 16423), True, 'import numpy as np\n'), ((16447, 16462), 'numpy.sum', 'np.sum', (['heights'], {}), '(heights)\n', (16453, 16462), True, 'import numpy as np\n'), ((26238, 26251), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (26246, 26251), True, 'import numpy as np\n'), ((29330, 29343), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (29338, 29343), True, 'import numpy as np\n'), ((2078, 2102), 'matplotlib.pyplot.close', 'plt.close', (['self.figax[0]'], {}), '(self.figax[0])\n', (2087, 2102), True, 'from matplotlib import pyplot as plt\n'), ((2346, 2375), 'matplotlib.pyplot.subplots', 'plt.subplots', (['*args'], {}), '(*args, **kwargs)\n', (2358, 2375), True, 'from matplotlib import pyplot as plt\n'), ((3739, 3756), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (3749, 3756), True, 'import numpy as np\n'), ((4573, 4600), 'numpy.isfinite', 'np.isfinite', (['stack_function'], {}), '(stack_function)\n', (4584, 4600), True, 'import numpy as np\n'), ((7842, 7867), 'numpy.append', 'np.append', (['y_lo', 'y_lo[-1]'], {}), '(y_lo, y_lo[-1])\n', (7851, 7867), True, 'import numpy as np\n'), ((7887, 7912), 'numpy.append', 'np.append', (['y_hi', 'y_hi[-1]'], {}), '(y_hi, y_hi[-1])\n', (7896, 7912), True, 'import numpy as np\n'), ((9924, 9941), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (9934, 9941), True, 'import numpy as np\n'), ((21306, 21337), 'numpy.append', 'np.append', (['data_lo', 'data_lo[-1]'], {}), '(data_lo, data_lo[-1])\n', (21315, 21337), True, 'import numpy as np\n'), ((21360, 21391), 'numpy.append', 'np.append', (['data_hi', 'data_hi[-1]'], {}), '(data_hi, data_hi[-1])\n', (21369, 21391), True, 'import numpy as np\n'), ((23054, 23085), 'numpy.append', 'np.append', (['data_lo', 'data_lo[-1]'], {}), '(data_lo, data_lo[-1])\n', (23063, 23085), True, 'import numpy as np\n'), ((23108, 23139), 'numpy.append', 'np.append', (['data_hi', 'data_hi[-1]'], {}), '(data_hi, data_hi[-1])\n', (23117, 23139), True, 'import numpy as np\n'), ((25769, 25788), 'numpy.isfinite', 'np.isfinite', (['ret[0]'], {}), '(ret[0])\n', (25780, 25788), True, 'import numpy as np\n'), ((26033, 26053), 'numpy.isfinite', 'np.isfinite', (['ret[-1]'], {}), '(ret[-1])\n', (26044, 26053), True, 'import numpy as np\n'), ((28861, 28880), 'numpy.isfinite', 'np.isfinite', (['ret[0]'], {}), '(ret[0])\n', (28872, 
28880), True, 'import numpy as np\n'), ((29125, 29145), 'numpy.isfinite', 'np.isfinite', (['ret[-1]'], {}), '(ret[-1])\n', (29136, 29145), True, 'import numpy as np\n'), ((4861, 4893), 'numpy.quantile', 'np.quantile', (['x', 'bound'], {'axis': 'axis'}), '(x, bound, axis=axis)\n', (4872, 4893), True, 'import numpy as np\n'), ((4967, 4999), 'numpy.quantile', 'np.quantile', (['x', 'bound'], {'axis': 'axis'}), '(x, bound, axis=axis)\n', (4978, 4999), True, 'import numpy as np\n'), ((6578, 6618), 'numpy.ceil', 'np.ceil', (['(arrays.shape[-1] / bins_per_row)'], {}), '(arrays.shape[-1] / bins_per_row)\n', (6585, 6618), True, 'import numpy as np\n'), ((7546, 7569), 'numpy.asfarray', 'np.asfarray', (['rel_widths'], {}), '(rel_widths)\n', (7557, 7569), True, 'import numpy as np\n'), ((7594, 7617), 'numpy.asfarray', 'np.asfarray', (['rel_widths'], {}), '(rel_widths)\n', (7605, 7617), True, 'import numpy as np\n'), ((8089, 8101), 'numpy.min', 'np.min', (['y_lo'], {}), '(y_lo)\n', (8095, 8101), True, 'import numpy as np\n'), ((8181, 8213), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (8199, 8213), False, 'from matplotlib import ticker\n'), ((21574, 21589), 'numpy.min', 'np.min', (['data_lo'], {}), '(data_lo)\n', (21580, 21589), True, 'import numpy as np\n'), ((21703, 21735), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (21721, 21735), False, 'from matplotlib import ticker\n'), ((23323, 23338), 'numpy.min', 'np.min', (['data_lo'], {}), '(data_lo)\n', (23329, 23338), True, 'import numpy as np\n'), ((23452, 23484), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (23470, 23484), False, 'from matplotlib import ticker\n'), ((25823, 25842), 'numpy.isfinite', 'np.isfinite', (['ret[2]'], {}), '(ret[2])\n', (25834, 25842), True, 'import numpy as np\n'), ((25913, 25932), 'numpy.isfinite', 'np.isfinite', (['ret[1]'], {}), '(ret[1])\n', (25924, 25932), True, 'import numpy as np\n'), ((26088, 26108), 'numpy.isfinite', 'np.isfinite', (['ret[-3]'], {}), '(ret[-3])\n', (26099, 26108), True, 'import numpy as np\n'), ((28915, 28934), 'numpy.isfinite', 'np.isfinite', (['ret[2]'], {}), '(ret[2])\n', (28926, 28934), True, 'import numpy as np\n'), ((29005, 29024), 'numpy.isfinite', 'np.isfinite', (['ret[1]'], {}), '(ret[1])\n', (29016, 29024), True, 'import numpy as np\n'), ((29180, 29200), 'numpy.isfinite', 'np.isfinite', (['ret[-3]'], {}), '(ret[-3])\n', (29191, 29200), True, 'import numpy as np\n'), ((18518, 18530), 'numpy.max', 'np.max', (['csum'], {}), '(csum)\n', (18524, 18530), True, 'import numpy as np\n'), ((5252, 5268), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5265, 5268), True, 'import numpy as np\n'), ((18573, 18604), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'scatter'}), '(size=scatter)\n', (18590, 18604), True, 'import numpy as np\n'), ((19148, 19179), 'numpy.random.uniform', 'np.random.uniform', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (19165, 19179), True, 'import numpy as np\n'), ((19214, 19245), 'numpy.random.uniform', 'np.random.uniform', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (19231, 19245), True, 'import numpy as np\n'), ((19623, 19633), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (19630, 19633), True, 'import numpy as np\n'), ((19696, 19706), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (19703, 19706), True, 'import numpy as np\n')]
|
"""
ported from https://github.com/NASA-DEVELOP/dnppy/tree/master/dnppy/landsat
"""
# standard imports
from .landsat_metadata import landsat_metadata
from . import core
import os
from pathlib import Path
import numpy as np
import rasterio
__all__ = ['toa_radiance_8', # complete
'toa_radiance_457',
'calc_radiance_8',
'calc_radiance_457'
]
def calc_radiance_457(nparray,band_num,meta_path):
"""
Calculate the radiance for a landsat 4,5,7 band
Parameters
----------
    nparray: ndarray, 2-D, uint16, or None
        landsat scene counts (currently unused; the band is re-read via meta_path)
band_num: str
        landsat band
meta_path: Path object
path to MTL.txt file for scene
Returns
-------
TOA_rad: ndarray, 2-d, float32
        radiance for the scene (W/m^2/micron/sr)
"""
#the presence of a PRODUCT_CREATION_TIME category is used to identify old metadata
#if this is not present, the meta data is considered new.
#Band6length refers to the length of the Band 6 name string. In the new metadata this string is longer
#metadata format was changed August 29, 2012. This tool can process either the new or old format
with open(meta_path) as f:
MText = f.read()
if "PRODUCT_CREATION_TIME" in MText:
Meta = "oldMeta"
Band6length = 2
else:
Meta = "newMeta"
Band6length = 8
    #The tile name is located using the newMeta/oldMeta indices and the date of capture is recorded
    metadata = landsat_metadata(meta_path)
    if Meta == "newMeta":
TileName = getattr(metadata, "LANDSAT_SCENE_ID")
year = TileName[9:13]
jday = TileName[13:16]
date = getattr(metadata, "DATE_ACQUIRED")
elif Meta == "oldMeta":
TileName = getattr(metadata, "BAND1_FILE_NAME")
year = TileName[13:17]
jday = TileName[17:20]
date = getattr(metadata, "ACQUISITION_DATE")
#the spacecraft from which the imagery was capture is identified
#this info determines the solar exoatmospheric irradiance (ESun) for each band
spacecraft = getattr(metadata, "SPACECRAFT_ID")
if "7" in spacecraft:
TM_ETM_bands = ['1','2','3','4','5','7','8']
elif "5" in spacecraft:
TM_ETM_bands = ['1','2','3','4','5','7']
elif "4" in spacecraft:
TM_ETM_bands = ['1','2','3','4','5','7']
else:
raise ValueError("Landsat 4, 5, or 7")
#arcpy.AddError("This tool only works for Landsat 4, 5, or 7")
#raise arcpy.ExecuteError()
if band_num not in TM_ETM_bands:
errmsg=f"""Can only perform reflectance conversion on OLI sensor bands")
Skipping band {band_num}
"""
raise ValueError(errmsg)
print(f"Processing radiance for band {band_num}")
str_path = str(meta_path)
band_path = Path(str_path.replace("MTL.txt",f"B{band_num}.TIF"))
with rasterio.open(str(band_path)) as raster:
Qcal = raster.read(1)
hit = (Qcal == 0)
Qcal=Qcal.astype(np.float32)
Qcal[hit]=np.nan
#using the oldMeta/newMeta indixes to pull the min/max for radiance/Digital numbers
if Meta == "newMeta":
LMax = getattr(metadata, "RADIANCE_MAXIMUM_BAND_{0}".format(band_num))
LMin = getattr(metadata, "RADIANCE_MINIMUM_BAND_{0}".format(band_num))
QCalMax = getattr(metadata, "QUANTIZE_CAL_MAX_BAND_{0}".format(band_num))
QCalMin = getattr(metadata, "QUANTIZE_CAL_MIN_BAND_{0}".format(band_num))
elif Meta == "oldMeta":
LMax = getattr(metadata, "LMAX_BAND{0}".format(band_num))
LMin = getattr(metadata, "LMIN_BAND{0}".format(band_num))
QCalMax = getattr(metadata, "QCALMAX_BAND{0}".format(band_num))
QCalMin = getattr(metadata, "QCALMIN_BAND{0}".format(band_num))
TOA_rad = (((LMax - LMin)/(QCalMax-QCalMin)) * (Qcal - QCalMin)) + LMin
return TOA_rad
def calc_radiance_8(nparray,band_num,meta_path):
"""
Calculate the radiance for a landsat 8 band
Parameters
----------
    nparray: ndarray, 2-D, uint16
landsat scene counts
band_num: str
        landsat band
meta_path: Path object
path to MTL.txt file for scene
Returns
-------
TOA_rad: ndarray, 2-d, float32
        radiance for the scene (W/m^2/micron/sr)
"""
meta = landsat_metadata(meta_path)
#scrape the attribute data
Ml = getattr(meta,"RADIANCE_MULT_BAND_{0}".format(band_num)) # multiplicative scaling factor
Al = getattr(meta,"RADIANCE_ADD_BAND_{0}".format(band_num)) # additive rescaling factor
#calculate Top-of-Atmosphere radiance
    TOA_rad = (nparray * Ml) + Al
return TOA_rad
def toa_radiance_8(band_nums, meta_path):
"""
Top of Atmosphere radiance (in Watts/(square meter x steradians x micrometers))
conversion for landsat 8 data. To be performed on raw Landsat 8
level 1 data. See link below for details:
see here http://landsat.usgs.gov/Landsat8_Using_Product.php
Parameters
----------
band_nums: list
A list of desired band numbers such as [3, 4, 5]
meta_path: str or Path object
The full filepath to the MTL.txt metadata file for those bands
Returns
-------
out_dict: dict
dictionary with band_num as keys and TOA radiance (W/m2/sr/um) as values
"""
meta_path = Path(meta_path).resolve()
#enforce list of band numbers and grab the metadata from the MTL file
band_nums = core.enf_list(band_nums)
band_nums = map(str, band_nums)
OLI_bands = ['1','2','3','4','5','6','7','8','9']
#loop through each band
out_dict=dict()
for band_num in band_nums:
print(f'working on band {band_num}')
if band_num not in OLI_bands:
print("Can only perform reflectance conversion on OLI sensor bands")
print("Skipping band {0}".format(band_num))
continue
#create the band name
str_path = str(meta_path)
band_path = Path(str_path.replace("MTL.txt",f"B{band_num}.TIF"))
with rasterio.open(str(band_path)) as raster:
Qcal = raster.read(1)
hit = (Qcal == 0)
Qcal=Qcal.astype(np.float32)
Qcal[hit]=np.nan
out_dict[int(band_num)]=calc_radiance_8(Qcal,band_num,meta_path)
return out_dict
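# Illustrative usage sketch (added; the metadata path below is a made-up example):
#   rad = toa_radiance_8([4, 5], '/path/to/LC08_scene_MTL.txt')
#   nir_rad = rad[5]   # 2-D array of radiance in W/m^2/sr/micron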
def toa_radiance_457(band_nums, meta_path, outdir = None):
"""
Top of Atmosphere radiance (in Watts/(square meter x steradians x micrometers))
conversion for Landsat 4, 5, or 7 level 1 data.
See link below for details:
see here http://landsat.usgs.gov/Landsat8_Using_Product.php
Parameters
----------
band_nums: list
A list of desired band numbers such as [3, 4, 5]
meta_path: str or Path object
The full filepath to the MTL.txt metadata file for those bands
Returns
-------
out_dict: dict
dictionary with band_num as keys and TOA radiance (W/m2/sr/um) as values
"""
meta_path = Path(meta_path).resolve()
band_nums = core.enf_list(band_nums)
band_nums = map(str, band_nums)
#Calculating values for each band
out_dict={}
for band_num in band_nums:
        # calc_radiance_457 reads the band counts itself from the file next to meta_path
        out_dict[int(band_num)] = calc_radiance_457(None, band_num, meta_path)
return out_dict
|
[
"pathlib.Path"
] |
[((5409, 5424), 'pathlib.Path', 'Path', (['meta_path'], {}), '(meta_path)\n', (5413, 5424), False, 'from pathlib import Path\n'), ((7077, 7092), 'pathlib.Path', 'Path', (['meta_path'], {}), '(meta_path)\n', (7081, 7092), False, 'from pathlib import Path\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
import h5py  # h5py is a common package to interact with
# a dataset that is stored on an H5 file.
#from lr_utils import load_dataset
#load datasets
#Load lr_utils for loading train and testing datasets
def load_dataset():
train_dataset = h5py.File('/Users/surat/PycharmProjects/start87/datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('/Users/surat/PycharmProjects/start87/datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Example of a picture
index = 50
example = train_set_x_orig[index]
plt.imshow(train_set_x_orig[index])
plt.show()
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# Reshape the training and test examples: data sets of images of shape (num_px, num_px, 3)
# are flattened into single vectors of shape (num_px * num_px * 3, 1).
# A trick when you want to flatten a matrix X of shape (a, b, c, d) to a matrix X_flatten
# of shape (b * c * d, a) is to use: X_flatten = X.reshape(X.shape[0], -1).T  # X.T is the transpose of X
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], 64*64*3).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
#To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel
# value is actually a vector of three numbers ranging from 0 to 255.
# One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you
# subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation
# of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to
# just divide every row of the dataset by 255 (the maximum value of a pixel channel).Let's standardize our dataset.
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
print('number of train datasets =' + str(train_set_x.shape))
print('number of test datasets =' + str (test_set_x.shape))
#Key steps: -
# 1. Initialize the parameters of the model
# 2. Learn the parameters for the model by minimizing the cost
# 3. Use the learned parameters to make predictions (on the test set)
# 4. Analyse the results and conclude
#algorithm building:
# The main steps for building a Neural Network are:
# Define the model structure (such as number of input features)
# Initialize the model's parameters
# Loop:
# Calculate current loss (forward propagation)
# Calculate current gradient (backward propagation)
# Update parameters (gradient descent)
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1. / (1 + np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
###initialize_with_zeros
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
w = np.zeros(shape=(dim, 1), dtype=np.float32)
b = 0
assert (w.shape == (dim, 1))
assert (isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
#forward and backward propagation
#Implement a function propagate() that computes the cost function and its gradient.
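# For reference, propagate() below computes the logistic-regression cost and gradients:
#   A = sigmoid(w.T X + b)
#   J(w, b) = -(1/m) * sum( Y*log(A) + (1 - Y)*log(1 - A) )
#   dJ/dw = (1/m) * X (A - Y).T      dJ/db = (1/m) * sum(A - Y)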
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
A = sigmoid(np.dot(w.T, X) + b) # compute activation
cost = (-1. / m) * np.sum((Y * np.log(A) + (1 - Y) * np.log(1 - A)), axis=1) # compute cost
# BACKWARD PROPAGATION (TO FIND GRAD)
dw = (1. / m) * np.dot(X, ((A - Y).T))
db = (1. / m) * np.sum(A - Y, axis=1)
assert (dw.shape == w.shape)
assert (db.dtype == float)
cost = np.squeeze(cost)
assert (cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
##OPTIMIZATION
# initialized your parameters.
# to compute a cost function and its gradient.
# update the parameters using gradient descent
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
grads, cost = propagate(w=w, b=b, X=X, Y=Y)
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
w = w - learning_rate * dw
b = b - learning_rate * db
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training examples
if print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
##PREDICTION PART
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1, m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
A = sigmoid(np.dot(w.T, X) + b)
[print(x) for x in A]
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
if A[0, i] >= 0.5:
Y_prediction[0, i] = 1
else:
Y_prediction[0, i] = 0
assert (Y_prediction.shape == (1, m))
return Y_prediction
print ("predictions = " + str(predict(w, b, X)))
## MERGE ALL FUNCTIONS INTO A MODEL
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train": Y_prediction_train,
"w": w,
"b": b,
"learning_rate": learning_rate,
"num_iterations": num_iterations}
return d
d = model(train_set_x, train_set_y, test_set_x, test_set_y,
num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# Example of a picture that was wrongly classified.
index = 49
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
plt.show()
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \""
+ classes[int(d["Y_prediction_test"][0,index])].decode("utf-8")
+ "\" picture.")
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
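# Hedged follow-up sketch (not part of the original script): reuse the learned
# parameters returned in d to classify a single flattened test image.
single_example = test_set_x[:, index].reshape(-1, 1)
single_prediction = predict(d["w"], d["b"], single_example)
print("single-image prediction for index " + str(index) + ": " + str(int(single_prediction[0, 0])))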
|
[
"h5py.File",
"matplotlib.pyplot.show",
"numpy.dot",
"matplotlib.pyplot.plot",
"numpy.sum",
"numpy.log",
"matplotlib.pyplot.imshow",
"numpy.abs",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((1383, 1418), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_set_x_orig[index]'], {}), '(train_set_x_orig[index])\n', (1393, 1418), True, 'import matplotlib.pyplot as plt\n'), ((1420, 1430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1428, 1430), True, 'import matplotlib.pyplot as plt\n'), ((12747, 12757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12755, 12757), True, 'import matplotlib.pyplot as plt\n'), ((12983, 13005), 'numpy.squeeze', 'np.squeeze', (["d['costs']"], {}), "(d['costs'])\n", (12993, 13005), True, 'import numpy as np\n'), ((13007, 13022), 'matplotlib.pyplot.plot', 'plt.plot', (['costs'], {}), '(costs)\n', (13015, 13022), True, 'import matplotlib.pyplot as plt\n'), ((13024, 13042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (13034, 13042), True, 'import matplotlib.pyplot as plt\n'), ((13044, 13083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations (per hundreds)"""'], {}), "('iterations (per hundreds)')\n", (13054, 13083), True, 'import matplotlib.pyplot as plt\n'), ((13141, 13151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13149, 13151), True, 'import matplotlib.pyplot as plt\n'), ((335, 422), 'h5py.File', 'h5py.File', (['"""/Users/surat/PycharmProjects/start87/datasets/train_catvnoncat.h5"""', '"""r"""'], {}), "('/Users/surat/PycharmProjects/start87/datasets/train_catvnoncat.h5',\n 'r')\n", (344, 422), False, 'import h5py\n'), ((443, 484), 'numpy.array', 'np.array', (["train_dataset['train_set_x'][:]"], {}), "(train_dataset['train_set_x'][:])\n", (451, 484), True, 'import numpy as np\n'), ((536, 577), 'numpy.array', 'np.array', (["train_dataset['train_set_y'][:]"], {}), "(train_dataset['train_set_y'][:])\n", (544, 577), True, 'import numpy as np\n'), ((625, 711), 'h5py.File', 'h5py.File', (['"""/Users/surat/PycharmProjects/start87/datasets/test_catvnoncat.h5"""', '"""r"""'], {}), "('/Users/surat/PycharmProjects/start87/datasets/test_catvnoncat.h5',\n 'r')\n", (634, 711), False, 'import h5py\n'), ((731, 770), 'numpy.array', 'np.array', (["test_dataset['test_set_x'][:]"], {}), "(test_dataset['test_set_x'][:])\n", (739, 770), True, 'import numpy as np\n'), ((820, 859), 'numpy.array', 'np.array', (["test_dataset['test_set_y'][:]"], {}), "(test_dataset['test_set_y'][:])\n", (828, 859), True, 'import numpy as np\n'), ((901, 942), 'numpy.array', 'np.array', (["test_dataset['list_classes'][:]"], {}), "(test_dataset['list_classes'][:])\n", (909, 942), True, 'import numpy as np\n'), ((5344, 5386), 'numpy.zeros', 'np.zeros', ([], {'shape': '(dim, 1)', 'dtype': 'np.float32'}), '(shape=(dim, 1), dtype=np.float32)\n', (5352, 5386), True, 'import numpy as np\n'), ((6842, 6858), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (6852, 6858), True, 'import numpy as np\n'), ((6982, 7002), 'numpy.array', 'np.array', (['[[1], [2]]'], {}), '([[1], [2]])\n', (6990, 7002), True, 'import numpy as np\n'), ((7006, 7032), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (7014, 7032), True, 'import numpy as np\n'), ((7031, 7049), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (7039, 7049), True, 'import numpy as np\n'), ((9838, 9854), 'numpy.zeros', 'np.zeros', (['(1, m)'], {}), '((1, m))\n', (9846, 9854), True, 'import numpy as np\n'), ((6694, 6714), 'numpy.dot', 'np.dot', (['X', '(A - Y).T'], {}), '(X, (A - Y).T)\n', (6700, 6714), True, 'import numpy as np\n'), ((6738, 6759), 'numpy.sum', 'np.sum', (['(A - Y)'], {'axis': '(1)'}), '(A - Y, axis=1)\n', 
(6744, 6759), True, 'import numpy as np\n'), ((4779, 4789), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (4785, 4789), True, 'import numpy as np\n'), ((6482, 6496), 'numpy.dot', 'np.dot', (['w.T', 'X'], {}), '(w.T, X)\n', (6488, 6496), True, 'import numpy as np\n'), ((10003, 10017), 'numpy.dot', 'np.dot', (['w.T', 'X'], {}), '(w.T, X)\n', (10009, 10017), True, 'import numpy as np\n'), ((4877, 4893), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (4885, 4893), True, 'import numpy as np\n'), ((6560, 6569), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (6566, 6569), True, 'import numpy as np\n'), ((6582, 6595), 'numpy.log', 'np.log', (['(1 - A)'], {}), '(1 - A)\n', (6588, 6595), True, 'import numpy as np\n'), ((1500, 1533), 'numpy.squeeze', 'np.squeeze', (['train_set_y[:, index]'], {}), '(train_set_y[:, index])\n', (1510, 1533), True, 'import numpy as np\n'), ((12060, 12096), 'numpy.abs', 'np.abs', (['(Y_prediction_train - Y_train)'], {}), '(Y_prediction_train - Y_train)\n', (12066, 12096), True, 'import numpy as np\n'), ((12160, 12194), 'numpy.abs', 'np.abs', (['(Y_prediction_test - Y_test)'], {}), '(Y_prediction_test - Y_test)\n', (12166, 12194), True, 'import numpy as np\n')]
|
import botorch
import gpytorch
from torch import Tensor
from gpytorch.kernels.kernel import Kernel
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.priors.torch_priors import GammaPrior
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.models.utils import gpt_posterior_settings
from dagbo.models.dag.node import Node, SingleTaskGP_Node
def make_gps(x: Tensor, y: Tensor, gp_name: str) -> SingleTaskGP:
# noiseless modelling
#likelihood = gpytorch.likelihoods.GaussianLikelihood()
#likelihood.noise = 1e-4
#likelihood.noise_covar.raw_noise.requires_grad_(False)
# get model
#model = SingleTaskGP(x, y, likelihood)
model = SingleTaskGP(x, y)
# equip
#model.likelihood = likelihood
model.covar_module = make_kernels(gp_name)
return model
def make_kernels(name: str) -> Kernel:
if name == "SE":
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
elif name == "RQ":
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RQKernel())
elif name == "MA":
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(
nu=2.5, lengthscale_prior=GammaPrior(3.0, 6.0)),
outputscale_prior=GammaPrior(
2.0, 0.15))
return kernel
def fit_gpr(model: SingleTaskGP) -> None:
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll.train()
fit_gpytorch_model(mll) # by default it fits with scipy, so L-BFGS-B
return None
def make_node(x: Tensor, y: Tensor, gp_name: str):
"""
for test purpose
check if Node in Dag is a sound gp
"""
class gp(Node):
def __init__(self, input_names, output_name, train_inputs,
train_targets):
super().__init__(input_names, output_name, train_inputs,
train_targets)
self.num_outputs = 1
def posterior(self,
X: Tensor,
observation_noise=False,
**kwargs) -> GPyTorchPosterior:
self.eval() # make sure model is in eval mode
with gpt_posterior_settings():
mvn = self(X)
posterior = GPyTorchPosterior(mvn=mvn)
return posterior
if len(x.shape) == 2:
x = x.unsqueeze(0)
if len(y.shape) == 2:
y = y.unsqueeze(0)
y = y.squeeze(-1)
#print("node input:")
#print(x.shape) # [batch_size, q, dim]
#print(y.shape) # [batch_size, q]
#print()
model = gp([f"x{i}" for i in range(20)], "final", x, y)
model.covar = make_kernels(gp_name)
return model
def make_SingleTaskGP_node(x: Tensor, y: Tensor, gp_name: str):
"""
for test purpose
if SingleTaskGP_Node in Dag is a sound gp
"""
if len(x.shape) == 2:
x = x.unsqueeze(0)
if len(y.shape) == 2:
y = y.unsqueeze(0)
y = y.squeeze(-1)
#print("node input:")
#print(x.shape) # [batch_size, q, dim]
#print(y.shape) # [batch_size, q]
#print()
class gp(SingleTaskGP_Node):
def __init__(self, input_names, output_name, train_inputs,
train_targets):
super().__init__(input_names, output_name, train_inputs,
train_targets)
# expose posterior to print shape
def posterior(self,
X: Tensor,
observation_noise=False,
**kwargs) -> GPyTorchPosterior:
self.eval() # make sure model is in eval mode
with gpt_posterior_settings():
mvn = self(X)
#print()
#print("X::: ", X.shape)
#print(X)
#print("mvn:::")
#print(mvn)
#print(mvn.loc)
#print()
#print(mvn.loc) # can verify identical mvn
posterior = GPyTorchPosterior(mvn=mvn)
return posterior
#model = SingleTaskGP_Node([f"x{i}" for i in range(20)], "final", x, y)
model = gp([f"x{i}" for i in range(20)], "final", x, y)
model.covar_module = make_kernels(gp_name)
return model
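# Hedged usage sketch (not part of the original module): build a SingleTaskGP with one
# of the kernels above, fit it, and query the posterior. The synthetic tensors and the
# "MA" kernel name are illustrative assumptions.
if __name__ == "__main__":
    import torch

    train_x = torch.rand(16, 3)
    train_y = torch.sin(train_x.sum(dim=-1, keepdim=True))
    gp = make_gps(train_x, train_y, "MA")
    fit_gpr(gp)
    posterior = gp.posterior(torch.rand(4, 3))
    print(posterior.mean.shape)  # expected: torch.Size([4, 1])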
|
[
"gpytorch.mlls.exact_marginal_log_likelihood.ExactMarginalLogLikelihood",
"botorch.posteriors.gpytorch.GPyTorchPosterior",
"gpytorch.kernels.RBFKernel",
"gpytorch.kernels.RQKernel",
"gpytorch.priors.torch_priors.GammaPrior",
"botorch.models.SingleTaskGP",
"botorch.models.utils.gpt_posterior_settings",
"botorch.fit.fit_gpytorch_model"
] |
[((808, 826), 'botorch.models.SingleTaskGP', 'SingleTaskGP', (['x', 'y'], {}), '(x, y)\n', (820, 826), False, 'from botorch.models import SingleTaskGP\n'), ((1546, 1597), 'gpytorch.mlls.exact_marginal_log_likelihood.ExactMarginalLogLikelihood', 'ExactMarginalLogLikelihood', (['model.likelihood', 'model'], {}), '(model.likelihood, model)\n', (1572, 1597), False, 'from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\n'), ((1618, 1641), 'botorch.fit.fit_gpytorch_model', 'fit_gpytorch_model', (['mll'], {}), '(mll)\n', (1636, 1641), False, 'from botorch.fit import fit_gpytorch_model\n'), ((1047, 1075), 'gpytorch.kernels.RBFKernel', 'gpytorch.kernels.RBFKernel', ([], {}), '()\n', (1073, 1075), False, 'import gpytorch\n'), ((2426, 2452), 'botorch.posteriors.gpytorch.GPyTorchPosterior', 'GPyTorchPosterior', ([], {'mvn': 'mvn'}), '(mvn=mvn)\n', (2443, 2452), False, 'from botorch.posteriors.gpytorch import GPyTorchPosterior\n'), ((4113, 4139), 'botorch.posteriors.gpytorch.GPyTorchPosterior', 'GPyTorchPosterior', ([], {'mvn': 'mvn'}), '(mvn=mvn)\n', (4130, 4139), False, 'from botorch.posteriors.gpytorch import GPyTorchPosterior\n'), ((1146, 1173), 'gpytorch.kernels.RQKernel', 'gpytorch.kernels.RQKernel', ([], {}), '()\n', (1171, 1173), False, 'import gpytorch\n'), ((2346, 2370), 'botorch.models.utils.gpt_posterior_settings', 'gpt_posterior_settings', ([], {}), '()\n', (2368, 2370), False, 'from botorch.models.utils import gpt_posterior_settings\n'), ((3795, 3819), 'botorch.models.utils.gpt_posterior_settings', 'gpt_posterior_settings', ([], {}), '()\n', (3817, 3819), False, 'from botorch.models.utils import gpt_posterior_settings\n'), ((1400, 1421), 'gpytorch.priors.torch_priors.GammaPrior', 'GammaPrior', (['(2.0)', '(0.15)'], {}), '(2.0, 0.15)\n', (1410, 1421), False, 'from gpytorch.priors.torch_priors import GammaPrior\n'), ((1313, 1333), 'gpytorch.priors.torch_priors.GammaPrior', 'GammaPrior', (['(3.0)', '(6.0)'], {}), '(3.0, 6.0)\n', (1323, 1333), False, 'from gpytorch.priors.torch_priors import GammaPrior\n')]
|
import math
def poly2(a,b,c):
''' solves quadratic equations of the
form ax^2 + bx + c = 0 '''
x1 = (-b + math.sqrt(b**2 - 4*a*c))/(2*a)
x2 = (-b - math.sqrt(b**2 - 4*a*c))/(2*a)
return x1, x2
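# Hedged usage example (not part of the original module): x^2 - 3x + 2 = 0 factors as
# (x - 1)(x - 2), so the roots are 2.0 and 1.0. poly2 assumes a != 0 and a non-negative
# discriminant; otherwise it raises ZeroDivisionError or ValueError respectively.
if __name__ == "__main__":
    print(poly2(1, -3, 2))  # (2.0, 1.0)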
|
[
"math.sqrt"
] |
[((123, 152), 'math.sqrt', 'math.sqrt', (['(b ** 2 - 4 * a * c)'], {}), '(b ** 2 - 4 * a * c)\n', (132, 152), False, 'import math\n'), ((169, 198), 'math.sqrt', 'math.sqrt', (['(b ** 2 - 4 * a * c)'], {}), '(b ** 2 - 4 * a * c)\n', (178, 198), False, 'import math\n')]
|
# models.py
from flask import abort, redirect, request, url_for
from flask_admin import form
from flask_admin.contrib.sqla import ModelView
from flask_security import current_user, RoleMixin, UserMixin
from wtforms import SelectField, TextAreaField
from reel_miami import db
# Database models
class Venue(db.Model):
__tablename__ = 'venues'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
address1 = db.Column(db.String, nullable=False)
address2 = db.Column(db.String, nullable=True)
city = db.Column(db.String, nullable=False)
state = db.Column(db.String, nullable=False)
postal_code = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=False)
venue_photo = db.Column(db.String(20), nullable=False,
default='default.jpg')
web_url = db.Column(db.String, nullable=True)
phone_number = db.Column(db.String, nullable=True)
films = db.relationship('Film', backref='venue', lazy=True)
def __repr__(self):
return f'<Venue(name={self.name})>'
def __str__(self):
return f'{self.name}'
class Film(db.Model):
__tablename__ = 'films'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
running_time = db.Column(db.String, nullable=False)
director = db.Column(db.String, nullable=False)
year = db.Column(db.String, nullable=False)
venue_id = db.Column(db.Integer, db.ForeignKey('venues.id'))
showtimes = db.relationship('Showtime', backref='film', lazy=True)
def __repr__(self):
return f'<Film(name={self.name})>'
def __str__(self):
return f'{self.name}'
class Showtime(db.Model):
__tablename__ = 'showtimes'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.String, nullable=False)
time = db.Column(db.String, nullable=False)
ticketing_link = db.Column(db.String, nullable=False)
film_id = db.Column(db.Integer, db.ForeignKey('films.id'))
def __repr__(self):
return f'<Showtime(date={self.date}, time={self.time})>'
def __str__(self):
        return f'{self.date} {self.time}'
'''
The Flask-Security models that are stored in the database.
'''
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(),
db.ForeignKey('users.id')),
db.Column('role_id', db.Integer(),
db.ForeignKey('roles.id')))
class Role(db.Model, RoleMixin):
__tablename__ = 'roles'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
def __str__(self):
return f'{self.name}'
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
def __str__(self):
return f'{self.email}'
"""
The following classes modify the primary Flask-Admin ModelView in order to
accomplish various tasks.
"""
class AdminVenueView(ModelView):
column_labels = {'address1': 'Address', 'address2': 'Address 2',
'web_url': 'Website'}
column_default_sort = 'name'
    column_exclude_list = ('description',)
form_columns = ('name', 'address1', 'address2', 'city', 'state',
'postal_code', 'description', 'web_url', 'phone_number',
'venue_photo')
form_excluded_columns = ['films']
form_overrides = {'description': TextAreaField}
form_widget_args = {'address1': {
'placeholder': 'Primary address'},
'address2': {
'placeholder': 'Suite/Bulding/Other'
},
'description': {
'rows': 5,
'style': 'color: black',
'maxlength': 1000,
'placeholder': 'max 1000 characters',
'spellcheck': 'true'
},
'phone_number': {
'placeholder': '123.456.7890'
}
}
form_extra_fields = {
'venue_photo': form.ImageUploadField(
'Venue Photo',
base_path='reel_miami/static/img/venues',
url_relative_path='img/venues/',
),
'state': SelectField(label='State', choices=[('FL', 'Florida')],
default='FL')
}
def is_accessible(self):
return (current_user.is_active
and current_user.is_authenticated
and (current_user.has_role('Superuser')
or current_user.has_role('User')))
def _handle_view(self, name, **kwargs):
"""
Override builtin _handle_view in order to redirect users when a view is
not accessible.
"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
class AdminRoleView(ModelView):
def is_accessible(self):
return (current_user.is_active
and current_user.is_authenticated
and current_user.has_role('Superuser'))
def _handle_view(self, name, **kwargs):
"""
Override builtin _handle_view in order to redirect users when a view is
not accessible.
"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
class AdminUserView(ModelView):
column_exclude_list = 'password'
def is_accessible(self):
return (current_user.is_active
and current_user.is_authenticated
and current_user.has_role('Superuser'))
def _handle_view(self, name, **kwargs):
"""
Override builtin _handle_view in order to redirect users when a view is
not accessible.
"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
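# Hedged wiring sketch (not part of this module): these ModelView subclasses are meant to
# be registered on a Flask-Admin instance elsewhere in the application, roughly as below.
# The `app` object and the admin name/template are assumptions; only the view/model
# pairings come from this file.
#
#     from flask_admin import Admin
#     admin = Admin(app, name='Reel Miami', template_mode='bootstrap3')
#     admin.add_view(AdminVenueView(Venue, db.session))
#     admin.add_view(AdminRoleView(Role, db.session))
#     admin.add_view(AdminUserView(User, db.session))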
|
[
"wtforms.SelectField",
"reel_miami.db.backref",
"reel_miami.db.String",
"flask_admin.form.ImageUploadField",
"reel_miami.db.relationship",
"reel_miami.db.Boolean",
"flask_security.current_user.has_role",
"flask.abort",
"reel_miami.db.Integer",
"flask.url_for",
"reel_miami.db.ForeignKey",
"reel_miami.db.Column",
"reel_miami.db.DateTime"
] |
[((360, 399), 'reel_miami.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (369, 399), False, 'from reel_miami import db\n'), ((411, 447), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (420, 447), False, 'from reel_miami import db\n'), ((463, 499), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (472, 499), False, 'from reel_miami import db\n'), ((515, 550), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(True)'}), '(db.String, nullable=True)\n', (524, 550), False, 'from reel_miami import db\n'), ((562, 598), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (571, 598), False, 'from reel_miami import db\n'), ((611, 647), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (620, 647), False, 'from reel_miami import db\n'), ((666, 702), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (675, 702), False, 'from reel_miami import db\n'), ((721, 757), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (730, 757), False, 'from reel_miami import db\n'), ((882, 917), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(True)'}), '(db.String, nullable=True)\n', (891, 917), False, 'from reel_miami import db\n'), ((937, 972), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(True)'}), '(db.String, nullable=True)\n', (946, 972), False, 'from reel_miami import db\n'), ((986, 1037), 'reel_miami.db.relationship', 'db.relationship', (['"""Film"""'], {'backref': '"""venue"""', 'lazy': '(True)'}), "('Film', backref='venue', lazy=True)\n", (1001, 1037), False, 'from reel_miami import db\n'), ((1223, 1262), 'reel_miami.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1232, 1262), False, 'from reel_miami import db\n'), ((1274, 1310), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (1283, 1310), False, 'from reel_miami import db\n'), ((1330, 1366), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (1339, 1366), False, 'from reel_miami import db\n'), ((1382, 1418), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (1391, 1418), False, 'from reel_miami import db\n'), ((1430, 1466), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (1439, 1466), False, 'from reel_miami import db\n'), ((1550, 1604), 'reel_miami.db.relationship', 'db.relationship', (['"""Showtime"""'], {'backref': '"""film"""', 'lazy': '(True)'}), "('Showtime', backref='film', lazy=True)\n", (1565, 1604), False, 'from reel_miami import db\n'), ((1797, 1836), 'reel_miami.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1806, 1836), False, 'from reel_miami import db\n'), ((1848, 1884), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (1857, 1884), False, 'from reel_miami import db\n'), ((1896, 1932), 'reel_miami.db.Column', 'db.Column', (['db.String'], 
{'nullable': '(False)'}), '(db.String, nullable=False)\n', (1905, 1932), False, 'from reel_miami import db\n'), ((1954, 1990), 'reel_miami.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (1963, 1990), False, 'from reel_miami import db\n'), ((2881, 2920), 'reel_miami.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (2890, 2920), False, 'from reel_miami import db\n'), ((786, 799), 'reel_miami.db.String', 'db.String', (['(20)'], {}), '(20)\n', (795, 799), False, 'from reel_miami import db\n'), ((1505, 1531), 'reel_miami.db.ForeignKey', 'db.ForeignKey', (['"""venues.id"""'], {}), "('venues.id')\n", (1518, 1531), False, 'from reel_miami import db\n'), ((2028, 2053), 'reel_miami.db.ForeignKey', 'db.ForeignKey', (['"""films.id"""'], {}), "('films.id')\n", (2041, 2053), False, 'from reel_miami import db\n'), ((2352, 2364), 'reel_miami.db.Integer', 'db.Integer', ([], {}), '()\n', (2362, 2364), False, 'from reel_miami import db\n'), ((2399, 2424), 'reel_miami.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (2412, 2424), False, 'from reel_miami import db\n'), ((2471, 2483), 'reel_miami.db.Integer', 'db.Integer', ([], {}), '()\n', (2481, 2483), False, 'from reel_miami import db\n'), ((2518, 2543), 'reel_miami.db.ForeignKey', 'db.ForeignKey', (['"""roles.id"""'], {}), "('roles.id')\n", (2531, 2543), False, 'from reel_miami import db\n'), ((2629, 2641), 'reel_miami.db.Integer', 'db.Integer', ([], {}), '()\n', (2639, 2641), False, 'from reel_miami import db\n'), ((2682, 2695), 'reel_miami.db.String', 'db.String', (['(80)'], {}), '(80)\n', (2691, 2695), False, 'from reel_miami import db\n'), ((2738, 2752), 'reel_miami.db.String', 'db.String', (['(255)'], {}), '(255)\n', (2747, 2752), False, 'from reel_miami import db\n'), ((2943, 2957), 'reel_miami.db.String', 'db.String', (['(255)'], {}), '(255)\n', (2952, 2957), False, 'from reel_miami import db\n'), ((2997, 3011), 'reel_miami.db.String', 'db.String', (['(255)'], {}), '(255)\n', (3006, 3011), False, 'from reel_miami import db\n'), ((3036, 3048), 'reel_miami.db.Boolean', 'db.Boolean', ([], {}), '()\n', (3046, 3048), False, 'from reel_miami import db\n'), ((3079, 3092), 'reel_miami.db.DateTime', 'db.DateTime', ([], {}), '()\n', (3090, 3092), False, 'from reel_miami import db\n'), ((4778, 4894), 'flask_admin.form.ImageUploadField', 'form.ImageUploadField', (['"""Venue Photo"""'], {'base_path': '"""reel_miami/static/img/venues"""', 'url_relative_path': '"""img/venues/"""'}), "('Venue Photo', base_path=\n 'reel_miami/static/img/venues', url_relative_path='img/venues/')\n", (4799, 4894), False, 'from flask_admin import form\n'), ((4975, 5044), 'wtforms.SelectField', 'SelectField', ([], {'label': '"""State"""', 'choices': "[('FL', 'Florida')]", 'default': '"""FL"""'}), "(label='State', choices=[('FL', 'Florida')], default='FL')\n", (4986, 5044), False, 'from wtforms import SelectField, TextAreaField\n'), ((3189, 3224), 'reel_miami.db.backref', 'db.backref', (['"""users"""'], {'lazy': '"""dynamic"""'}), "('users', lazy='dynamic')\n", (3199, 3224), False, 'from reel_miami import db\n'), ((5930, 5964), 'flask_security.current_user.has_role', 'current_user.has_role', (['"""Superuser"""'], {}), "('Superuser')\n", (5951, 5964), False, 'from flask_security import current_user, RoleMixin, UserMixin\n'), ((6615, 6649), 'flask_security.current_user.has_role', 'current_user.has_role', (['"""Superuser"""'], {}), "('Superuser')\n", 
(6636, 6649), False, 'from flask_security import current_user, RoleMixin, UserMixin\n'), ((5228, 5262), 'flask_security.current_user.has_role', 'current_user.has_role', (['"""Superuser"""'], {}), "('Superuser')\n", (5249, 5262), False, 'from flask_security import current_user, RoleMixin, UserMixin\n'), ((5287, 5316), 'flask_security.current_user.has_role', 'current_user.has_role', (['"""User"""'], {}), "('User')\n", (5308, 5316), False, 'from flask_security import current_user, RoleMixin, UserMixin\n'), ((5627, 5637), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (5632, 5637), False, 'from flask import abort, redirect, request, url_for\n'), ((6274, 6284), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (6279, 6284), False, 'from flask import abort, redirect, request, url_for\n'), ((6959, 6969), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (6964, 6969), False, 'from flask import abort, redirect, request, url_for\n'), ((5712, 5755), 'flask.url_for', 'url_for', (['"""security.login"""'], {'next': 'request.url'}), "('security.login', next=request.url)\n", (5719, 5755), False, 'from flask import abort, redirect, request, url_for\n'), ((6359, 6402), 'flask.url_for', 'url_for', (['"""security.login"""'], {'next': 'request.url'}), "('security.login', next=request.url)\n", (6366, 6402), False, 'from flask import abort, redirect, request, url_for\n'), ((7044, 7087), 'flask.url_for', 'url_for', (['"""security.login"""'], {'next': 'request.url'}), "('security.login', next=request.url)\n", (7051, 7087), False, 'from flask import abort, redirect, request, url_for\n')]
|
import os
import unittest
import torch
import numpy as np
from PIL import Image
from embryovision import util
from embryovision.tests.common import get_loadable_filenames
class TestReadImage(unittest.TestCase):
def test_read_image_returns_numpy(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertIsInstance(image, np.ndarray)
def test_read_image_returns_correct_shape(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertEqual(image.ndim, 3)
self.assertEqual(image.shape[2], 3)
def test_read_image_returns_float_on_01(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertGreaterEqual(image.min(), 0)
self.assertLessEqual(image.max(), 1)
class TestReadImageForTorch(unittest.TestCase):
def test_read_image_for_torch_returns_torch(self):
filenames = get_loadable_filenames()
as_torch = util.read_images_for_torch(filenames)
self.assertIsInstance(as_torch, torch.Tensor)
def test_read_image_for_torch_returns_correct_shape(self):
        # torch expects (n_images, channels, height, width)
filenames = get_loadable_filenames()
as_torch = util.read_images_for_torch(filenames)
n_channels = 3
self.assertEqual(as_torch.size()[:2], (len(filenames), n_channels))
class TestLoadAndCropImage(unittest.TestCase):
def test_returns_pil_image(self):
filename = get_loadable_filenames()[0]
box = (1, 1, 2, 2)
image = util.load_and_crop_image(filename, box)
self.assertIsInstance(image, Image.Image)
def test_output_image_is_correct_shape(self):
filename = get_loadable_filenames()[0]
box = (1, 1, 100, 100)
shape = (150, 140)
image = util.load_and_crop_image(filename, box, output_shape=shape)
self.assertEqual(image.size, shape)
def test_crop_box_is_used_with_resize_nearest(self):
# we crop to a 1 px image, and check that all image values
# are the same value
filename = get_loadable_filenames()[0]
box = (1, 1, 2, 2)
image = util.load_and_crop_image(filename, box)
correct_px_value = np.array(Image.open(filename))[box[0], box[1]]
self.assertTrue(np.all(np.array(image) == correct_px_value))
class TestLoadImageIntoRam(unittest.TestCase):
def test_load_image_as_bytes_io(self):
filename = get_loadable_filenames()[0]
loaded_into_ram = util.load_image_into_ram(filename)
image0 = util.read_image(filename)
image1 = util.read_image(loaded_into_ram)
self.assertTrue(np.all(image0 == image1))
class TestTransformingCollection(unittest.TestCase):
def test_getitem_transforms(self):
np.random.seed(400)
data = np.random.randn(20)
transform = lambda x: -2 * x
loader = util.TransformingCollection(data, transform)
index = 0
self.assertEqual(transform(data[index]), loader[index])
def test_len(self):
data = np.random.randn(20)
transform = lambda x: -2 * x
loader = util.TransformingCollection(data, transform)
self.assertEqual(len(loader), data.size)
def test_on_images(self):
filenames = get_loadable_filenames()
images_ram = [util.load_image_into_ram(nm) for nm in filenames]
loader = util.TransformingCollection(images_ram, util.read_image)
index = 0
image_filename = util.read_image(filenames[index])
image_loader = loader[index]
self.assertTrue(np.all(image_filename == image_loader))
class TestMisc(unittest.TestCase):
def test_split_all(self):
dummy_folder = '/some/long/directory/structure/'
filename = 'D2017_05_05_S1477_I313_pdb/WELL06/F0/016.jpg'
fullname = os.path.join(dummy_folder, filename)
fullname_f0_split = util.split_all(fullname)
correct_answer = (
'/', 'some', 'long', 'directory', 'structure',
'D2017_05_05_S1477_I313_pdb', 'WELL06', 'F0', '016.jpg')
self.assertEqual(fullname_f0_split, correct_answer)
def test_augment_focus(self):
filename = get_loadable_filenames()[0]
augmented = util.augment_focus(filename)
for foundname, focus_correct in zip(augmented, ['F-15', 'F0', 'F15']):
*head, focus_found, image_number = util.split_all(foundname)
self.assertTrue(os.path.exists(foundname))
self.assertEqual(focus_found, focus_correct)
def test_augment_focus_raises_error_when_no_filename(self):
unloadable_filename = '/some/wrong/directory/structure/001.jpg'
assert not os.path.exists(unloadable_filename)
self.assertRaises(
FileNotFoundError,
util.augment_focus,
unloadable_filename,)
def make_loader():
filenames = get_loadable_filenames()
return util.ImageTransformingCollection(filenames)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"embryovision.util.augment_focus",
"numpy.random.seed",
"numpy.random.randn",
"embryovision.util.read_image",
"os.path.exists",
"embryovision.util.split_all",
"PIL.Image.open",
"embryovision.util.ImageTransformingCollection",
"embryovision.util.TransformingCollection",
"numpy.array",
"embryovision.util.read_images_for_torch",
"embryovision.util.load_and_crop_image",
"os.path.join",
"embryovision.util.load_image_into_ram",
"numpy.all",
"embryovision.tests.common.get_loadable_filenames"
] |
[((4960, 4984), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (4982, 4984), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((4996, 5039), 'embryovision.util.ImageTransformingCollection', 'util.ImageTransformingCollection', (['filenames'], {}), '(filenames)\n', (5028, 5039), False, 'from embryovision import util\n'), ((5073, 5088), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5086, 5088), False, 'import unittest\n'), ((323, 348), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (338, 348), False, 'from embryovision import util\n'), ((515, 540), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (530, 540), False, 'from embryovision import util\n'), ((740, 765), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (755, 765), False, 'from embryovision import util\n'), ((984, 1008), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1006, 1008), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1028, 1065), 'embryovision.util.read_images_for_torch', 'util.read_images_for_torch', (['filenames'], {}), '(filenames)\n', (1054, 1065), False, 'from embryovision import util\n'), ((1254, 1278), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1276, 1278), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1298, 1335), 'embryovision.util.read_images_for_torch', 'util.read_images_for_torch', (['filenames'], {}), '(filenames)\n', (1324, 1335), False, 'from embryovision import util\n'), ((1612, 1651), 'embryovision.util.load_and_crop_image', 'util.load_and_crop_image', (['filename', 'box'], {}), '(filename, box)\n', (1636, 1651), False, 'from embryovision import util\n'), ((1874, 1933), 'embryovision.util.load_and_crop_image', 'util.load_and_crop_image', (['filename', 'box'], {'output_shape': 'shape'}), '(filename, box, output_shape=shape)\n', (1898, 1933), False, 'from embryovision import util\n'), ((2222, 2261), 'embryovision.util.load_and_crop_image', 'util.load_and_crop_image', (['filename', 'box'], {}), '(filename, box)\n', (2246, 2261), False, 'from embryovision import util\n'), ((2571, 2605), 'embryovision.util.load_image_into_ram', 'util.load_image_into_ram', (['filename'], {}), '(filename)\n', (2595, 2605), False, 'from embryovision import util\n'), ((2623, 2648), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (2638, 2648), False, 'from embryovision import util\n'), ((2666, 2698), 'embryovision.util.read_image', 'util.read_image', (['loaded_into_ram'], {}), '(loaded_into_ram)\n', (2681, 2698), False, 'from embryovision import util\n'), ((2851, 2870), 'numpy.random.seed', 'np.random.seed', (['(400)'], {}), '(400)\n', (2865, 2870), True, 'import numpy as np\n'), ((2886, 2905), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (2901, 2905), True, 'import numpy as np\n'), ((2960, 3004), 'embryovision.util.TransformingCollection', 'util.TransformingCollection', (['data', 'transform'], {}), '(data, transform)\n', (2987, 3004), False, 'from embryovision import util\n'), ((3128, 3147), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (3143, 3147), True, 'import numpy as np\n'), ((3202, 3246), 'embryovision.util.TransformingCollection', 'util.TransformingCollection', (['data', 'transform'], {}), 
'(data, transform)\n', (3229, 3246), False, 'from embryovision import util\n'), ((3347, 3371), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (3369, 3371), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((3461, 3517), 'embryovision.util.TransformingCollection', 'util.TransformingCollection', (['images_ram', 'util.read_image'], {}), '(images_ram, util.read_image)\n', (3488, 3517), False, 'from embryovision import util\n'), ((3562, 3595), 'embryovision.util.read_image', 'util.read_image', (['filenames[index]'], {}), '(filenames[index])\n', (3577, 3595), False, 'from embryovision import util\n'), ((3906, 3942), 'os.path.join', 'os.path.join', (['dummy_folder', 'filename'], {}), '(dummy_folder, filename)\n', (3918, 3942), False, 'import os\n'), ((3972, 3996), 'embryovision.util.split_all', 'util.split_all', (['fullname'], {}), '(fullname)\n', (3986, 3996), False, 'from embryovision import util\n'), ((4314, 4342), 'embryovision.util.augment_focus', 'util.augment_focus', (['filename'], {}), '(filename)\n', (4332, 4342), False, 'from embryovision import util\n'), ((279, 303), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (301, 303), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((471, 495), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (493, 495), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((696, 720), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (718, 720), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1541, 1565), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1563, 1565), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1772, 1796), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1794, 1796), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((2151, 2175), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (2173, 2175), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((2517, 2541), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (2539, 2541), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((2723, 2747), 'numpy.all', 'np.all', (['(image0 == image1)'], {}), '(image0 == image1)\n', (2729, 2747), True, 'import numpy as np\n'), ((3394, 3422), 'embryovision.util.load_image_into_ram', 'util.load_image_into_ram', (['nm'], {}), '(nm)\n', (3418, 3422), False, 'from embryovision import util\n'), ((3657, 3695), 'numpy.all', 'np.all', (['(image_filename == image_loader)'], {}), '(image_filename == image_loader)\n', (3663, 3695), True, 'import numpy as np\n'), ((4266, 4290), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (4288, 4290), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((4469, 4494), 'embryovision.util.split_all', 'util.split_all', (['foundname'], {}), '(foundname)\n', (4483, 4494), False, 'from embryovision import util\n'), ((4763, 4798), 'os.path.exists', 'os.path.exists', (['unloadable_filename'], {}), '(unloadable_filename)\n', (4777, 4798), False, 'import os\n'), ((2299, 2319), 'PIL.Image.open', 'Image.open', 
(['filename'], {}), '(filename)\n', (2309, 2319), False, 'from PIL import Image\n'), ((4523, 4548), 'os.path.exists', 'os.path.exists', (['foundname'], {}), '(foundname)\n', (4537, 4548), False, 'import os\n'), ((2368, 2383), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2376, 2383), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
# Number of Epochs
num_epochs = 2
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 100
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 100
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# =====================================================================================
# =====================================================================================
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
# =====================================================================================
# =====================================================================================
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
# TODO: Implement Function
inp = loaded_graph.get_tensor_by_name("input:0")
istate = loaded_graph.get_tensor_by_name("initial_state:0")
fstate = loaded_graph.get_tensor_by_name("final_state:0")
probs = loaded_graph.get_tensor_by_name("probs:0")
return inp, istate, fstate, probs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
# =====================================================================================
# =====================================================================================
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
    :param probabilities: Probabilities of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
# TODO: Implement Function
index = np.random.choice(len(probabilities), p=probabilities)
return int_to_vocab[index]
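# For example: with probabilities [0.1, 0.7, 0.2] and int_to_vocab {0: 'homer', 1: 'moe',
# 2: 'barney'}, pick_word returns 'moe' about 70% of the time. Sampling (rather than
# taking the argmax) keeps the generated script varied between runs.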
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
# =====================================================================================
# =====================================================================================
gen_length = 20
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length - 1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
# =====================================================================================
# =====================================================================================
|
[
"helper.save_params",
"tensorflow.train.import_meta_graph",
"helper.load_params",
"tensorflow.Session",
"problem_unittests.test_pick_word",
"problem_unittests.test_get_tensors",
"helper.load_preprocess",
"numpy.array",
"tensorflow.Graph"
] |
[((531, 573), 'helper.save_params', 'helper.save_params', (['(seq_length, save_dir)'], {}), '((seq_length, save_dir))\n', (549, 573), False, 'import helper\n'), ((839, 863), 'helper.load_preprocess', 'helper.load_preprocess', ([], {}), '()\n', (861, 863), False, 'import helper\n'), ((887, 907), 'helper.load_params', 'helper.load_params', ([], {}), '()\n', (905, 907), False, 'import helper\n'), ((1736, 1771), 'problem_unittests.test_get_tensors', 'tests.test_get_tensors', (['get_tensors'], {}), '(get_tensors)\n', (1758, 1771), True, 'import problem_unittests as tests\n'), ((2435, 2466), 'problem_unittests.test_pick_word', 'tests.test_pick_word', (['pick_word'], {}), '(pick_word)\n', (2455, 2466), True, 'import problem_unittests as tests\n'), ((2818, 2828), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2826, 2828), True, 'import tensorflow as tf\n'), ((2834, 2864), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'loaded_graph'}), '(graph=loaded_graph)\n', (2844, 2864), True, 'import tensorflow as tf\n'), ((2910, 2956), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(load_dir + '.meta')"], {}), "(load_dir + '.meta')\n", (2936, 2956), True, 'import tensorflow as tf\n'), ((3234, 3249), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (3242, 3249), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Contains tests for accessing env vars as Django's secret key.
import pytest
from djangokeys.core.djangokeys import DjangoKeys
from djangokeys.exceptions import EnvironmentVariableNotFound
from djangokeys.exceptions import ValueIsEmpty
from tests.files import EMPTY_ENV_PATH
from tests.utils.environment_vars import use_environment_variable
def test__django_keys_secret_key__not_found():
""" An appropriate exception is raised when env var is not set.
"""
keys = DjangoKeys(EMPTY_ENV_PATH)
with pytest.raises(EnvironmentVariableNotFound):
keys.secret_key("DOES_NOT_EXIST")
def test__django_keys_secret_key__empty_string():
""" An empty value cannot be used as Django's secret key.
"""
with use_environment_variable('SECRET_KEY', ''):
keys = DjangoKeys(EMPTY_ENV_PATH)
with pytest.raises(ValueIsEmpty):
keys.secret_key('SECRET_KEY')
def test__django_keys_secret_key__regular_string():
""" A string of characters can be used as Django's secret key.
"""
with use_environment_variable('SECRET_KEY', '<KEY>'):
keys = DjangoKeys(EMPTY_ENV_PATH)
assert keys.secret_key("SECRET_KEY") == "<KEY>"
|
[
"pytest.raises",
"tests.utils.environment_vars.use_environment_variable",
"djangokeys.core.djangokeys.DjangoKeys"
] |
[((529, 555), 'djangokeys.core.djangokeys.DjangoKeys', 'DjangoKeys', (['EMPTY_ENV_PATH'], {}), '(EMPTY_ENV_PATH)\n', (539, 555), False, 'from djangokeys.core.djangokeys import DjangoKeys\n'), ((565, 607), 'pytest.raises', 'pytest.raises', (['EnvironmentVariableNotFound'], {}), '(EnvironmentVariableNotFound)\n', (578, 607), False, 'import pytest\n'), ((775, 817), 'tests.utils.environment_vars.use_environment_variable', 'use_environment_variable', (['"""SECRET_KEY"""', '""""""'], {}), "('SECRET_KEY', '')\n", (799, 817), False, 'from tests.utils.environment_vars import use_environment_variable\n'), ((834, 860), 'djangokeys.core.djangokeys.DjangoKeys', 'DjangoKeys', (['EMPTY_ENV_PATH'], {}), '(EMPTY_ENV_PATH)\n', (844, 860), False, 'from djangokeys.core.djangokeys import DjangoKeys\n'), ((1076, 1123), 'tests.utils.environment_vars.use_environment_variable', 'use_environment_variable', (['"""SECRET_KEY"""', '"""<KEY>"""'], {}), "('SECRET_KEY', '<KEY>')\n", (1100, 1123), False, 'from tests.utils.environment_vars import use_environment_variable\n'), ((1140, 1166), 'djangokeys.core.djangokeys.DjangoKeys', 'DjangoKeys', (['EMPTY_ENV_PATH'], {}), '(EMPTY_ENV_PATH)\n', (1150, 1166), False, 'from djangokeys.core.djangokeys import DjangoKeys\n'), ((874, 901), 'pytest.raises', 'pytest.raises', (['ValueIsEmpty'], {}), '(ValueIsEmpty)\n', (887, 901), False, 'import pytest\n')]
|
from examplepackage.badmodule import bad_function
def test_bad_function():
assert bad_function(1) == 1
|
[
"examplepackage.badmodule.bad_function"
] |
[((88, 103), 'examplepackage.badmodule.bad_function', 'bad_function', (['(1)'], {}), '(1)\n', (100, 103), False, 'from examplepackage.badmodule import bad_function\n')]
|
import os
import pickle
from lib.model.model import Model
class PersistenceHandler:
def __init__(self, folder):
        self.__model_file_name = os.path.join(folder, "model.bin")
def store_model(self, model):
"""
@type model: Model
@return: None
"""
with open(self.__model_file_name, "wb") as output_file:
pickle.dump(model, output_file)
def load_model(self):
"""
@rtype: Model
"""
with open(self.__model_file_name, "rb") as input_file:
return pickle.load(input_file)
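# Hedged usage sketch (not part of the original module): round-trip an object through
# the handler. A plain dict stands in for a real Model instance here.
if __name__ == "__main__":
    handler = PersistenceHandler(".")
    handler.store_model({"weights": [1, 2, 3]})
    print(handler.load_model())  # {'weights': [1, 2, 3]}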
|
[
"pickle.dump",
"pickle.load",
"os.path.join"
] |
[((160, 185), 'os.path.join', 'os.path.join', (['"""model.bin"""'], {}), "('model.bin')\n", (172, 185), False, 'import os\n'), ((378, 409), 'pickle.dump', 'pickle.dump', (['model', 'output_file'], {}), '(model, output_file)\n', (389, 409), False, 'import pickle\n'), ((572, 595), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (583, 595), False, 'import pickle\n')]
|
import discord
from commands.framework.CommandBase import CommandBase
class HelpCommand(CommandBase):
def __init__(self):
super(HelpCommand, self).__init__('help')
async def execute(self, client, message, args):
embed = discord.Embed(
title="Help Page",
description="Prefix: **!**",
color=discord.Colour.red()
)
embed.set_thumbnail(
url="https://media.discordapp.net/attachments/519223258428735511/520234344313257984/badboy.jpg")
embed.add_field(name="!whois", value="Verify informations abou the site.", inline=True)
embed.add_field(name="!ping", value="Ping some target.", inline=True)
embed.add_field(name="!hibp", value="Check if your email got leaked.", inline=True)
embed.add_field(name="!geoip", value="GeoIp lookup.", inline=True)
embed.add_field(name="!nmap", value="Simple port scan an ip address.", inline=True)
embed.add_field(name="!sqli", value="Test if a url is vulnerable to SQLi.", inline=True)
embed.add_field(name="!shodan", value="Search host in shodan.", inline=True)
embed.add_field(name="!exploitdb", value="Search exploits in ExploitDB.", inline=True)
embed.add_field(name="!reverseip", value="Verify domains in a host.", inline=True)
embed.set_footer(text="Type ![command] for more info about command")
await client.send_message(message.channel, embed=embed)
|
[
"discord.Colour.red"
] |
[((354, 374), 'discord.Colour.red', 'discord.Colour.red', ([], {}), '()\n', (372, 374), False, 'import discord\n')]
|
from os import listdir
from os.path import isdir, isfile, join
from itertools import chain
import numpy as np
import matplotlib.pyplot as plt
from utils import shelf
def dlist(key, dat):
r"""Runs over a list of dictionaries and outputs a list of values corresponding to `key`
Short version (no checks): return np.array([d[key] for d in dat])
"""
ret = []
for i, d in enumerate(dat):
if key in d:
ret.append(d[key])
else:
print('key {} is not in dat[{}]. Skip.'.format(key, i))
return np.array(ret)
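# For example: dlist('a', [{'a': 1}, {'a': 2}, {'b': 3}]) prints
# "key a is not in dat[2]. Skip." and returns array([1, 2]).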
def get_data(select_dict, ARGS, key_list, DAT):
data = []
for sel, key in zip(select_dict, key_list):
# Select DAT
k, v = next(iter(sel.items()))
dat = [da[0] for da in zip(DAT, ARGS) if k in da[1] and da[1][k] == v][0]
data.append(dlist(key, dat))
return data
def color_bplot(bplot, colors):
r"""Color the boxplots"""
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
for median in bplot['medians']:
median.set(color='k', linewidth=1.5,)
def label_axis(ax, labels, xpos, ypos, fontsize=16, target_fdr=0.1):
# Partially remove frame
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# y label
ax.set_ylabel('Power and FDR', fontsize=fontsize)
ax.set_ylim([-0.05, 1.05])
# Hortizontal line for target fdr
if target_fdr:
ax.plot(ax.get_xlim(), [target_fdr, target_fdr], '--r')
# New Axis
new_ax = ax.twiny()
new_ax.set_xticks(xpos)
new_ax.set_xticklabels(labels)
new_ax.xaxis.set_ticks_position('bottom') # set the position of the second x-axis to bottom
new_ax.xaxis.set_label_position('bottom') # set the position of the second x-axis to bottom
new_ax.spines['bottom'].set_position(('outward', ypos)) # positions below
# Remove frame for new_ax
new_ax.spines['bottom'].set_visible(False)
new_ax.spines['top'].set_visible(False)
new_ax.spines['left'].set_visible(False)
new_ax.spines['right'].set_visible(False)
new_ax.tick_params(length=0, labelsize=fontsize)
new_ax.set_xlim(ax.get_xlim())
return new_ax
if __name__ == "__main__":
# Load data
PATH = 'output/'
DIRS = [d for d in listdir(PATH) if isdir(join(PATH, d))]
FILES = [join(PATH, d, f) for d in DIRS for f in listdir(join(PATH, d))
if isfile(join(PATH, d, f)) and f[-3:]=='.pt']
ARGS, DAT, MODELS = [], [], []
for f in FILES:
sh = shelf()._load(f)
ARGS.append(sh.args)
if 'd' in sh:
DAT.append(sh['d'])
MODELS.append(sh.args['model'])
else:
print("WARNING: There is no data field d field in file {}. Skip.".format(f))
continue
# ---------------------------
# Process data
# ---------------------------
select_dict, key_list, labels, positions, ax_labels, ax_positions = [], [], [], [-2], [], [-2]
# Baseline models
for m, l in zip(['en', 'rf'], ['Elastic Net', 'Random Forest']):
if m in MODELS:
select_dict += 4*[{'model': m}]
key_list += ['tpr_selected', 'fdr_selected', 'hrt_tpr_selected', 'hrt_fdr_selected']
labels += ['TPR', 'FDR', 'TPR\nHRT', 'FDR\nHRT']
p = positions[-1] + 2
positions += [1+p, 2+p, 4+p, 5+p]
ax_labels += [l]
ax_positions += [ax_positions[-1] + len(l)/2]
# Our models
for m, l, pos in zip(['sic_supervised', 'sic'], ['Sobolev Penalty', 'SIC'], [5.5, 4]):
if m in MODELS:
select_dict += 2*[{'model': m}]
key_list += ['hrt_tpr_selected', 'hrt_fdr_selected']
labels += ['TPR\nHRT', 'FDR\nHRT']
p = positions[-1] + 2
positions += [1+p, 2+p]
ax_labels += [l]
ax_positions += [ax_positions[-1] + pos]
positions.pop(0);
ax_positions.pop(0);
data = get_data(select_dict, ARGS, key_list, DAT)
# ---------------------------
# Plot
# ---------------------------
dataset = ARGS[0]['dataset'].upper()
n_samples = ARGS[0]['numSamples']
fig = plt.figure(figsize=(8, 3))
ax = plt.subplot(111)
bplot = plt.boxplot(data, positions=positions, labels=labels, patch_artist=True)
label_axis(ax, ax_labels, ax_positions, 32, fontsize=13)
color_bplot(bplot, len(positions)//2*['lightblue', 'orange'])
fig.suptitle(f'Dataset {dataset}, N={n_samples}');
fig.tight_layout()
fig.savefig(f"output/{dataset}_{n_samples}.png", bbox_inches='tight')
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.figure",
"numpy.array",
"utils.shelf",
"os.path.join",
"os.listdir"
] |
[((554, 567), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (562, 567), True, 'import numpy as np\n'), ((4199, 4225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (4209, 4225), True, 'import matplotlib.pyplot as plt\n'), ((4235, 4251), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4246, 4251), True, 'import matplotlib.pyplot as plt\n'), ((4265, 4337), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['data'], {'positions': 'positions', 'labels': 'labels', 'patch_artist': '(True)'}), '(data, positions=positions, labels=labels, patch_artist=True)\n', (4276, 4337), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2368), 'os.path.join', 'join', (['PATH', 'd', 'f'], {}), '(PATH, d, f)\n', (2356, 2368), False, 'from os.path import isdir, isfile, join\n'), ((2300, 2313), 'os.listdir', 'listdir', (['PATH'], {}), '(PATH)\n', (2307, 2313), False, 'from os import listdir\n'), ((2323, 2336), 'os.path.join', 'join', (['PATH', 'd'], {}), '(PATH, d)\n', (2327, 2336), False, 'from os.path import isdir, isfile, join\n'), ((2400, 2413), 'os.path.join', 'join', (['PATH', 'd'], {}), '(PATH, d)\n', (2404, 2413), False, 'from os.path import isdir, isfile, join\n'), ((2544, 2551), 'utils.shelf', 'shelf', ([], {}), '()\n', (2549, 2551), False, 'from utils import shelf\n'), ((2438, 2454), 'os.path.join', 'join', (['PATH', 'd', 'f'], {}), '(PATH, d, f)\n', (2442, 2454), False, 'from os.path import isdir, isfile, join\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import proton as _proton
import proton.handlers as _handlers
import proton.reactor as _reactor
import runpy as _runpy
import sys as _sys
import traceback as _traceback
from .common import *
_description = """
Respond to AMQP requests. Use qrespond in combination with the
qrequest command to transfer requests through an AMQP message server.
"""
_epilog = """
Example usage:
$ qrespond amqps://example.net/queue1 # Respond to requests indefinitely
$ qrespond jobs --count 1 # Respond to one request
$ qrespond jobs --upper --reverse # Transform the request text
"""
class RespondCommand(MessagingCommand):
def __init__(self, home_dir):
super().__init__(home_dir, "qrespond", _Handler(self))
self.parser.description = _description + suite_description
self.parser.epilog = url_epilog + _epilog
self.parser.add_argument("url", metavar="URL",
help="The location of a message source or target")
self.parser.add_argument("-c", "--count", metavar="COUNT", type=int,
help="Exit after processing COUNT requests")
processing_options = self.parser.add_argument_group \
("Request processing options",
"By default, qrespond returns the request text unchanged")
processing_options.add_argument("--upper", action="store_true",
help="Convert the request text to upper case")
processing_options.add_argument("--reverse", action="store_true",
help="Reverse the request text")
processing_options.add_argument("--append", metavar="STRING",
help="Append STRING to the request text")
def init(self, args):
super().init(args)
self.scheme, self.host, self.port, self.address = self.parse_url(args.url)
self.desired_messages = args.count
self.upper = args.upper
self.reverse = args.reverse
self.append = args.append
def process(self, request, response):
text = request.body
if text is None:
return
if self.upper:
text = text.upper()
if self.reverse:
text = "".join(reversed(text))
if self.append is not None:
text += self.append
response.body = text
class _Handler(MessagingHandler):
def __init__(self, command):
super().__init__(command, auto_accept=False)
self.receiver = None
self.processed_requests = 0
def open(self, event):
super().open(event)
self.receiver = event.container.create_receiver(self.connection, self.command.address)
self.sender = event.container.create_sender(self.connection, None)
def close(self, event):
super().close(event)
self.command.notice("Processed {} {}", self.processed_requests, plural("request", self.processed_requests))
def on_message(self, event):
request = event.message
self.command.info("Received request {} from {} on {}", request, self.receiver, event.connection)
response = _proton.Message()
response.address = request.reply_to
response.correlation_id = request.id
try:
self.command.process(request, response)
except:
processing_succeeded = False
_traceback.print_exc()
else:
processing_succeeded = True
self.processed_requests += 1
if processing_succeeded:
self.sender.send(response)
self.command.info("Sent response {} to address '{}' on {}", response, response.address, event.connection)
self.accept(event.delivery)
else:
self.command.warn("Processing request {} failed", request)
self.reject(event.delivery)
if self.processed_requests == self.command.desired_messages:
self.close(event)
|
[
"proton.Message",
"traceback.print_exc"
] |
[((3978, 3995), 'proton.Message', '_proton.Message', ([], {}), '()\n', (3993, 3995), True, 'import proton as _proton\n'), ((4220, 4242), 'traceback.print_exc', '_traceback.print_exc', ([], {}), '()\n', (4240, 4242), True, 'import traceback as _traceback\n')]
|
import argparse
import math
import random
import os
import copy
from numpy.core.fromnumeric import resize
import dnnlib
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
from torch_utils import image_transforms
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from model import Generator, MappingNetwork, G_NET
from finegan_config import finegan_config
from dataset import MultiResolutionDataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="mpnet trainer")
parser.add_argument('--arch', type=str, default='stylegan2', help='model architectures (stylegan2 | swagan)')
parser.add_argument(
"--iter", type=int, default=800000, help="total training iterations"
)
parser.add_argument(
"--batch", type=int, default=16, help="batch sizes for each gpus"
)
parser.add_argument(
"--n_sample",
type=int,
default=8,
help="number of the samples generated during training",
)
parser.add_argument(
"--size", type=int, default=256, help="image sizes for the model"
)
parser.add_argument(
"--mixing", type=float, default=0.9, help="probability of latent code mixing"
)
parser.add_argument(
"--style_model",
type=str,
default=None,
help="path to stylegan",
)
parser.add_argument("--lr", type=float, default=0.002, help="learning rate")
parser.add_argument(
"--channel_multiplier",
type=int,
default=2,
help="channel multiplier factor for the model. config-f = 2, else = 1",
)
args = parser.parse_args()
n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = n_gpu > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
synchronize()
args.latent = 512
args.n_mlp = 8
args.start_iter = 0
if args.arch == 'stylegan2':
from model import Generator, Discriminator
elif args.arch == 'swagan':
from swagan import Generator, Discriminator
style_generator = Generator(
size=args.size,
style_dim=args.latent,
n_mlp=args.n_mlp,
channel_multiplier=args.channel_multiplier
).to(device)
discriminator = Discriminator(
args.size
).to(device)
assert args.style_model is not None
print("load style model:", args.style_model)
style_dict = torch.load(args.style_model, map_location=lambda storage, loc: storage)
style_generator.load_state_dict(style_dict["g_ema"], strict=False)
style_generator.eval()
discriminator.load_state_dict(style_dict["d"], strict=False)
discriminator.eval()
with torch.no_grad():
sample_z = torch.randn(args.batch, args.latent, device=device)
sample_img, _ = style_generator([sample_z])
print(discriminator(sample_img))
utils.save_image(
sample_img,
f"style_sample.png",
nrow=8,
normalize=True,
range=(-1, 1),
)
|
[
"swagan.Discriminator",
"torch.distributed.init_process_group",
"argparse.ArgumentParser",
"torch.load",
"torch.randn",
"torchvision.utils.save_image",
"torch.cuda.set_device",
"torch.no_grad",
"swagan.Generator",
"distributed.synchronize"
] |
[((755, 807), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""mpnet trainer"""'}), "(description='mpnet trainer')\n", (778, 807), False, 'import argparse\n'), ((2824, 2895), 'torch.load', 'torch.load', (['args.style_model'], {'map_location': '(lambda storage, loc: storage)'}), '(args.style_model, map_location=lambda storage, loc: storage)\n', (2834, 2895), False, 'import torch\n'), ((3280, 3372), 'torchvision.utils.save_image', 'utils.save_image', (['sample_img', 'f"""style_sample.png"""'], {'nrow': '(8)', 'normalize': '(True)', 'range': '(-1, 1)'}), "(sample_img, f'style_sample.png', nrow=8, normalize=True,\n range=(-1, 1))\n", (3296, 3372), False, 'from torchvision import transforms, utils\n'), ((2081, 2119), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (2102, 2119), False, 'import torch\n'), ((2128, 2202), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (2164, 2202), False, 'import torch\n'), ((2211, 2224), 'distributed.synchronize', 'synchronize', ([], {}), '()\n', (2222, 2224), False, 'from distributed import get_rank, synchronize, reduce_loss_dict, reduce_sum, get_world_size\n'), ((3094, 3109), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3107, 3109), False, 'import torch\n'), ((3130, 3181), 'torch.randn', 'torch.randn', (['args.batch', 'args.latent'], {'device': 'device'}), '(args.batch, args.latent, device=device)\n', (3141, 3181), False, 'import torch\n'), ((2485, 2599), 'swagan.Generator', 'Generator', ([], {'size': 'args.size', 'style_dim': 'args.latent', 'n_mlp': 'args.n_mlp', 'channel_multiplier': 'args.channel_multiplier'}), '(size=args.size, style_dim=args.latent, n_mlp=args.n_mlp,\n channel_multiplier=args.channel_multiplier)\n', (2494, 2599), False, 'from swagan import Generator, Discriminator\n'), ((2666, 2690), 'swagan.Discriminator', 'Discriminator', (['args.size'], {}), '(args.size)\n', (2679, 2690), False, 'from swagan import Generator, Discriminator\n')]
|